def __init__(self):
        # self.cam = cv2.VideoCapture(0)
        # self.cam.set(3, 320)
        # self.cam.set(4, 240)
        self.cam = WebcamVideoStream(src=0, resolution=(640, 480)).start()
        self.fps = FPS().start()

        ret, self.frame = self.cam.read()

        self.suspend_tracking = SuspendTracking(teta=3)

        self.height, self.width = self.frame.shape[:2]
        self.kernel_erode = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                      (3, 3))
        self.kernel_dilate = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                       (7, 7))

        cv2.namedWindow('camshift')
        self.obj_select = RectSelector('camshift', self.onmouse)

        radius = 3
        n_point = 8 * radius
        self.lbpDesc = LBP(n_point, radius)

        self.HSV_CHANNELS = (
            (24, [0, 180], "hue"),  # Hue
            (8, [0, 256], "sat"),  # Saturation
            (8, [0, 256], "val")  # Value
        )

        self.show_backproj = False
        self.track_window = None
        self.histHSV = []
        self.track_box = None
Example #2
def __init__(self):
     # Global Variables
     print("initialized")
     self.lc = lcm.LCM()
     lcmCameraPoseSub = self.lc.subscribe("CAMERA_POSE_CHANNEL",
                                          cameraPose_handler)
     lcmCameraPoseSub.set_queue_capacity(1)
     self.camera_pose = None
     self.vs = WebcamVideoStream(src=0).start()
def main():
    # Load Model and allocate tensors to the Coral USB device
    interpreter = tf.lite.Interpreter(
        model_path='../../../AI_Token_Recognition.tflite',
        experimental_delegates=[load_delegate('libedgetpu.so.1.0')])
    interpreter.allocate_tensors()

    # Get input and output tensors
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    vc = WebcamVideoStream(src=0).start()

    while True:
        # read the current camera frame
        frame = vc.read()

        # show the current frame (untouched)
        cv2.imshow("My webcam", frame)

        # read the key once per loop iteration
        key = cv2.waitKey(1) & 0xFF
        # if 'x' key is pressed, exit the loop
        if key == ord('x'):
            break
        # if 'c' key is pressed, run the model on the current frame
        if key == ord('c'):
            img = cv2.resize(frame, (160, 160))

            cv2.imshow("Resized", img)

            # convert the frame to an array
            img_array = keras.preprocessing.image.img_to_array(img)
            img_array = tf.expand_dims(img_array, 0)

            # set the input to give it the image
            interpreter.set_tensor(input_details[0]['index'], img_array)
            interpreter.invoke()

            # get a prediction
            predictions = interpreter.get_tensor(output_details[0]['index'])
            score = tf.nn.softmax(predictions[0])

            # RESULT
            print(
                "This image most likely belongs to {} with a {:.2f} percent confidence."
                .format(np.argmax(score), 100 * np.max(score)))

    # close the window and de-allocate any associated memory usage
    cv2.destroyAllWindows()

    # close the already opened camera
    vc.stop()
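
# A hedged note on the snippet above: Edge TPU .tflite models are usually
# uint8-quantized, so the array passed to set_tensor should match the dtype the
# interpreter reports. Minimal sketch (the helper name is illustrative, not from
# the original code); assumes cv2, np, and the interpreter/input_details above.
def prepare_input(interpreter, input_details, frame, size=(160, 160)):
    # resize the frame and add a batch dimension
    img = cv2.resize(frame, size)
    arr = np.expand_dims(img, 0).astype(input_details[0]['dtype'])
    # hand the correctly-typed tensor to the interpreter
    interpreter.set_tensor(input_details[0]['index'], arr)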
class FaceAndEyeDetectorStream:
    def __init__(self, src=0):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.webcam_stream = WebcamVideoStream(src).start()

        frame, frame_time = self.webcam_stream.read()

        self.frame_time = frame_time
        if frame is not None:
            (self.img, self.faces,
             self.face_features) = extract_image_features(frame)

        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False

    def start(self):
        # start the thread to read frames from the video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self

    def update(self):
        # keep looping infinitely until the thread is stopped
        while True:
            # if the thread indicator variable is set, stop the thread
            #  print('updating')
            if self.stopped:
                #  print('returning')
                return

            # otherwise, read the next frame from the stream
            frame, frame_time = self.webcam_stream.read()
            self.frame_time = frame_time

            if frame is not None:
                (self.img, self.faces,
                 self.face_features) = extract_image_features(frame)
            #  print('the faces', self.faces)
            #  print('updated', self.grabbed)

    def read(self):
        # return the frame most recently read
        return (self.img, self.faces, self.face_features, self.frame_time)

    def stop(self):
        # indicate that the thread should be stopped
        self.webcam_stream.stop()
        self.stopped = True
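
# A minimal usage sketch for the FaceAndEyeDetectorStream class above (hedged;
# assumes the extract_image_features helper and a webcam at src=0):
if __name__ == '__main__':
    stream = FaceAndEyeDetectorStream(src=0).start()
    try:
        while True:
            # grab the most recent detections from the background thread
            img, faces, face_features, frame_time = stream.read()
            # ... process img / faces here ...
    except KeyboardInterrupt:
        pass
    finally:
        stream.stop()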
    def __init__(self):
        # self.cam = cv2.VideoCapture(0)
        # self.cam.set(3, 320)
        # self.cam.set(4, 240)
        self.cam = WebcamVideoStream(src=0, resolution=(640, 480)).start()
        self.fps = FPS().start()

        ret, self.frame = self.cam.read()

        self.conf = {
            'ColorFrameNum': 7,
            'LBPFrameNum': 7,
            'MaxFrameDiffClr': 15,
            'MaxLBPFrameUpdate': 30,
            'L_Weight': 0.3,
            'A_Weight': 0.7,
            'B_Weight': 0.7
        }

        self.ColorCheck = AdaptiveThreshold(teta=3, max_lost_cnt=1)
        self.LBPCheck = AdaptiveThreshold(teta=2, max_lost_cnt=1)

        self.ColorDistance = LABDistance()
        self.LBPDistance = LocalBinaryPatterns(
            numPoints=8,
            radius=2,
            update_prev_hist=self.conf['MaxLBPFrameUpdate'])

        self.isLost = False
        self.isLBPLost = False

        self.height, self.width = self.frame.shape[:2]

        self.kernel_e = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        self.kernel_d = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

        cv2.namedWindow('camshift')
        self.obj_select = RectSelector('camshift', self.onmouse)

        self.LAB_CHANNELS = (
            (24, [0, 256], "light"),  # L
            (24, [0, 256], "a"),  # a
            (24, [0, 256], "b")  # b
        )

        self.show_backproj = False
        self.track_window = None
        self.histLAB = []
        self.track_box = None
    def __init__(self, src=0):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.webcam_stream = WebcamVideoStream(src).start()

        frame, frame_time = self.webcam_stream.read()

        self.frame_time = frame_time
        if frame is not None:
            (self.img, self.faces,
             self.face_features) = extract_image_features(frame)

        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False
    def _detect(self):
        """Class function to detect faces and eyes within faces"""
        video_stream = WebcamVideoStream()
        video_stream.start()
        # Cascade Classifiers
        face_cascade = cv2.CascadeClassifier(
            'haarcascades/haarcascade_frontalface_default.xml')
        eye_cascade = cv2.CascadeClassifier('haarcascades/haarcascade_eye.xml')
        while True:
            frame = video_stream.read()
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Detecting faces and eyes
            faces = face_cascade.detectMultiScale(gray, 1.3, 5)
            for (x, y, w, h) in faces:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_gray = gray[y:y + h, x:x + w]
                roi_color = frame[y:y + h, x:x + w]

                eyes = eye_cascade.detectMultiScale(roi_gray)

                # draw the eyes only when exactly two are found in this face
                if len(eyes) == 2:
                    for (ex, ey, ew, eh) in eyes:
                        cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh),
                                      (0, 255, 0), 1)

            # Display image
            cv2.imshow('Image', frame)

            # stop the stream and exit on 'q'
            if cv2.waitKey(1) & 0xFF == ord('q'):
                video_stream.stop()
                break
Example #8
def show_camera():
    # To flip the image, modify the flip_method parameter (0 and 2 are the most common)
    print(gstreamer_pipeline(flip_method=0))
    vs = WebcamVideoStream(src=gstreamer_pipeline()).start()
    #cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
    fps = FPS().start()
    take_snapshot = True

    # bail out early if the threaded stream is not delivering frames
    if vs.read() is None:
        print("Unable to open camera")
        return

    cv2.namedWindow("CSI Camera", cv2.WINDOW_AUTOSIZE)
    # keep processing frames while the window stays open
    while cv2.getWindowProperty("CSI Camera", 0) >= 0:
        original_img = vs.read()
        if take_snapshot:
            save_snapshot(original_img, "original")
            take_snapshot = False
        hsv_filter = (60, 87, 120, 255, 50, 255)
        img = apply_hsv_filter(original_img, hsv_filter)
        img = erode(img, 1)
        img = dilate(img, 1)

        targets = find_contours(img)
        brColor = (255, 255, 255)
        for contour in targets:
            rr = cv2.minAreaRect(contour)
            pt = get_goal_center(rr)
            cv2.circle(original_img, pt, 6, brColor, 3)

        cv2.imshow("CSI Camera", original_img)
        fps.update()
        # Stop the program on the ESC key
        keyCode = cv2.waitKey(30) & 0xFF
        if keyCode == 27:
            break

    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # stop the threaded stream and close the window
    vs.stop()
    cv2.destroyAllWindows()
Example #9
def main():
    model = keras.models.load_model('../../../AI_Token_Recognition')

    # Check the loaded model
    model.summary()

    vc = WebcamVideoStream(src=0).start()

    while True:
        # read the current camera frame
        frame = vc.read()

        # show the current frame (untouched)
        cv2.imshow("My webcam", frame)

        # read the key once per loop iteration
        key = cv2.waitKey(1) & 0xFF
        # if 'x' key is pressed, exit the loop
        if key == ord('x'):
            break
        # if 'c' key is pressed, run the model on the current frame
        if key == ord('c'):
            img = cv2.resize(frame, (160, 160))

            cv2.imshow("Resized", img)

            # convert the frame to an array
            img_array = keras.preprocessing.image.img_to_array(img)
            img_array = tf.expand_dims(img_array, 0)

            # get a prediction
            predictions = model.predict(img_array)
            score = tf.nn.softmax(predictions[0])

            # RESULT
            print(
                "This image most likely belongs to {} with a {:.2f} percent confidence."
                .format(np.argmax(score), 100 * np.max(score)))

    # close the window and de-allocate any associated memory usage
    cv2.destroyAllWindows()

    # close the already opened camera
    vc.stop()
def updateRobotPos():
    global cte, x, y, theta, env
    cap = WebcamVideoStream(src=int(sys.argv[1]))
    cap.start()

    print("Setting up...")
    setupImgs = []
    for i in range(15):
        frame = cap.read()
        setupImgs.append(frame)

        time.sleep(0.1)

    transformMatrix, reprojMatrix = setup(setupImgs)

    while True:
        frame = cap.read()
        #print(frame[50:60, 50:60, 1])

        x, y, theta, outImage = getRobotPosition(frame, transformMatrix)
        print(x, y, theta)
        print("")

        cte = y
        #updateImage(outImage)
        env.setRobotPose(x, y, theta)
        m = env.visualizeMap()
        #out = cv2.warpPerspective(m, reprojMatrix, (outImage.shape[1], outImage.shape[0]))
        #out = cv2.addWeighted(out, 0.5, outImage, 1 - 0.5, 0)
        updateImage(outImage)
        updateImage2(m)

        time.sleep(0.01)
def main():
    vc = WebcamVideoStream(src=0).start()

    reader = easyocr.Reader(['en'])

    while True:
        # read the current camera frame
        frame = vc.read()

        # show the current frame (untouched)
        cv2.imshow("My webcam", frame)

        # read the key once per loop iteration
        key = cv2.waitKey(1) & 0xFF
        # if 'x' key is pressed, exit the loop
        if key == ord('x'):
            break
        # if 'c' key is pressed, process the frame for OCR
        if key == ord('c'):
            # copy the image so it will be a cleaned image for tesseract
            img = vc.mask_frame()

            # DEBUG
            img_box = draw_boxes(img, reader)

            # draw the chart containing the image with boxes
            cv2.imshow("Tesseract", img_box)

            print_img_str(img, reader)

    # close the window and de-allocate any associated memory usage
    cv2.destroyAllWindows()

    # close the already opened camera
    vc.stop()
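
# draw_boxes() above is not shown in the listing. A hedged sketch of what it
# could look like (not the original implementation): easyocr's readtext()
# returns (bbox, text, confidence) tuples, where bbox holds four corner points.
def draw_boxes(img, reader):
    out = img.copy()
    for bbox, text, conf in reader.readtext(out):
        # bbox order is top-left, top-right, bottom-right, bottom-left
        top_left = tuple(int(v) for v in bbox[0])
        bottom_right = tuple(int(v) for v in bbox[2])
        cv2.rectangle(out, top_left, bottom_right, (0, 255, 0), 2)
        cv2.putText(out, text, (top_left[0], top_left[1] - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
    return out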
def updateRobotPos():
    global cte, x, y, theta, env
    cap = WebcamVideoStream(src=int(sys.argv[1]))
    cap.start()

    print("Setting up...")
    setupImgs = []
    for i in range(15):
        frame = cap.read()
        setupImgs.append(frame)

        time.sleep(0.1)

    transformMatrix = setup(setupImgs)

    while True:
        frame = cap.read()
        #print(frame[50:60, 50:60, 1])

        x, y, theta, outImage = getRobotPosition(frame, transformMatrix)
        print(x, y, theta)
        print("")

        cte = y
        updateImage(outImage)
        env.setRobotPose(x, y, theta)
        m = env.visualizeMap()
        updateImage2(m)

        time.sleep(0.01)
    def __init__(self,
                 src=1,
                 usePiCamera=False,
                 resolution=(320, 240),
                 framerate=32):
        # check to see if the picamera module should be used
        if usePiCamera:
            # only import the picamera packages unless we are
            # explicitly told to do so -- this helps remove the
            # requirement of `picamera[array]` from desktops or
            # laptops that still want to use the `imutils` package
            from pivideostream import PiVideoStream

            # initialize the picamera stream and allow the camera
            # sensor to warmup
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate)

        # otherwise, we are using OpenCV so initialize the webcam
        # stream
        else:
            self.stream = WebcamVideoStream(src=src)
Example #14
def main():
    # TODO; Load Model and set it to the Intel NCS2 USB

    vc = WebcamVideoStream(src=0).start()

    while True:
        # read the current camera frame
        frame = vc.read()

        # show the current frame (untouched)
        cv2.imshow("My webcam", frame)

        # read the key once per loop iteration
        key = cv2.waitKey(1) & 0xFF
        # if 'x' key is pressed, exit the loop
        if key == ord('x'):
            break
        # if 'c' key is pressed, run the model on the current frame
        if key == ord('c'):
            # convert the frame to an array
            img_array = keras.preprocessing.image.img_to_array(frame)
            img_array = tf.expand_dims(img_array, 0)

            # TODO: get a prediction using the NCS2

            # get a prediction
            #predictions = interpreter.get_tensor(output_details[0]['index'])
            #classes = predictions.argmax(axis=-1)
            #score = tf.nn.softmax(predictions[0])

            # RESULT
            #print(
            #    "This image most likely belongs to {} with a {:.2f} percent confidence."
            #    .format(classes[np.argmax(score)], 100 * np.max(score))
            #)

    # close the window and de-allocate any associated memory usage
    cv2.destroyAllWindows()

    # close the already opened camera
    vc.stop()
class VideoStream:
    def __init__(self,
                 src=1,
                 usePiCamera=False,
                 resolution=(320, 240),
                 framerate=32):
        # check to see if the picamera module should be used
        if usePiCamera:
            # only import the picamera packages unless we are
            # explicitly told to do so -- this helps remove the
            # requirement of `picamera[array]` from desktops or
            # laptops that still want to use the `imutils` package
            from pivideostream import PiVideoStream

            # initialize the picamera stream and allow the camera
            # sensor to warmup
            self.stream = PiVideoStream(resolution=resolution,
                                        framerate=framerate)

        # otherwise, we are using OpenCV so initialize the webcam
        # stream
        else:
            self.stream = WebcamVideoStream(src=src)

    def start(self):
        # start the threaded video stream
        return self.stream.start()

    def update(self):
        # grab the next frame from the stream
        self.stream.update()

    def read(self):
        # return the current frame
        return self.stream.read()

    def stop(self):
        # stop the thread and release any resources
        self.stream.stop()
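
# A short usage sketch for the VideoStream wrapper above (hedged; the src index
# and the absence of a Pi camera are assumptions):
vs = VideoStream(src=0, usePiCamera=False).start()
frame = vs.read()
vs.stop()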
Example #16
def updateRobotPos():
    global x, y, theta, env, outImage, started, reprojMatrix, pp, frame, transformMatrix, initFinished
    cap = WebcamVideoStream(src=int(sys.argv[1]))
    cap.start()

    print("Setting up...")
    setupImgs = []
    for i in range(15):
        frame = cap.read()
        setupImgs.append(frame)

        time.sleep(0.1)

    transformMatrix, reprojMatrix = setup(setupImgs)

    initFinished = True
    while True:
        frame = cap.read()
        x, y, theta, outImage = getRobotPosition(frame, transformMatrix)
        env.setRobotPose(x, y, theta)

        time.sleep(0.01)
Example #17
    def sample_from_webcam(self):
        # cap = cv2.VideoCapture(0)
        vs = WebcamVideoStream(src=0).start()

        # test = cap.get(cv2.CAP_PROP_POS_MSEC)
        # ratio = cap.get(cv2.CAP_PROP_POS_AVI_RATIO)
        # frame_rate = cap.get(cv2.CAP_PROP_FPS)
        # width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        # height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
        # brightness = cap.get(cv2.CAP_PROP_BRIGHTNESS)
        # contrast = cap.get(cv2.CAP_PROP_CONTRAST)
        # saturation = cap.get(cv2.CAP_PROP_SATURATION)
        # hue = cap.get(cv2.CAP_PROP_HUE)
        # gain = cap.get(cv2.CAP_PROP_GAIN)
        # exposure = cap.get(cv2.CAP_PROP_EXPOSURE)
        # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
        # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
        # cap.set(cv2.CAP_PROP_EXPOSURE, -6.0)
        # cap.set(cv2.CAP_PROP_FPS, 30)
        # cap.set(cv2.CAP_PROP_GAIN, 0)
        # cap.set(cv2.CAP_PROP_BRIGHTNESS, 64)
        # cap.set(cv2.CAP_PROP_CONTRAST, 64)
        # cap.set(cv2.CAP_PROP_SATURATION, 64)
        # print("Test: ", test)
        # print("Ratio: ", ratio)
        # print("Frame Rate: ", frame_rate)
        # print("Height: ", height)
        # print("Width: ", width)
        # print("Brightness: ", brightness)
        # print("Contrast: ", contrast)
        # print("Saturation: ", saturation)
        # print("Hue: ", hue)
        # print("Gain: ", gain)
        # print("Exposure: ", exposure)

        arduino = serial.Serial('COM3', 1000000, timeout=.1)
        time.sleep(1)
        arduino.write('r'.encode())
        time.sleep(1)

        # arduino.write("Hello from Python".encode())
        # img = cv2.imread('red.png')
        while True:
            start = time.time()

            #s, img = cap.read()
            img = vs.read()
            #cv2.resize(img, None, 0.5, 0.5, cv2.INTER_LINEAR)
            self.sample_from_image(img)

            colors = []
            for color in self.left_leds_colors:
                colors.append(int(color[2]))
                colors.append(int(color[1]))
                colors.append(int(color[0]))

            for color in self.top_leds_colors:
                colors.append(int(color[2]))
                colors.append(int(color[1]))
                colors.append(int(color[0]))

            for color in self.right_leds_colors:
                colors.append(int(color[2]))
                colors.append(int(color[1]))
                colors.append(int(color[0]))

            for color in self.bottom_led_colors:
                colors.append(int(color[2]))
                colors.append(int(color[1]))
                colors.append(int(color[0]))

            bytes = struct.pack('c' + ('B' * len(colors)) + 'c', '@'.encode(),
                                *colors, '#'.encode())
            arduino.write(bytes)

            img_with_borders = self.show_image_with_colors(img)

            # cv2.imshow("Image with Borders", img_with_borders)
            # cv2.waitKey(30)
            # time.sleep(0.1)

            end = time.time()
            elapsed = end - start
            print(str(1 / elapsed) + " FPS")
Example #18
def main():
    #Select Webcam to Stream from
    vs = WebcamVideoStream(0)
    vs.start()

    #Initialize Config for tesseract
    #tesconfigargs = ('-l digits --psm 10')
    tesconfigargs = '--oem 0 -c tessedit_char_whitelist=0123456789-. --psm 10'

    #Set pytesseract CMD (Windows only)
    pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files (x86)\\Tesseract-OCR\\tesseract.exe'

    #Instantiate Logger
    setup_logger('log', r'C:\Temp\ImageAnalysis.csv')
    log = logging.getLogger('log')

    log.info("-------------------------------------Capture started----------------------------------------------")

    while True:

        frame = vs.read()
        cv2.imshow('frame', frame)

        #Color to GrayScale Filter
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        #small Gaussian Blur Filter to filter out grainy Stuff
        gauss = cv2.GaussianBlur(gray, (5,5),0)

        #canny detector
        canny = cv2.Canny(gauss,100,200)
        cv2.imshow('canny', canny)

        # note: OpenCV 3.x returns (image, contours, hierarchy); OpenCV 4.x returns only (contours, hierarchy)
        _, cnts, _ = cv2.findContours(canny, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        cnts = sorted(cnts, key=cv2.contourArea, reverse=True)[:10]


        ## loop over our contours
        screenCnt = None
        for c in cnts:
            if cv2.contourArea(c) > 1000:

                #approximate the contour
                peri = cv2.arcLength(c, True)
                approx = cv2.approxPolyDP(c, 0.02 * peri, True)

                #if our approximated contour has four points, then
                #we can assume that we have found our screen
                if len(approx) == 4:
                    screenCnt = approx
                    cv2.drawContours(frame, [screenCnt], -1, (0, 255, 0), 3)
                    x,y,width,height = cv2.boundingRect(screenCnt)
                    croppedframe = frame[y: y + height , x: x + width] # both opencv and numpy are "row-major", so y goes first

                    digit = pytesseract.image_to_string(croppedframe, config=tesconfigargs)

                    # Print and Log recognized text
                    log.info(digit)
                    break

        cv2.imshow('frame', frame)
        key = cv2.waitKey(5) & 0xFF
        if key == 27:
            break

    #Do Cleanup
    vs.stop()
    cv2.destroyAllWindows()
class App(object):
    def __init__(self):
        # self.cam = cv2.VideoCapture(0)
        # self.cam.set(3, 320)
        # self.cam.set(4, 240)
        self.cam = WebcamVideoStream(src=0, resolution=(640, 480)).start()
        self.fps = FPS().start()

        ret, self.frame = self.cam.read()

        self.suspend_tracking = SuspendTracking(teta=3)

        self.height, self.width = self.frame.shape[:2]
        self.kernel_erode = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                      (3, 3))
        self.kernel_dilate = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                       (7, 7))

        cv2.namedWindow('camshift')
        self.obj_select = RectSelector('camshift', self.onmouse)

        radius = 3
        n_point = 8 * radius
        self.lbpDesc = LBP(n_point, radius)

        self.HSV_CHANNELS = (
            (24, [0, 180], "hue"),  # Hue
            (8, [0, 256], "sat"),  # Saturation
            (8, [0, 256], "val")  # Value
        )

        self.show_backproj = False
        self.track_window = None
        self.histHSV = []
        self.track_box = None

    def onmouse(self, rect):
        xmin, ymin, xmax, ymax = rect
        hsvRoi = self.hsv[ymin:ymax, xmin:xmax]

        self.calcHSVhist(hsvRoi)
        self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin)
        self.init_suspend(hsvRoi)
        self.fps.reset()

    def init_suspend(self, hsvRoi):
        track_window_condition = (self.track_window
                                  and self.track_window[2] > 0
                                  and self.track_window[3] > 0)
        if track_window_condition:
            self.camshift_algorithm()
            self.suspend_tracking.init(hsvRoi)

    def calcHSVhist(self, hsvRoi):
        self.histHSV = []
        for channel, param in enumerate(self.HSV_CHANNELS):
            # Init HSV histogram
            hist = cv2.calcHist([hsvRoi], [channel], None, [param[0]],
                                param[1])
            hist = cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
            self.histHSV.append(hist)
            # Show hist of each channel separately
            self.show_hist(hist, param[2])

    def calcBackProjection(self):
        ch_prob = []
        ch_back_proj_prob = []
        # back_proj_prob = np.ones(shape=(self.height, self.width), dtype=np.uint8) * 255
        # back_proj_prob = np.zeros(shape=(self.height, self.width), dtype=np.uint8)

        for channel, param in enumerate(self.HSV_CHANNELS):
            prob = cv2.calcBackProject([self.hsv], [channel],
                                       self.histHSV[channel], param[1], 1)
            cv2.imshow('Back projection ' + str(param[2]), prob)
            # ret, prob = cv2.threshold(prob, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            ret, prob = cv2.threshold(prob, 70, 255, cv2.THRESH_BINARY)
            cv2.imshow('Back projection thresh ' + str(param[2]), prob)
            # prob = cv2.morphologyEx(prob, cv2.MORPH_ERODE, self.kernel_erode, iterations=2)
            # prob = cv2.morphologyEx(prob, cv2.MORPH_DILATE, self.kernel_dilate, iterations=3)
            # back_proj_prob = cv2.bitwise_and(back_proj_prob, prob)
            # back_proj_prob = cv2.addWeighted(back_proj_prob, 0.4, prob, 0.6, 0)
            ch_prob.append(prob)

        ch_back_proj_prob.append(
            cv2.addWeighted(ch_prob[0], 0.6, ch_prob[1], 0.4, 0))

        ch_back_proj_prob.append(
            cv2.addWeighted(ch_prob[0], 0.6, ch_prob[2], 0.4, 0))

        back_proj_prob = cv2.bitwise_and(ch_back_proj_prob[0],
                                         ch_back_proj_prob[1])
        ret, back_proj_prob = cv2.threshold(back_proj_prob, 150, 255,
                                            cv2.THRESH_BINARY)

        back_proj_prob = cv2.morphologyEx(back_proj_prob,
                                          cv2.MORPH_ERODE,
                                          self.kernel_erode,
                                          iterations=1)
        back_proj_prob = cv2.morphologyEx(back_proj_prob,
                                          cv2.MORPH_DILATE,
                                          self.kernel_erode,
                                          iterations=2)

        return back_proj_prob

    @staticmethod
    def show_hist(hist, channel='None'):
        bin_count = hist.shape[0]
        bin_w = 24
        img = np.zeros((256, bin_count * bin_w, 3), np.uint8)
        for i in range(bin_count):
            h = int(hist[i])
            if str(channel) == 'hue':
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h),
                              (int(180.0 * i / bin_count), 255, 255), -1)
            elif str(channel) == 'sat':
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h),
                              (180, int(255.0 * i / bin_count), 255), -1)
            elif str(channel) == 'val':
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h),
                              (180, 255, int(255.0 * i / bin_count)), -1)
            else:
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h), (180, 255, 255),
                              -1)
        img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        cv2.imshow('hist ' + str(channel), img)

    def camshift_algorithm(self):
        prob = self.calcBackProjection()
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        self.track_box, self.track_window = cv2.CamShift(
            prob, self.track_window, term_crit)

        if self.show_backproj:
            cv2.imshow("Back Projection", prob[..., np.newaxis])
        else:
            cv2.destroyWindow("Back Projection")

    def run(self):
        scaling_factor = 0.5
        while True:
            if not self.obj_select.dragging:
                ret, self.frame = self.cam.read()
                self.frame = cv2.resize(self.frame,
                                        None,
                                        fx=scaling_factor,
                                        fy=scaling_factor,
                                        interpolation=cv2.INTER_AREA)
                # blur_frame = cv2.GaussianBlur(self.frame, (21,21), 0)
                self.hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)

            if ret:
                vis = self.frame.copy()

            track_window_condition = (self.track_window
                                      and self.track_window[2] > 0
                                      and self.track_window[3] > 0)

            if track_window_condition and not self.suspend_tracking.is_suspend(
                    self.hsv, self.track_box):
                self.camshift_algorithm()

                try:
                    cv2.ellipse(vis, self.track_box, (0, 0, 255), 2)
                    pts = cv2.boxPoints(self.track_box)
                    pts = np.int0(pts)
                    cv2.polylines(vis, [pts], True, 255, 2)
                except:
                    print(self.track_box)
            else:
                cv2.putText(vis, 'Target Lost', (10, 230),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1,
                            cv2.LINE_AA)

            # frame processing throughput rate
            fps = self.fps.approx_compute()
            # print("FPS: {:.3f}".format(fps))
            cv2.putText(vis, 'FPS {:.3f}'.format(fps), (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1,
                        cv2.LINE_AA)

            self.obj_select.draw(vis)
            cv2.imshow('camshift', vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj
        cv2.destroyAllWindows()
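
# A minimal entry point for the App class above (hedged sketch; the listing
# does not show how the class is launched):
if __name__ == '__main__':
    App().run()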
import argparse
import json

from WebcamVideoStream import WebcamVideoStream
from tempimage import TempImage
from Cascading import Cascading
from Detect_Blur import DetectBlur
from compressImages import ImageCompression
from Filters import Filters
from dropbox.client import DropboxOAuth2FlowNoRedirect  # import path varies across dropbox SDK versions

# For this program I'm testing the use of thresholding by applying different filters
# and seeing how easy it is to detect corners and objects within the camera frame.

ap = argparse.ArgumentParser()
ap.add_argument("-c", "--conf", required=True,
                help="path to the json configuration file")
args = vars(ap.parse_args())


vs = WebcamVideoStream(src=0).start()       # read the video in as a threaded stream so frame grabs don't block processing
#cap = cv2.VideoCapture(0)
filters = Filters()
cascades = Cascading()
blurDetection = DetectBlur(120)
imgCmpr = ImageCompression()


conf = json.load(open(args["conf"]))        # Load the json file.
client = None

# check to see if the Dropbox should be used
if conf["use_dropbox"]:
    # connect to dropbox and start the session authorization process
    flow = DropboxOAuth2FlowNoRedirect(conf["dropbox_key"], conf["dropbox_secret"])
    print("[INFO] Authorize this application: {}".format(flow.start()))
Example #21
# import the necessary packages
import datetime
import time
import cv2
from WindowDestruction import WindowDestruction
from WebcamVideoStream import WebcamVideoStream
import numpy as np
from MotionDetection import MotionDetection
from Cascading import Cascading

motion = MotionDetection()
destroyWindows = WindowDestruction()
cascades = Cascading()
# camera = cv2.VideoCapture(0)
camera = WebcamVideoStream(src=0).start() 
time.sleep(0.25)
# initialize the first frame in the video stream
# WE WILL WANT TO UPDATE THIS VARIABLE TO OFTEN CHANGE THE FIRST FRAME
# BASED ON MOVEMENT OF MOTION...WILL BE TRICKY.
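
# One common way to keep that reference frame fresh (a hedged sketch, not part
# of the original script) is a running average of the grayscale frames via
# cv2.accumulateWeighted, used as the background model:
def update_background(avg, gray, alpha=0.05):
    # initialise the accumulator on the first call
    if avg is None:
        return gray.copy().astype("float")
    # blend the new frame into the running average
    cv2.accumulateWeighted(gray, avg, alpha)
    return avg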

cascadeTime = False

# loop over the frames of the video
while True:
    # grab the current frame and initialize the occupied/unoccupied
    # text
    
    frame = camera.read()
    #saveFrame = frame                       # For storing a copy for encoding later on.
    frame = cv2.resize(frame, (500, 500))
    #(grabbed, frame) = camera.read()
Example #22
# 	fps.update()
#
# # stop the timer and display FPS information
# fps.stop()
# print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
# print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
#
# # do a bit of cleanup
# stream.release()
# cv2.destroyAllWindows()
#

# created a *threaded* video stream, allow the camera sensor to warmup,
# and start the FPS counter
print("[INFO] sampling THREADED frames from webcam...")
vs = WebcamVideoStream(src=0).start()
fps = FPS().start()

# loop over some frames...this time using the threaded stream
while fps._numFrames < num_frames:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    # frame = imutils.resize(frame, width=400)

    # check to see if the frame should be displayed to our screen
    # if args["display"] > 0:
    # cv2.imshow("Frame", frame)

    # update the FPS counter
    fps.update()
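
# the listing cuts off here; per the commented-out block above, the usual
# cleanup after the loop would be (hedged reconstruction):
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))
vs.stop()
cv2.destroyAllWindows()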
Example #23
            voiceInterface.enableVoiceDictation = False
            if isThreaded:
                videoCapture.stop()
            else:
                videoCapture.release()
            cv2.destroyAllWindows()
            raise SystemExit
        if key & 0xFF == ord('r'):
            print("Restarting....")
            util.restartProgram()


calibrator = HSVCalibrator(0, windowSize)
hsvRange = calibrator.calibrateHSVRange()

threadedVideoCapture = WebcamVideoStream(windowSize=windowSize).start()
calibrator.videoCapture.release()

voiceInterface = VoiceControlInterface()

# Create Properties
voiceInterface.createProperty("debug", "bool", False)
voiceInterface.createProperty("rotation_speed", "int", -30)
voiceInterface.createProperty("effect", "list",
                              (["Hello", "World", "Goodbye", "Hell"], 0))

# Create Actions
voiceInterface.createVoiceAction("SET", ActionMethods.SET)
voiceInterface.createVoiceAction("INDEX", ActionMethods.INDEX)

# Create Aliases
Example #24
    def initUI(self):
        self.fps = 10
        self.cap = None
        self.webcam = WebcamVideoStream(resolution=(960, 720), framerate=10)
        self.picam = PiVideoStream(resolution=(960, 720), framerate=10)
        self.recorder = Recorder(resolution=(960, 720))
        self.timer = None
        self.show_image = True

        control_layout = QtGui.QGridLayout()
        control_layout.setAlignment(Qt.AlignTop)
        control_subwindow = QtGui.QMdiSubWindow()
        control_subwindow.setWindowTitle("Controls")
        control_widget = QtGui.QWidget()
        control_subwindow.setWidget(control_widget)
        control_widget.setLayout(control_layout)

        self.parameter_label = QtGui.QLabel()
        control_layout.addWidget(self.parameter_label, 7, 0)

        self.show_image_button = QtGui.QRadioButton("Show Image")
        self.show_image_button.setChecked(True)
        self.show_image_button.toggled.connect(
            lambda: self.changeImage(self.show_image_button))
        control_layout.addWidget(self.show_image_button, 6, 0)

        self.show_orig_button = QtGui.QRadioButton("Show Original")
        self.show_orig_button.toggled.connect(
            lambda: self.changeImage(self.show_orig_button))
        control_layout.addWidget(self.show_orig_button, 6, 1)

        start_button = QtGui.QPushButton("Start")
        start_button.clicked.connect(self.start)
        start_button.setStyleSheet("background-color: rgb(242, 189, 12)")
        control_layout.addWidget(start_button, 4, 0)

        add_tab_button = QtGui.QPushButton("Add Process Chain")
        add_tab_button.clicked.connect(self.addChain)
        add_tab_button.setStyleSheet("background-color: rgb(242, 189, 12)")
        control_layout.addWidget(add_tab_button, 5, 0)

        stop_button = QtGui.QPushButton("Stop")
        stop_button.clicked.connect(self.stop)
        stop_button.setStyleSheet("background-color: rgb(242, 189, 12)")
        control_layout.addWidget(stop_button, 4, 1)

        source_label = QtGui.QLabel("Input")
        control_layout.addWidget(source_label, 0, 0)

        self.inputModel = InputListModel([self.webcam, self.picam])

        self.inputBox = QtGui.QComboBox(self)
        self.inputBox.activated.connect(self.inputChanged)
        #self.inputBox.addItem("Webcam", self.webcam)
        #self.inputBox.addItem("Picam", self.picam)
        #self.inputBox.addItem("Video File")
        self.inputBox.setModel(self.inputModel)
        control_layout.addWidget(self.inputBox, 0, 1)

        filter_label = QtGui.QLabel("Filter")
        control_layout.addWidget(filter_label, 1, 0)

        self.filterBox = QtGui.QComboBox(self)
        #self.filterBox.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        #self.filterBox.customContextMenuRequested.connect(self.showMenu)
        self.filterBox.activated.connect(self.filterChanged)
        control_layout.addWidget(self.filterBox, 1, 1)
        filterList = []
        for m in inspect.getmembers(StandardFilter, inspect.isclass):
            if m[1].__module__ == 'StandardFilter':
                filter = m[1]()
                #self.filterBox.addItem(m[0], filter)
                filterList.append(filter)

        self.filterModel = FilterListModel(filterList)
        self.filterBox.setModel(self.filterModel)

        self.analyserModel = AnalyserListModel([Classifier("Classifier")])

        analysis_label = QtGui.QLabel("Analyser")
        control_layout.addWidget(analysis_label, 2, 0)

        self.analysisBox = QtGui.QComboBox(self)
        self.analysisBox.activated.connect(self.analyserChanged)
        control_layout.addWidget(self.analysisBox, 2, 1)
        self.analysisBox.setModel(self.analyserModel)

        output_label = QtGui.QLabel("Output")
        control_layout.addWidget(output_label, 3, 0)

        self.outputModel = OutputListModel(
            [Display(self),
             Recorder(resolution=(960, 720)),
             GestureToCubis()])

        self.outputBox = QtGui.QComboBox(self)
        #self.outputBox.addItem("Display", Display(self))
        #self.outputBox.addItem("Writer", Recorder(resolution=(960,720)))
        #self.outputBox.addItem("GestureToCubis", GestureToCubis())
        self.outputBox.activated.connect(self.outputChanged)

        self.outputBox.setModel(self.outputModel)
        control_layout.addWidget(self.outputBox, 3, 1)

        stream_layout = QtGui.QVBoxLayout()
        stream_layout.setAlignment(Qt.AlignCenter)
        stream_subwindow = QtGui.QMdiSubWindow()
        stream_subwindow.setWindowTitle("Output")
        stream_widget = QtGui.QWidget()
        stream_subwindow.setWidget(stream_widget)
        stream_widget.setLayout(stream_layout)
        self.video_frame = QtGui.QLabel()
        self.video_frame.setScaledContents(True)
        stream_layout.addWidget(self.video_frame)
        self.video_frame_2 = QtGui.QLabel()
        stream_layout.addWidget(self.video_frame_2)
        self.video_frame_3 = QtGui.QLabel()
        stream_layout.addWidget(self.video_frame_3)

        self.chain_tab_widget = QtGui.QTabWidget()
        chain_layout = QtGui.QVBoxLayout()
        chain_subwindow = QtGui.QMdiSubWindow()
        chain_subwindow.setWindowTitle("Process Chain")
        chain_widget = ProcessTabWidget(self)
        chain_widget.setLayout(chain_layout)
        chain_subwindow.setWidget(self.chain_tab_widget)
        self.chain_tab_widget.addTab(chain_widget, "Default")
        chain_layout.addWidget(chain_widget.process_chain)

        self.addSubWindow(chain_subwindow)
        self.addSubWindow(control_subwindow)
        self.addSubWindow(stream_subwindow)

        stream_subwindow.show()
        chain_subwindow.show()
        control_subwindow.show()
        self.tileSubWindows()
Example #25
def video_feed():
    return Response(gen(WebcamVideoStream().start(detection_queue)),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
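
# video_feed() above relies on a gen() generator that the listing does not show.
# A hedged sketch of the usual MJPEG multipart pattern it implies (the JPEG
# encoding and 'frame' boundary are assumptions matching the mimetype above):
def gen(stream):
    while True:
        frame = stream.read()
        if frame is None:
            continue
        ok, jpeg = cv2.imencode('.jpg', frame)
        if not ok:
            continue
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')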
class App(object):
    def __init__(self):
        # self.cam = cv2.VideoCapture(0)
        # self.cam.set(3, 320)
        # self.cam.set(4, 240)
        self.cam = WebcamVideoStream(src=0, resolution=(640, 480)).start()
        self.fps = FPS().start()

        ret, self.frame = self.cam.read()

        self.conf = {
            'ColorFrameNum': 7,
            'LBPFrameNum': 7,
            'MaxFrameDiffClr': 15,
            'MaxLBPFrameUpdate': 30,
            'L_Weight': 0.3,
            'A_Weight': 0.7,
            'B_Weight': 0.7
        }

        self.ColorCheck = AdaptiveThreshold(teta=3, max_lost_cnt=1)
        self.LBPCheck = AdaptiveThreshold(teta=2, max_lost_cnt=1)

        self.ColorDistance = LABDistance()
        self.LBPDistance = LocalBinaryPatterns(
            numPoints=8,
            radius=2,
            update_prev_hist=self.conf['MaxLBPFrameUpdate'])

        self.isLost = False
        self.isLBPLost = False

        self.height, self.width = self.frame.shape[:2]

        self.kernel_e = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        self.kernel_d = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

        cv2.namedWindow('camshift')
        self.obj_select = RectSelector('camshift', self.onmouse)

        self.LAB_CHANNELS = (
            (24, [0, 256], "light"),  # L
            (24, [0, 256], "a"),  # a
            (24, [0, 256], "b")  # b
        )

        self.show_backproj = False
        self.track_window = None
        self.histLAB = []
        self.track_box = None

    def onmouse(self, rect):
        xmin, ymin, xmax, ymax = rect
        labRoi = self.lab[ymin:ymax, xmin:xmax]
        bgrRoi = self.frame[ymin:ymax, xmin:xmax]

        self.calcLABhist(labRoi)
        self.ColorDistance.init(bgrRoi)

        self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin)
        # self.init_suspend(labRoi)
        self.isLost = False
        self.isLBPLost = False
        self.fps.reset()

    def calcLABhist(self, labRoi):
        self.histLAB = []
        for channel, param in enumerate(self.LAB_CHANNELS):
            # Init LAB histogram
            hist = cv2.calcHist([labRoi], [channel], None, [param[0]],
                                param[1])
            hist = cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
            self.histLAB.append(hist)
            # Show hist of each channel separately
            # self.show_hist(hist, param[2])

    def calcBackProjection(self):
        ch_prob = []
        ch_back_proj_prob = []

        for channel, param in enumerate(self.LAB_CHANNELS):
            prob = cv2.calcBackProject([self.lab], [channel],
                                       self.histLAB[channel], param[1], 1)
            cv2.imshow('Back projection ' + str(param[2]), prob)
            ret, prob = cv2.threshold(prob, 70, 255, cv2.THRESH_BINARY)
            cv2.imshow('Back projection thresh ' + str(param[2]), prob)
            # prob = cv2.morphologyEx(prob, cv2.MORPH_ERODE, self.kernel_e, iterations=1)
            # prob = cv2.morphologyEx(prob, cv2.MORPH_DILATE, self.kernel, iterations=1)
            # apply a closing to clean up the thresholded back projection
            prob = cv2.morphologyEx(prob,
                                    cv2.MORPH_CLOSE,
                                    self.kernel_e,
                                    iterations=2)
            ch_prob.append(prob)

        ch_back_proj_prob.append(
            cv2.addWeighted(ch_prob[0], self.conf['L_Weight'], ch_prob[1],
                            self.conf['A_Weight'], 0))

        ch_back_proj_prob.append(
            cv2.addWeighted(ch_prob[0], self.conf['L_Weight'], ch_prob[2],
                            self.conf['B_Weight'], 0))

        back_proj_prob = cv2.bitwise_and(ch_back_proj_prob[0],
                                         ch_back_proj_prob[1])
        ret, back_proj_prob = cv2.threshold(back_proj_prob, 150, 255,
                                            cv2.THRESH_BINARY)

        back_proj_prob = cv2.morphologyEx(back_proj_prob,
                                          cv2.MORPH_ERODE,
                                          self.kernel_e,
                                          iterations=1)
        back_proj_prob = cv2.morphologyEx(back_proj_prob,
                                          cv2.MORPH_DILATE,
                                          self.kernel_e,
                                          iterations=2)

        return back_proj_prob

    @staticmethod
    def show_hist(hist, channel='None'):
        bin_count = hist.shape[0]
        bin_w = 24
        img = np.zeros((256, bin_count * bin_w, 3), np.uint8)
        for i in range(bin_count):
            h = int(hist[i])
            if str(channel) == 'light':
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h),
                              (int(255.0 * i / bin_count), 255, 255), -1)
            elif str(channel) == 'a':
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h),
                              (255, int(255.0 * i / bin_count), 255), -1)
            elif str(channel) == 'b':
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h),
                              (255, 255, int(255.0 * i / bin_count)), -1)
            else:
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h), (255, 255, 255),
                              -1)
        img = cv2.cvtColor(img, cv2.COLOR_LAB2BGR)
        cv2.imshow('hist ' + str(channel), img)

    def camshift_algorithm(self):
        prob = self.calcBackProjection()
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        self.track_box, self.track_window = cv2.CamShift(
            prob, self.track_window, term_crit)

        if self.show_backproj:
            cv2.imshow("Back Projection", prob[..., np.newaxis])
        else:
            cv2.destroyWindow("Back Projection")

    def run(self):
        scaling_factor = 0.5
        last_frame_number = 0

        while True:
            if not self.obj_select.dragging:
                ret, self.frame = self.cam.read()
                self.frame = cv2.resize(self.frame,
                                        None,
                                        fx=scaling_factor,
                                        fy=scaling_factor,
                                        interpolation=cv2.INTER_AREA)
                self.lab = cv2.cvtColor(self.frame, cv2.COLOR_BGR2LAB)
                # self.lab = cv2.GaussianBlur(self.lab, (3,3), 0)
                kernel = np.ones((5, 5), np.float32) / 25
                self.lab = cv2.filter2D(self.lab, -1, kernel)
                self.gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)

            if ret:
                vis = self.frame.copy()

            track_window_condition = ((self.track_window)
                                      and (self.track_window[2] > 0)
                                      and (self.track_window[3] > 0))

            target_lost = self.isLost and self.isLBPLost

            # Main process flow
            if track_window_condition:
                if not target_lost:
                    # Apply CamShift algorithm and get new track_box
                    self.camshift_algorithm()

                    if self.fps.NumFrame % self.conf[
                            'ColorFrameNum'] == 0 and not self.isLost:
                        color_distance = self.ColorDistance.update(
                            self.frame, self.track_box)
                        self.isLost = self.ColorCheck.target_lost(
                            color_distance)
                        print("[INFO] Color track is lost:  '{}'\n".format(
                            self.isLost))
                        if self.isLost:
                            last_frame_number = self.fps.NumFrame

                    if self.fps.NumFrame % self.conf[
                            'LBPFrameNum'] == 0 and not self.isLBPLost:
                        LBP_distance = self.LBPDistance.update(
                            self.gray, self.track_window)
                        self.isLBPLost = self.LBPCheck.target_lost(
                            LBP_distance)
                        print("[INFO] LBP track is lost:  '{}'\n".format(
                            self.isLBPLost))
                        if self.isLBPLost:
                            last_frame_number = self.fps.NumFrame

                    if self.fps.NumFrame - last_frame_number >= self.conf[
                            'MaxLBPFrameUpdate']:
                        self.isLBPLost = False
                        self.isLost = False

                    try:
                        cv2.ellipse(vis, self.track_box, (0, 0, 255), 2)
                        pts = cv2.boxPoints(self.track_box)
                        pts = np.int0(pts)
                        cv2.polylines(vis, [pts], True, 255, 2)
                    except:
                        print(self.track_box)
                else:
                    cv2.putText(vis, 'Target Lost', (10, 230),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1,
                                cv2.LINE_AA)
                    # print("[INFO] Starting recovery proccess")

            elif not track_window_condition:
                cv2.putText(vis, 'Mark area of the object', (10, 230),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1,
                            cv2.LINE_AA)

            # frame processing throughput rate
            fps = self.fps.approx_compute()
            cv2.putText(vis, 'FPS {:.3f}'.format(fps), (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1,
                        cv2.LINE_AA)

            self.obj_select.draw(vis)
            cv2.imshow('camshift', vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj
        cv2.destroyAllWindows()
Example #27
class Tracker():
    def __init__(self):
        # Global Variables
        print("initialized")
        self.lc = lcm.LCM()
        lcmCameraPoseSub = self.lc.subscribe("CAMERA_POSE_CHANNEL",
                                             cameraPose_handler)
        lcmCameraPoseSub.set_queue_capacity(1)
        self.camera_pose = None
        self.vs = WebcamVideoStream(src=0).start()

    def run(self):
        state = np.matrix('0.0;0.0;0.0;0.0')  # x, y, xd, yd,

        # P and Q matrices for EKF
        P = np.matrix('10.0,0.0,0.0,0.0; \
                    0.0,10.0,0.0,0.0; \
                    0.0,0.0,10.0,0.0; \
                    0.0,0.0,0.0,10.0')

        Q = np.matrix('2.0,0.0,0.0,0.0; \
                    0.0,2.0,0.0,0.0; \
                    0.0,0.0,2.0,0.0; \
                    0.0,0.0,0.0,2.0')

        measurement = np.matrix('0;0')
        np.set_printoptions(
            formatter={'float': lambda x: "{0:0.2f}".format(x)})

        # print basic info
        print('python ' + platform.python_version())
        print('opencv ' + cv2.__version__)
        print('numpy ' + np.version.version)
        video = cv2.VideoCapture(0)
        video.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter.fourcc('M', 'J', 'P', 'G'))
        video.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        video.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

        # def main():
        # open camera

        start_time = time.time()

        prev_time = time.time()
        i = 0
        counter = 0
        stop_time = time.time() + 10
        while (True):
            if time.time() > stop_time:
                break

            now_time = time.time()
            dt = now_time - prev_time

            i += 1
            counter += 1
            # run the model every 0.01 s
            if (dt > 0.005):
                prev_time = now_time

                state, P, J = run_EKF_model(state, P, Q, dt)

            # read camera
            # ret, frame = cap.read()
            # select.select((video,), (), ())
            # image_data = video.read_and_queue()
            # raw_image = np.fromstring(image_data, dtype='uint8')
            # frame = cv2.imdecode(raw_image, cv2.IMREAD_UNCHANGED)
            # frame = cv2.imdecode(np.frombuffer(image_data, dtype=np.uint8), cv2.IMREAD_COLOR)
            # ret, frame = video.read()
            frame = self.vs.read()
            print("frame: {}".format(i))
            # the threaded stream returns the frame directly; only process valid frames
            if frame is not None:

                # For initialization, process the whole image; otherwise, use the predicted position
                if i < 10:
                    mask, cimg, (x, y, r) = recognize_center_without_EKF(frame)
                else:
                    mask, cimg, (x, y,
                                 r) = recognize_center(frame, state[0],
                                                       state[1])

                # if i == 5:
                #     break
                # if x==0:
                #     continue
                measurement[0] = x
                measurement[1] = y
                if (measurement[0] != 0) and (measurement[1] != 0):
                    print("run EKF")
                    state, P = run_EKF_measurement(state, measurement, P)
                else:
                    print("no motion detected, continue")
                    # i = 0
                    # continue
                print("x: {}, state 0: {}".format(x, state[0]))
                if (x != 0):
                    cv2.circle(cimg, (int(x), int(y)), 50, (255), 5)

                if (state[0] != 0):
                    cv2.circle(cimg, (int(state[0]), int(state[1])), 20, (255),
                               3)

                msg = camera_pose_xyt_t()
                msg.x = state[0]
                msg.y = state[1]
                self.lc.publish("CAMERA_POSE_CHANNEL", msg.encode())
                # pixel_coord = np.array([state[0],state[1],1])
                # world_2d_coord = transform_camera_to_2d(pixel_coord)
                # print(world_2d_coord)
                # cv2.imshow('all',cimg)

            # close
            if cv2.waitKey(0) & 0xFF == ord('q'):
                break

        print("Time {}, frames: {}".format(time.time() - start_time, counter))
        # clean up
        video.release()
        cv2.destroyAllWindows()
Example #28
	#instantiates the math and camera objects
	vision_math = Math()
	camera = Camera("Microsoft", c.M_HA, c.M_VA, c.M_DFOV)    # Microsoft camera
	#camera = Camera("mac", c.MAC_HA, c.MAC_VA, c.MAC_DFOV)    # mac internal camera
	camera.config()

	#initializes network tables
	if args.network:
		init_network_tables()
		table = NetworkTables.getTable('SmartDashboard')	
	#init_UDP_client()	
	# code borrowed from Adrian
	# https://www.pyimagesearch.com/2015/12/21/increasing-webcam-fps-with-python-and-opencv/

	#starts streaming the camera
	stream = WebcamVideoStream(src=0).start()
	pipeline = GripPipeline()

	#instantiates contours because we have 2 blobs in our target
	cnt1 = Contour()
	cnt2 = Contour()
	font = cv2.FONT_HERSHEY_SIMPLEX

	#counts how many good frames it got per second as it was running
	fps = FPS().start()
	old_counter = 0
	
	t_end = time.time() + args.runtime
	while time.time() < t_end:
		(counter, frame) = stream.read()
	if frame is not None and counter != old_counter:
Example #29
def main():
    # initialize the video stream and allow the camera sensor to warm up
    print("[INFO] starting video stream...")
    # cap = cv2.VideoCapture(0)
    vs = WebcamVideoStream(src=0).start()
    fps = FPS().start()  #Notes the start time
    width = 440

    with open("consumer_thread.csv", 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow([
            "Thread Frame #",
            "Time spent in reading the frame (seconds) from queue",
            "Time spent performing inference on the frame (seconds)"
        ])
        # loop over the frames from the video stream
        #while True:
        while fps._numFrames < args["num_frames"]:
            # grab the frame from the threaded video stream and resize it
            # to have a maximum width of 400 pixels
            # Capture frame-by-frame
            start = timer()
            frame = vs.readFromQueue()
            end = timer()
            # if frame is not None then there was at least one frame in the queue
            # when read from the queue and returned. Else queue was empty.
            if frame is not None:
                # update the FPS counter
                fps.update()
                consumerThreadFrameNumber = fps._numFrames
                consumerThreadTimeTakenToReadThisFrame = (end - start)
                print(
                    "[INFO] Consumer Thread : Time taken to read frame number",
                    consumerThreadFrameNumber, "from queue is",
                    consumerThreadTimeTakenToReadThisFrame, "seconds")
                height = frame.shape[0]
                dim = (width, height)
                frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
                # detect faces in the frame and determine if they are wearing a
                # face mask or not
                startInferenceTime = timer()
                (locs, preds) = detect_and_predict_mask(frame, net, model)
                endInferenceTime = timer()
                consumerThreadTimeTakenToPerformInference = (
                    endInferenceTime - startInferenceTime)
                print(
                    "[INFO] Consumer Thread : Time taken to performing inference on consumed frame number",
                    consumerThreadFrameNumber, "is",
                    consumerThreadTimeTakenToPerformInference, "seconds")
                writer.writerow([
                    consumerThreadFrameNumber,
                    consumerThreadTimeTakenToReadThisFrame,
                    consumerThreadTimeTakenToPerformInference
                ])
                for (box, pred) in zip(locs, preds):
                    # unpack the bounding box and predictions
                    (startX, startY, endX, endY) = box
                    (mask, withoutMask) = pred
                    label = "Mask" if mask > withoutMask else "No Mask"
                    color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
                    # include the probability in the label
                    #label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
                    # display the label and bounding box rectangle on the output
                    # frame
                    cv2.putText(frame, label, (startX, startY - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
                    cv2.rectangle(frame, (startX, startY), (endX, endY), color,
                                  2)
                    print("Showing frame")
                    # show the output frame
                    cv2.imshow("Output", frame)
                    #cv2.destroyAllWindows()
                    #key = cv2.waitKey(10) & 0xFF

                key = cv2.waitKey(1) & 0xFF
                # if the `q` key was pressed, break from the loop
                if key == ord("q"):
                    break

    fps.stop()
    vs.stop()

    print("[INFO] elasped time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    cv2.destroyAllWindows()
Example #30
kernel = numpy.ones((5,5), numpy.uint8)
#frameNum = 0 # TODO: find out how to determine unique frames
loops = 0
key = 0
centerX = 0
centerY = 0
angleToTarget = 0
display = 0
utils.hsvWrite(30,90,120,255,120,255) #Write Networktable values Green
#utils.hsvWrite(80,120,80,120,190,255) #Write Networktable values Blue
#utils.hsvWrite(130,120,80,200,190,255) #Write Networktable values Red
if (args["picamera"] > 0):
    cap = PiVideoStream().start()
else:
    cap = WebcamVideoStream().start()
time.sleep(2.0)
distanceTarget = -1
target = -1
centerX = 0
centerY = 0
r1x1 = -1
r1x2 = -1
r2x1 = -1
r2x2 = -1
while True:
    image = cap.read() #Capture frame
    #imageCopy = image

    image = imutils.resize(image, width=320) #resize - needed to allow rest of toolpath to work
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) #Convert from BGR to HSV
import numpy as np
import time
import datetime
from Filters import Filters          # This should allow us to import the Filters file.
from WebcamVideoStream import WebcamVideoStream
from Cascading import Cascading
from Detect_Blur import DetectBlur
from compressImages import ImageCompression
#from MotionDetection import MotionDetection
from WindowDestruction import WindowDestruction
from MotionDetection import MotionDetection

# For this program I'm testing the use of thresholding by applying different filters
# and seeing how easy it is to detect corners and objects within the camera frame.

vs = WebcamVideoStream(src=0).start()       # read the video in as a threaded stream so frame grabs don't block processing
#cap = cv2.VideoCapture(0)
filters = Filters()
fourcc = cv2.VideoWriter_fourcc(*'XVID')  # cv2.cv.CV_FOURCC on legacy OpenCV 2.x
out = cv2.VideoWriter('output.avi', fourcc, 20.0, (640, 480))
motion = MotionDetection()

destroyWindows = WindowDestruction()

firstFrame = None

while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 400 pixels
    frame = vs.read()
    #saveFrame = frame                       # For storing a copy for encoding later on.