Example #1
def threadBoth(source=0):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Dedicated thread for showing video frames with VideoShow object.
    Main thread serves only to pass frames between VideoGet and
    VideoShow objects/threads.
    """

    video_getter = VideoGet(source).start()
    video_shower = VideoShow(video_getter.frame, '1').start()

    video_getter2 = VideoGet('http://192.168.1.4:8080/video').start()
    video_shower2 = VideoShow(video_getter2.frame, '2').start()
    cps = CountsPerSec().start()

    while True:
        if (video_getter.stopped or video_shower.stopped
                or video_getter2.stopped or video_shower2.stopped):
            video_shower.stop()
            video_getter.stop()
            video_shower2.stop()
            video_getter2.stop()
            break

        frame = video_getter.frame
        frame = putIterationsPerSec(frame, cps.countsPerSec())
        video_shower.frame = frame

        frame2 = video_getter2.frame
        frame2 = putIterationsPerSec(frame2, cps.countsPerSec())
        video_shower2.frame = frame2
        cps.increment()
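
Several of these examples rely on CountsPerSec and putIterationsPerSec to measure and overlay throughput, but neither is defined on this page. A minimal sketch consistent with how they are called here (the timing logic, text format, and overlay position are assumptions):

from datetime import datetime

import cv2

class CountsPerSec:
    """Tracks how many iterations have occurred per second since start()."""

    def __init__(self):
        self._start_time = None
        self._num_occurrences = 0

    def start(self):
        self._start_time = datetime.now()
        return self

    def increment(self):
        self._num_occurrences += 1

    def countsPerSec(self):
        elapsed = (datetime.now() - self._start_time).total_seconds()
        return self._num_occurrences / elapsed if elapsed > 0 else 0.0

def putIterationsPerSec(frame, iterations_per_sec):
    # Overlay the current throughput near the bottom-left of the frame
    cv2.putText(frame, "{:.0f} iterations/sec".format(iterations_per_sec),
                (10, 450), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
    return frame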
Example #2
def threadBoth(source=0):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Dedicated thread for showing video frames with VideoShow object.
    Main thread serves only to pass frames between VideoGet and
    VideoShow objects/threads.
    """

    video_getter = VideoGet(source).start()
    video_shower = VideoShow(video_getter.frame).start()
    cps = CountsPerSec().start()

    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            break

        frame = video_getter.frame
        # frame = putIterationsPerSec(frame, cps.countsPerSec())
        # prediction, frame = detect_object(
        #     yolo,
        #     frame,
        #     postfix=FLAGS.postfix,
        # )
        video_shower.frame = frame
        cps.increment()
Example #3
def main():
    finger_detect = FingerDetection().start()
    key_listener = KeyListener().start()
    video_getter = VideoGet(0).start()
    video_shower = VideoShow(video_getter.frame).start()

    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            break

        frame = video_getter.frame
        frame = cv2.flip(frame, 1)
        img = normalizeFrame(frame, x0, y0)
        frame = drawDetectionArea(frame, x0, y0, width, (0, 255, 0))
        cv2.putText(frame, u"NONE = NONE, ONE = LEFT", (0, 80), font, 1.0,
                    (0, 0, 255), 2, 1)
        cv2.putText(frame, "TWO = RIGHT, FOUR = DOWN", (0, 120), font, 1.0,
                    (0, 0, 255), 2, 1)
        cv2.putText(frame, "UP = FIVE", (0, 160), font, 1.0, (0, 0, 255), 2, 1)
        finger_detect.frame = img
        prediction = handDetect(frame, finger_detect.detect(), x0, y0)

        # if prediction == "NONE" or prediction == "FIVE":
        key_listener.prediction = prediction
        key_listener.handle()

        video_shower.frame = frame
Example #4
def predict_video(data_dir, sess, image_shape, logits, keep_prob, input_image):
    print('Predicting Video...')
    
    video_getter = VideoGet(data_dir, sess, image_shape, logits, keep_prob, input_image).start()
    video_shower = VideoShow(video_getter.frame).start()

    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            break

        frame = video_getter.frame
        video_shower.frame = frame
Example #5
def threadVideoShow(source=0):
    """
    Dedicated thread for showing video frames with VideoShow object.
    Main thread grabs video frames.
    """

    cap = cv2.VideoCapture(source)
    (grabbed, frame) = cap.read()
    video_shower = VideoShow(frame).start()
    cps = CountsPerSec().start()

    while True:
        (grabbed, frame) = cap.read()
        if not grabbed or video_shower.stopped:
            video_shower.stop()
            break

        # frame = putIterationsPerSec(frame, cps.countsPerSec())
        video_shower.frame = frame
        cps.increment()
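
Example #5 isolates only the display thread, but the VideoShow class it relies on is never shown on this page. A minimal sketch matching the start/stop/frame interface the examples use (the window naming and 'q'-to-quit key handling are assumptions; Example #1 passes a window name as a second argument, which this sketch accepts):

from threading import Thread

import cv2

class VideoShow:
    """Shows frames on a dedicated thread; the owner updates self.frame."""

    def __init__(self, frame=None, window_name="Video"):
        self.frame = frame
        self.window_name = window_name
        self.stopped = False

    def start(self):
        Thread(target=self.show, daemon=True).start()
        return self

    def show(self):
        while not self.stopped:
            cv2.imshow(self.window_name, self.frame)
            # pressing 'q' in the window requests a stop
            if cv2.waitKey(1) == ord("q"):
                self.stopped = True

    def stop(self):
        self.stopped = True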
Example #6
def read_zed(sess, image_shape, logits, keep_prob, input_image):
    count = 0
    video_zed = VideoZed(sess, image_shape, logits, keep_prob, input_image).start()

    # Wait up to three seconds for the first ZED frame to arrive
    while video_zed.frame is None:
        if count == 3:
            exit("Failed to open ZED")
        print("Waiting for ZED")
        count += 1
        time.sleep(1)
      
    video_shower = VideoShow(video_zed.frame).start()

    while True:
        if video_zed.stopped or video_shower.stopped:
            video_shower.stop()
            video_zed.stop()
            break

        frame = video_zed.frame
        video_shower.frame = frame
Example #7
def threadBoth(source=0):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Dedicated thread for showing video frames with VideoShow object.
    Main thread serves only to pass frames between VideoGet and
    VideoShow objects/threads.
    """

    video_getter = VideoGet(source).start()
    video_shower = VideoShow(video_getter.frame).start()
    cps = CountsPerSec().start()

    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            break

        frame = video_getter.frame
        frame = putIterationsPerSec(frame, cps.countsPerSec())
        video_shower.frame = frame
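
The VideoGet class assumed throughout reads from cv2.VideoCapture on its own thread, so the main loop never blocks on camera I/O. A minimal sketch consistent with how these examples use it (note that Example #4 constructs a variant with extra model-related arguments, which this sketch does not cover):

from threading import Thread

import cv2

class VideoGet:
    """Grabs frames on a dedicated thread; the latest is kept in self.frame."""

    def __init__(self, src=0):
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        self.stopped = False

    def start(self):
        Thread(target=self.get, daemon=True).start()
        return self

    def get(self):
        while not self.stopped:
            (self.grabbed, self.frame) = self.stream.read()
            if not self.grabbed:
                # source exhausted or camera lost
                self.stop()

    def stop(self):
        self.stopped = True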
Example #8
def threadBoth(source=0):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Dedicated thread for showing video frames with VideoShow object.
    Main thread serves only to pass frames between VideoGet and
    VideoShow objects/threads.
    """

    video_getter = VideoGet(source).start()
    video_shower = VideoShow(video_getter.frame).start()
    save_image = SaveImage(video_getter.frame)
    save_image.start()
    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            save_image.stop()
            #save_image.join()
            break

        frame = video_getter.frame
        video_shower.frame = frame
        save_image.frame = frame
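
SaveImage in Example #8 follows the same thread-wrapper pattern but writes frames to disk instead of displaying them. A hypothetical sketch with the interface the example uses (the output directory, filename pattern, and write interval are assumptions, not part of the original):

import time
from threading import Thread

import cv2

class SaveImage:
    """Periodically writes the most recently assigned frame to disk."""

    def __init__(self, frame=None, out_dir=".", interval=1.0):
        self.frame = frame
        self.out_dir = out_dir      # assumed parameter
        self.interval = interval    # assumed parameter
        self.stopped = False

    def start(self):
        Thread(target=self.save, daemon=True).start()
        return self

    def save(self):
        count = 0
        while not self.stopped:
            if self.frame is not None:
                cv2.imwrite("{}/frame_{:06d}.png".format(self.out_dir, count),
                            self.frame)
                count += 1
            time.sleep(self.interval)

    def stop(self):
        self.stopped = True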
Example #9
def gesture_guy():
    model = load_model('my_model.h5')
    model.load_weights('model_weights.h5')

    gb = GestureBrain(model)
    print("start ======")
    counter = 0
    i = 0
    cap = cv2.VideoCapture(0)
    (grabbed, frame) = cap.read()
    video_shower = VideoShow(frame).start()
    mode_i = None
    action2 = None
    while True:
        (grabbed, frame) = cap.read()
        if not grabbed or video_shower.stopped:
            video_shower.stop()
            break
        cv2.putText(frame, "Current Gesture: {}".format(action2), (10, 450),
                    cv2.FONT_HERSHEY_TRIPLEX, 0.8, (0, 255, 255))
        cv2.putText(frame, "Mode: {}".format(mode_i), (10, 400),
                    cv2.FONT_HERSHEY_TRIPLEX, 0.8, (0, 255, 255))
        video_shower.frame = frame
        counter += 1
        screen = np.array(cv2.resize(frame, (88, 50)))

        gb.push_img(screen)
        if counter != 40:
            continue
        else:
            counter = 0

        mode = [
            'Recognition mode', 'Explorer mode', 'Photo mode', 'Video mode'
        ]
        selectMode = mode[i]
        print(selectMode)
        action = gb.regonize()
        print("predict type :", action)
        if selectMode == mode[0]:
            if action == 0:
                print(labels_want[0])
            elif action == 1:
                print(labels_want[1])
            elif action == 2:
                print(labels_want[2])
            elif action == 3:
                print(labels_want[3])
            elif action == 4:
                print(labels_want[4])
            elif action == 5:
                print(labels_want[5])
            elif action == 6:
                print(labels_want[6])
            elif action == 7:
                print(labels_want[7])
            elif action == 8:
                print(labels_want[8])
            elif action == 9:
                print(labels_want[9])

            elif action == 10:
                print(labels_want[10])
                i += 1
                if i != 4:
                    continue
                else:
                    i = 0

            elif action == 11:
                print(labels_want[11])
            elif action == 12:
                print(labels_want[12])
        if selectMode == mode[2]:
            if action == 0:
                print(labels_want[0])
                pyautogui.press('left')
            elif action == 1:
                print(labels_want[1])
                pyautogui.press('right')
            elif action == 4:
                print(labels_want[4])
                pyautogui.press('left')
            elif action == 5:
                print(labels_want[5])
                pyautogui.press('right')
            elif action == 6:
                print(labels_want[6])
#                 pyautogui.hotkey('altleft','f4')
            elif action == 7:
                print(labels_want[7])
                pyautogui.hotkey('enter')
            elif action == 8:
                print(labels_want[8])
                pyautogui.hotkey('ctrl', '+')
            elif action == 9:
                print(labels_want[9])
                pyautogui.hotkey('ctrl', '-')

            elif action == 10:
                print(labels_want[10])
                i += 1
                if i != 4:
                    continue
                else:
                    i = 0

            elif action == 11:
                print(labels_want[11])
                pyautogui.press('winleft')

        if selectMode == mode[3]:
            if action == 0:
                print(labels_want[0])
                video_seekb(0.4)
            elif action == 1:
                print(labels_want[1])
                video_seekf(0.4)
            elif action == 4:
                print(labels_want[4])
                vol_up(0.8)
            elif action == 5:
                print(labels_want[5])
                vol_down(0.8)
            elif action == 6:
                print(labels_want[6])
                pyautogui.press('space')
            elif action == 7:
                print(labels_want[7])
                pyautogui.press('space')
            elif action == 9:
                print(labels_want[9])

            elif action == 10:
                print(labels_want[10])
                i += 1
                if i != 4:
                    continue
                else:
                    i = 0

            elif action == 11:
                print(labels_want[11])

        if selectMode == mode[1]:
            if action == 0:
                print(labels_want[0])
                pyautogui.press('left')
            elif action == 1:
                print(labels_want[1])
                pyautogui.press('right')
            elif action == 2:
                print(labels_want[2])
                pyautogui.press('down')
            elif action == 3:
                print(labels_want[3])
                pyautogui.press('up')
            elif action == 4:
                print(labels_want[4])
                pyautogui.press('enter')
            elif action == 5:
                print(labels_want[5])
                pyautogui.press('backspace')
            elif action == 6:
                print(labels_want[6])
#                 pyautogui.hotkey('altleft','f4')
            elif action == 9:
                print(labels_want[9])

            elif action == 10:
                print(labels_want[10])
                i += 1
                if i != 4:
                    continue
                else:
                    i = 0

            elif action == 11:
                print(labels_want[11])
                pyautogui.press('winleft')


#         if selectMode == mode[4]:
#             if action == 0 :
#                 print(labels_want[0])
#                 pyautogui.press('left')
#             elif action == 1 :
#                 print(labels_want[1])
#                 pyautogui.press('right')
#             elif action == 2 :
#                 print(labels_want[2])
#                 pyautogui.press('down')
#             elif action == 3 :
#                 print(labels_want[3])
#                 pyautogui.press('up')
#             elif action == 4 :
#                 print(labels_want[4])
#                 pyautogui.press('pagedown')
#             elif action == 5 :
#                 print(labels_want[5])
#                 pyautogui.press('pageup')

#             elif action == 10:
#                 print(labels_want[10])
#                 i += 1
#                 if i != 5 :
#                     continue
#                 else:
#                     i = 0

#             elif action == 11:
#                 print(labels_want[10])
#                 pyautogui.press('winleft')

#         cv2.putText(frame, "{}".format(action),(10, 450), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255, 255, 255))
        mode_i = mode[i]
        action2 = labels_want[action[0]]
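
The long if/elif chains in Example #9 repeat the same print-then-keypress shape for every action index. A table-driven alternative keeps each mode's bindings in one place; this is a sketch of that design choice, not the author's code, and PHOTO_MODE_ACTIONS and handle_photo_mode are illustrative names:

PHOTO_MODE_ACTIONS = {
    0: lambda: pyautogui.press('left'),
    1: lambda: pyautogui.press('right'),
    4: lambda: pyautogui.press('left'),
    5: lambda: pyautogui.press('right'),
    7: lambda: pyautogui.hotkey('enter'),
    8: lambda: pyautogui.hotkey('ctrl', '+'),
    9: lambda: pyautogui.hotkey('ctrl', '-'),
    11: lambda: pyautogui.press('winleft'),
}

def handle_photo_mode(action):
    # Mode switching (action == 10) still needs its own branch; every
    # other binding reduces to a dictionary lookup.
    action = int(action)
    print(labels_want[action])
    handler = PHOTO_MODE_ACTIONS.get(action)
    if handler is not None:
        handler()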
Example #10
def threadBoth(source=1):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Dedicated thread for showing video frames with VideoShow object.
    Main thread serves only to pass frames between VideoGet and
    VideoShow objects/threads.
    """
    video_getter = VideoGet(source).start()
    SIFT = SIFTThread(video_getter.frame).start()
    video_shower = VideoShow(video_getter.frame).start()
    fgbg = cv.createBackgroundSubtractorMOG2()

    frameCount = 0
    oldPoint = None

    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            break

        frame = video_getter.frame
        SIFT.frame = frame
        # apply mask to extract the foreground object
        fgmask = fgbg.apply(frame)
        # preprocessing: blurring and eroding to remove small finicky bits
        kernel = np.ones((5, 5), np.uint8)
        morph = cv.medianBlur(fgmask, 13)
        morph = cv.erode(morph, kernel, iterations=3)

        # get the contours (OpenCV 3.x signature; in OpenCV 4.x findContours
        # returns only (contours, hierarchy))
        _, contours, _ = cv.findContours(morph, cv.RETR_TREE,
                                         cv.CHAIN_APPROX_SIMPLE)
        if len(contours) != 0:
            # find the biggest contour by area
            c = max(contours, key=cv.contourArea)
            area = cv.contourArea(c)
            # make sure contour area is bigger than threshold, don't want little contours
            if area > threshold_area:
                # determine the top-most extreme points along the contour
                extTop = tuple(c[c[:, :, 1].argmin()][0])
                # assign first value
                if (oldPoint is None):
                    oldPoint = extTop
                # if the finger stays for a few frames, we know someone is pointing
                if ((oldPoint[0] - 5 <= extTop[0] <= oldPoint[0] + 5)
                        and (oldPoint[1] - 5 <= extTop[1] <= oldPoint[1] + 5)):
                    if (extTop[0] != 0 and extTop[1] != 0):
                        frameCount += 1
                        if frameCount == 20:
                            frameCount = 0
                            SIFT.run()
                            #print([SIFT.newCoords.astype(int)])
                            for i in range(len(SIFT.zone.coords)):
                                res = cv.pointPolygonTest(SIFT.zone.coords[i],
                                                          extTop,
                                                          measureDist=False)
                                print(res)
                                if (res == 1):
                                    webbrowser.open(SIFT.zone.zones[i][1],
                                                    new=2)
                    oldPoint = extTop
                else:
                    frameCount = 0
                    oldPoint = extTop
                # draw a circle at the top-most point
                cv.circle(frame, extTop, 8, (100, 0, 255), -1)

        video_shower.frame = frame
Example #11
    def startDetecting(self):
        
        capture = VideoGet(0).start()
        video_shower = VideoShow(capture.frame).start()   

        while capture.isOpened():
            if capture.stopped or video_shower.stopped:
                video_shower.stop()
                capture.stop()
                break
            # Grab a frame from the camera
            frame = capture.frame
            
            # Get hand data from the rectangular sub-window
            cv2.rectangle(frame,(100,100),(300,300),(0,255,0),0)
            crop_image = frame[100:300, 100:300]
            
            #1.
            # Apply Gaussian blur
            with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
                exMask = executor.submit(self.maskHSV, crop_image)
                mask = exMask.result()

            #2.
            # Find the contours
            with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
                exContours = executor.submit(self.findContours, mask)
                contours, hierarchy = exContours.result()
            
            #3.
            try:
                contour = max(contours, key=cv2.contourArea)
                
                x,y,w,h = cv2.boundingRect(contour)
                cv2.rectangle(crop_image,(x,y),(x+w,y+h),(0,0,255),0)
                
                hull = cv2.convexHull(contour)
                
                drawing = np.zeros(crop_image.shape,np.uint8)
                cv2.drawContours(drawing,[contour],-1,(0,255,0),0)
                cv2.drawContours(drawing,[hull],-1,(0,0,255),0)
                
                hull = cv2.convexHull(contour, returnPoints=False)
                defects = cv2.convexityDefects(contour,hull)
                
                count_defects = 0
            
                for i in range(defects.shape[0]):
                    s,e,f,d = defects[i,0]
                    start = tuple(contour[s][0])
                    end = tuple(contour[e][0])
                    far = tuple(contour[f][0])

                    # 4.
                    #angle = 360;
                    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
                        exAngle = executor.submit(self.findAngle, start, end, far)
                        angle = exAngle.result()

                    if angle <= 90:
                        count_defects += 1
                        cv2.circle(crop_image,far,1,[0,0,255],-1)

                    cv2.line(crop_image,start,end,[0,255,0],2)

                if count_defects >= 4:
                    pyautogui.press('space')
                    cv2.putText(frame,"JUMP", (450,110), cv2.FONT_HERSHEY_SIMPLEX, 2, 2, 2)

            except Exception:
                # no contour or convexity defects found in this frame
                pass

            video_shower.frame = frame

            if cv2.waitKey(1) == ord('q'):
                capture.release()
                cv2.destroyAllWindows()
                break 
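
Example #11 counts convexity defects whose inner angle is at most 90 degrees (roughly one per finger valley), but self.findAngle is not shown. A plausible cosine-rule implementation consistent with that use; this is a guess at the method body, not the original:

import math

def findAngle(self, start, end, far):
    # Angle at the defect point `far`, in degrees, via the cosine rule
    a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
    b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
    c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
    return math.degrees(math.acos((b**2 + c**2 - a**2) / (2 * b * c)))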
Example #12
import numpy as np
import rpyc
import cv2
from VideoShow import VideoShow

conn = rpyc.connect("localhost", 18861)

y = conn.root.threadBoth(0)

print(len(y))
nparr = np.frombuffer(y[0], np.uint8)  # np.fromstring is deprecated
img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

video_shower = VideoShow(img).start()

for i in y:
    nparr = np.frombuffer(i, np.uint8)
    img = cv2.imdecode(nparr, cv2.IMREAD_COLOR)

    video_shower.frame = img
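
Example #12 is the client half of an rpyc pair: it expects conn.root.threadBoth to return a sequence of JPEG-encoded byte strings. A hypothetical service side consistent with that contract (the service name, clip length, and encoding choice are assumptions):

import cv2
import rpyc
from rpyc.utils.server import ThreadedServer

class VideoService(rpyc.Service):
    def exposed_threadBoth(self, source=0):
        # Grab a fixed number of frames and return them JPEG-encoded
        cap = cv2.VideoCapture(source)
        frames = []
        for _ in range(100):  # arbitrary clip length for the demo
            grabbed, frame = cap.read()
            if not grabbed:
                break
            ok, buf = cv2.imencode(".jpg", frame)
            if ok:
                frames.append(buf.tobytes())
        cap.release()
        return frames

if __name__ == "__main__":
    ThreadedServer(VideoService, port=18861).start()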
Example #13
def thread_video(input):
    """
    Dedicated thread for grabbing video frames with VideoGet object.
    Main thread shows video frames.
	"""

    video_getter = VideoGet(input).start()
    video_shower = VideoShow(video_getter.frame).start()

    while True:
        if video_getter.stopped or video_shower.stopped:
            video_shower.stop()
            video_getter.stop()
            break

        frame = video_getter.frame
        rgb_frame = frame[:, :, ::-1]  # BGR -> RGB for face_recognition

        # Find all the faces and face encodings in the current frame of video
        face_location = face_recognition.face_locations(rgb_frame)
        if len(face_location) != 1:
            # No face (or several faces): skip this frame, since indexing
            # face_location[0] below would raise on an empty result
            video_shower.frame = frame
            fps.update()
            continue

        unknown_face_encoding = face_recognition.face_encodings(
            rgb_frame, face_location)[0]
        index = utils.recognize_face(unknown_face_encoding,
                                     known_faces_encoding)
        name = known_names[index]
        cv2.putText(frame, name, (100, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                    (0, 0, 255), 2)

        top, right, bottom, left = face_location[0]
        face_height = bottom - top

        # Draw a box around the face
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255))

        # Display the resulting frame
        #try:
        (x, y, w,
         h) = mouth_detection.mouth_detection_video(frame, detector, predictor)

        if h < 0.2 * face_height:
            cv2.putText(frame, "close", (30, 30), cv2.FONT_HERSHEY_SIMPLEX,
                        0.7, (0, 0, 255), 2)
        else:
            cv2.putText(frame, "open", (30, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7,
                        (0, 0, 255), 2)

            d = int(0.35 * h)
            roi = frame[y + d:y + h, x:x + w]
            #cv2.rectangle(frame, (x, y + int(0.2*h)), (x+w, y+h), (0, 255, 0), 2)
            (px, py, pw, ph) = utils.color_detection(roi)
            if pw != 0:
                cv2.rectangle(frame, (x + px, y + d + py),
                              (x + px + pw, y + d + py + ph), (0, 255, 0), 2)
            else:
                cv2.putText(frame, "no pill detected", (50, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
        #except:
        #	pass
        video_shower.frame = frame
        fps.update()
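
Example #13 calls fps.update() on an fps object that the snippet never creates; the call matches the interface of imutils.video.FPS (an assumption). Typical usage around a loop like this one:

from imutils.video import FPS

fps = FPS().start()        # create before the while loop
# ... call fps.update() once per processed frame, as above ...
fps.stop()                 # after the loop exits
print("approx. FPS: {:.2f}".format(fps.fps()))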