Example 1
def binaryMask(frame, x0, y0, width, height, framecount, plot):
    global guessGesture, visualize, mod, saveImg

    cv2.rectangle(frame, (x0, y0), (x0 + width, y0 + height), (0, 255, 0), 1)
    roi = frame[y0:y0 + height, x0:x0 + width]

    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 2)

    th3 = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY_INV, 11, 2)
    # with THRESH_OTSU the threshold is computed automatically (minValue is ignored)
    ret, res = cv2.threshold(th3, minValue, 255,
                             cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True and (framecount % 5) == 4:
        t = threading.Thread(target=myNN.guessGesture, args=[mod, res])
        t.start()
    elif visualize == True:
        layer = int(input("Enter which layer to visualize "))
        cv2.waitKey(1)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False

    return res
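All of the mask functions on this page rely on module-level state that the snippets do not show: the cv2/numpy/threading imports, the CNN helper module referenced as myNN, the binarisation threshold minValue, the morphology kernel skinkernel, and the saveROIImg helper. Below is a minimal setup sketch; the concrete values and the module name gestureCNN are assumptions, not taken from the examples themselves.

# Minimal setup sketch for the mask functions on this page.
# Everything marked "assumed" is a guess; the original repositories define
# their own values for these names.
import os
import threading
import time

import cv2
import numpy as np

import gestureCNN as myNN             # assumed module name for the CNN helper

minValue = 70                          # binarisation threshold used by binaryMask (assumed)
skinkernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

x0, y0, width, height = 400, 200, 200, 200   # default ROI position/size (assumed)
saveImg = guessGesture = visualize = False
mod = None                             # CNN model, loaded in Main()
gestname = ''
path = './'
counter = 0

def saveROIImg(img):
    # Assumed helper: write the processed ROI into the current gesture folder.
    global counter
    counter += 1
    cv2.imwrite(os.path.join(path, gestname + str(counter) + '.png'), img)
    time.sleep(0.04)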
Example 2
def bkgrndSubMask(frame, x0, y0, width, height, framecount, plot):
    global guessGesture, takebkgrndSubMask, visualize, mod, bkgrnd, saveImg

    cv2.rectangle(frame, (x0, y0), (x0 + width, y0 + height), (0, 255, 0), 1)
    roi = frame[y0:y0 + height, x0:x0 + width]
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)

    if takebkgrndSubMask == True:
        bkgrnd = roi
        takebkgrndSubMask = False
        print("Refreshing background image for mask...")

    diff = cv2.absdiff(roi, bkgrnd)

    _, diff = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)

    # blur, erode, and dilate the thresholded diff (each step feeds the next)
    mask = cv2.GaussianBlur(diff, (3, 3), 5)
    mask = cv2.erode(mask, skinkernel, iterations=1)
    mask = cv2.dilate(mask, skinkernel, iterations=1)
    res = cv2.bitwise_and(roi, roi, mask=mask)

    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True and (framecount % 5) == 4:
        t = threading.Thread(target=myNN.guessGesture, args=[mod, res])
        t.start()

    elif visualize == True:
        layer = int(input("Enter which layer to visualize "))
        cv2.waitKey(0)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False

    return res
Example 3
def skinMask(frame, x0, y0, width, height, framecount, plot):
    global guessGesture, visualize, mod, saveImg
    # HSV values
    low_range = np.array([0, 50, 80])
    upper_range = np.array([30, 200, 255])

    cv2.rectangle(frame, (x0, y0), (x0 + width, y0 + height), (0, 255, 0), 1)
    roi = frame[y0:y0 + height, x0:x0 + width]

    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, low_range, upper_range)

    mask = cv2.erode(mask, skinkernel, iterations=1)
    mask = cv2.dilate(mask, skinkernel, iterations=1)

    mask = cv2.GaussianBlur(mask, (15, 15), 1)

    res = cv2.bitwise_and(roi, roi, mask=mask)
    # color to grayscale
    res = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)

    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True and (framecount % 5) == 4:
        t = threading.Thread(target=myNN.guessGesture, args=[mod, res])
        t.start()
    elif visualize == True:
        layer = int(input("Enter which layer to visualize "))
        cv2.waitKey(0)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False

    return res
Example 4
def binaryMask(frame, x0, y0, width, height):

    global guessGesture, visualize, mod, lastgesture, saveImg

    cv2.rectangle(frame, (x0, y0), (x0 + width, y0 + height), (0, 255, 0), 1)
    roi = frame[y0:y0 + height, x0:x0 + width]

    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 2)

    th3 = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY_INV, 11, 2)
    ret, res = cv2.threshold(th3, minValue, 255,
                             cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)

    guess_res = "Bin"

    if saveImg == True:
        saveROIImg(res)

    elif guessGesture == True:
        retgesture, guess_res = myNN.guessGesture(mod, res)

        if lastgesture != retgesture:
            lastgesture = retgesture

    elif visualize == True:
        layer = int(raw_input("Enter which layer to visualize "))
        cv2.waitKey(1)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False

    return res, guess_res
Example 5
def skinMask(frame, x0, y0, width, height):
    global guessGesture, visualize, mod, lastgesture, saveImg
    # HSV values
    low_range = np.array([0, 50, 80])
    upper_range = np.array([30, 200, 255])

    cv2.rectangle(frame, (x0, y0), (x0 + width, y0 + height), (0, 255, 0), 1)
    roi = frame[y0:y0 + height, x0:x0 + width]

    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)

    #Apply skin color range
    mask = cv2.inRange(hsv, low_range, upper_range)

    mask = cv2.erode(mask, skinkernel, iterations=1)
    mask = cv2.dilate(mask, skinkernel, iterations=1)

    #blur
    mask = cv2.GaussianBlur(mask, (15, 15), 1)
    #cv2.imshow("Blur", mask)

    #bitwise and mask original frame
    res = cv2.bitwise_and(roi, roi, mask=mask)

    # color to grayscale
    res = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)

    retgesture = "^______^Skin"

    if saveImg == True:
        saveROIImg(res)

    elif guessGesture == True:
        retgesture = myNN.guessGesture(mod, res)

        if lastgesture != retgesture:
            lastgesture = retgesture
            print myNN.output[lastgesture]
            time.sleep(0.01)
            #guessGesture = False

    elif visualize == True:
        layer = int(raw_input("Enter which layer to visualize "))
        cv2.waitKey(0)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False

    return res, retgesture
Example 6
def binaryMask(frame, x0, y0, width, height):

    global guessGesture, visualize, mod, lastgesture, saveImg

    cv2.rectangle(frame, (x0, y0), (x0 + width, y0 + height), (0, 255, 0), 1)
    roi = frame[y0:y0 + height, x0:x0 + width]

    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 2)
    #blur = cv2.bilateralFilter(roi,9,75,75)

    th3 = cv2.adaptiveThreshold(blur, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                cv2.THRESH_BINARY_INV, 11, 2)
    ret, res = cv2.threshold(th3, minValue, 255,
                             cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
    #ret, res = cv2.threshold(blur, minValue, 255, cv2.THRESH_BINARY +cv2.THRESH_OTSU)

    retgesture = "^______^Bin"

    if saveImg == True:
        saveROIImg(res)

    elif guessGesture == True:
        retgesture = myNN.guessGesture(mod, res)

        if lastgesture != retgesture:
            lastgesture = retgesture
            #print lastgesture

            ## Checking for only PUNCH gesture here
            ## Run this app in Prediction Mode and keep Chrome browser on focus with Internet Off
            ## And have fun :) with Dino
            if lastgesture == 3:
                jump = ''' osascript -e 'tell application "System Events" to key code 49' '''
                #jump = ''' osascript -e 'tell application "System Events" to key down (49)' '''
                os.system(jump)
                print myNN.output[lastgesture] + "= Dino JUMP!"

            # time.sleep(0.01 )
            # guessGesture = False

    elif visualize == True:
        layer = int(raw_input("Enter which layer to visualize "))
        cv2.waitKey(1)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False

    return res, retgesture
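The osascript call above is macOS-only. If the same PUNCH-to-jump trick is wanted on other platforms, one option (not used by the original snippet) is to send the space key with the third-party pyautogui package; a minimal sketch:

# Hypothetical cross-platform replacement for the osascript jump above.
# Requires the third-party pyautogui package (pip install pyautogui).
import pyautogui

def dino_jump():
    # The Chrome Dino game jumps on the space bar.
    pyautogui.press('space')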
def bkgrndSubMask(frame, x0, y0, width, height, framecount, plot):
    global guessGesture, takebkgrndSubMask, visualize, mod, bkgrnd, lastgesture, saveImg, drawSquare

    if drawSquare:
        cv2.rectangle(frame, (x0, y0), (x0 + width, y0 + height), (0, 255, 0),
                      1)
    roi = frame[y0:y0 + height, x0:x0 + width]
    #roi = cv2.UMat(frame[y0:y0+height, x0:x0+width])
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)

    #Take background image
    if takebkgrndSubMask == True:
        bkgrnd = roi
        takebkgrndSubMask = False
        print("Refreshing background image for mask...")

    #Take a diff between roi & bkgrnd image contents
    diff = cv2.absdiff(roi, bkgrnd)

    #was 25
    _, diff = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)

    # mask = cv2.GaussianBlur(diff, (3,3), 5)
    # mask = cv2.erode(diff, skinkernel, iterations = 1)
    mask = cv2.dilate(diff, skinkernel, iterations=1)
    centerX, topY = 200, 200
    try:
        # centre (x) of the moving pixels and a near-top (y) row of the diff mask
        centerX = int(np.mean(np.nonzero(diff)[1]))
        topY = int(np.percentile(np.nonzero(diff)[0], 5))
    except:
        pass  # keep the defaults when the diff mask is empty
    res = cv2.bitwise_and(roi, roi, mask=mask)
    # res = diff
    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True and (framecount % 5) == 4:
        t = threading.Thread(target=myNN.guessGesture, args=[mod, res])
        t.start()
        #t.join()
        #myNN.update(plot)

    elif visualize == True:
        layer = int(input("Enter which layer to visualize "))
        cv2.waitKey(0)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False

    return res, centerX, topY
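The centerX/topY estimate above is a quick statistic over the nonzero pixels of the thresholded diff. An alternative (not what the original function does) is to take the largest contour of the mask and use its bounding box; a sketch assuming the OpenCV 4.x findContours signature:

# Sketch: hand centre (x) and top (y) from the largest contour of a binary mask.
# Alternative to the np.nonzero() statistics used in bkgrndSubMask above.
import cv2

def hand_center_top(mask, default=(200, 200)):
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        return default                      # empty mask: keep the defaults
    largest = max(contours, key=cv2.contourArea)
    x, y, w, h = cv2.boundingRect(largest)
    return x + w // 2, y                    # horizontal centre, top row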
def adaptiveSubMask(frame, x0, y0, width, height, framecount, plot):
    global guessGesture, takebkgrndSubMask, visualize, mod, bkgrnd, lastgesture, saveImg, lastFrame, avgFrame

    cv2.rectangle(frame, (x0, y0), (x0 + width, y0 + height), (0, 255, 0), 1)
    roi = frame[y0:y0 + height, x0:x0 + width]
    # roi = cv2.UMat(frame[y0:y0+height, x0:x0+width])
    roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)

    if (lastFrame is None):
        lastFrame = roi
        avgFrame = roi

    percentNew = .2
    avgFrame = avgFrame * (1 - percentNew) + roi * percentNew
    bkgrnd = avgFrame.astype('uint8')
    # Take background image
    # if takebkgrndSubMask == True:
    #     bkgrnd = roi
    #     takebkgrndSubMask = False
    #     print("Refreshing background image for mask...")

    # Take a diff between roi & bkgrnd image contents
    diff = cv2.absdiff(roi, bkgrnd)

    # was 25
    _, diff = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)

    # mask = cv2.GaussianBlur(diff, (3,3), 5)
    # mask = cv2.erode(diff, skinkernel, iterations = 1)
    mask = cv2.dilate(diff, skinkernel, iterations=1)
    res = cv2.bitwise_and(roi, roi, mask=mask)
    # res = diff
    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True and (framecount % 5) == 4:
        t = threading.Thread(target=myNN.guessGesture, args=[mod, res])
        t.start()
        # t.join()
        # myNN.update(plot)

    elif visualize == True:
        layer = int(input("Enter which layer to visualize "))
        cv2.waitKey(0)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False
    lastFrame = roi
    return res
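The manual running average in adaptiveSubMask (avgFrame * (1 - percentNew) + roi * percentNew) is the same exponential update that cv2.accumulateWeighted performs. A sketch of the equivalent update using that call; the original function does not use it:

# Sketch: exponential running-average background, as in adaptiveSubMask,
# expressed with cv2.accumulateWeighted. alpha plays the role of percentNew.
import cv2
import numpy as np

def update_background(avg_frame, roi_gray, alpha=0.2):
    if avg_frame is None:
        avg_frame = np.float32(roi_gray)     # the accumulator must be float
    cv2.accumulateWeighted(roi_gray, avg_frame, alpha)
    bkgrnd = cv2.convertScaleAbs(avg_frame)  # back to uint8 for cv2.absdiff
    return avg_frame, bkgrnd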
Example 9
def skinMask(frame, plot):
    global guessGesture, visualize, mod, lastgesture, saveImg
    ## HSV values
    low_range = np.array([0, 50, 80])
    upper_range = np.array([30, 200, 255])

    cv2.rectangle(frame, (220, 1), (420, 201), (0, 255, 0), 1)
    #roi = cv2.UMat(frame[y0:y0+height, x0:x0+width])
    roi = frame[1:201, 220:420]

    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)

    ## Apply skin color range
    mask = cv2.inRange(hsv, low_range, upper_range)

    mask = cv2.erode(mask, skinkernel, iterations=1)
    mask = cv2.dilate(mask, skinkernel, iterations=1)

    ## blur
    mask = cv2.GaussianBlur(mask, (15, 15), 1)

    ## bitwise and mask original frame
    res = cv2.bitwise_and(roi, roi, mask=mask)
    ## color to grayscale
    res = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)

    if saveImg == True:
        saveROIImg(res)
    elif guessGesture == True:
        #res = cv2.UMat.get(res)
        t = threading.Thread(target=myNN.guessGesture, args=[mod, res])
        t.start()
    elif visualize == True:
        layer = int(raw_input("Enter which layer to visualize "))
        cv2.waitKey(0)
        myNN.visualizeLayers(mod, res, layer)
        visualize = False

    return res
def Main():
    global guessGesture, visualize, mod, binaryMode, bkgrndSubMode, mask, takebkgrndSubMask, x0, y0, width, height, saveImg, gestname, path
    quietMode = False

    font = cv2.FONT_HERSHEY_SIMPLEX
    size = 0.5
    fx = 10
    fy = 350
    fh = 18

    #Call CNN model loading callback
    while True:
        ans = int(input(banner))
        if ans == 2:
            mod = myNN.loadCNN(-1)
            myNN.trainModel(mod)
            input("Press any key to continue")
            break
        elif ans == 1:
            print("Will load default weight file")
            mod = myNN.loadCNN(0)
            break
        elif ans == 3:
            if not mod:
                w = int(input("Which weight file to load (0 or 1)"))
                mod = myNN.loadCNN(w)
            else:
                print("Will load default weight file")

            img = int(input("Image number "))
            layer = int(input("Enter which layer to visualize "))
            myNN.visualizeLayers(mod, img, layer)
            input("Press any key to continue")
            continue

        else:
            print("Get out of here!!!")
            return 0

    ## Grab camera input
    cap = cv2.VideoCapture(0)
    cv2.namedWindow('Original', cv2.WINDOW_NORMAL)

    # set capture size to 640x480 (3 = CAP_PROP_FRAME_WIDTH, 4 = CAP_PROP_FRAME_HEIGHT)
    ret = cap.set(3, 640)
    ret = cap.set(4, 480)

    framecount = 0
    fps = ""
    start = time.time()

    plot = np.zeros((512, 512, 3), np.uint8)
    global x
    while (True):
        ret, frame = cap.read()
        max_area = 0

        frame = cv2.flip(frame, 3)
        frame = cv2.resize(frame, (640, 480))

        if ret == True:
            if bkgrndSubMode == True:
                roi = bkgrndSubMask(frame, x0, y0, width, height, framecount,
                                    plot)
            elif binaryMode == True:
                roi = binaryMask(frame, x0, y0, width, height, framecount,
                                 plot)
            else:
                roi = skinMask(frame, x0, y0, width, height, framecount, plot)

            framecount = framecount + 1
            end = time.time()
            timediff = (end - start)
            if (timediff >= 1):
                #timediff = end - start
                fps = 'FPS:%s' % (framecount)
                start = time.time()
                framecount = 0

        cv2.putText(frame, fps, (10, 20), font, 0.7, (0, 255, 0), 2, 1)
        cv2.putText(frame, 'Options:', (fx, fy), font, 0.7, (0, 255, 0), 2, 1)
        cv2.putText(frame, 'b - Toggle Binary/SkinMask', (fx, fy + fh), font,
                    size, (0, 255, 0), 1, 1)
        cv2.putText(frame, 'x - Toggle Background Sub Mask', (fx, fy + 2 * fh),
                    font, size, (0, 255, 0), 1, 1)
        cv2.putText(frame, 'g - Toggle Prediction Mode', (fx, fy + 3 * fh),
                    font, size, (0, 255, 0), 1, 1)
        cv2.putText(frame, 'q - Toggle Quiet Mode', (fx, fy + 4 * fh), font,
                    size, (0, 255, 0), 1, 1)
        cv2.putText(frame, 'n - To enter name of new gesture folder',
                    (fx, fy + 5 * fh), font, size, (0, 255, 0), 1, 1)
        cv2.putText(frame, 's - To start capturing new gestures for training',
                    (fx, fy + 6 * fh), font, size, (0, 255, 0), 1, 1)
        cv2.putText(frame, 'ESC - Exit', (fx, fy + 7 * fh), font, size,
                    (0, 255, 0), 1, 1)

        ## If enabled will stop updating the main openCV windows
        ## Way to reduce some processing power :)
        if not quietMode:
            cv2.imshow('Original', frame)
            cv2.imshow('ROI', roi)

            if guessGesture == True:
                #plot = np.zeros((512,512,3), np.uint8)
                plot = myNN.update(plot, x)
                x = (x + 1) % 2
            cv2.imshow('Gesture Probability', plot)
            #plot = np.zeros((512,512,3), np.uint8)

        ############## Keyboard inputs ##################
        key = cv2.waitKey(5) & 0xff

        ## Use Esc key to close the program
        if key == 27:
            break

        ## Use b key to toggle between binary threshold or skinmask based filters
        elif key == ord('b'):
            binaryMode = not binaryMode
            bkgrndSubMode = False
            if binaryMode:
                print("Binary Threshold filter active")
            else:
                print("SkinMask filter active")

        ## Use x key to refresh the background image for the background subtraction mask
        elif key == ord('x'):
            takebkgrndSubMask = True
            bkgrndSubMode = True
            print("BkgrndSubMask filter active")

        ## Use g key to start gesture predictions via CNN
        elif key == ord('g'):
            guessGesture = not guessGesture
            print("Prediction Mode - {}".format(guessGesture))

        ## This option is not yet complete. So disabled for now
        ## Use v key to visualize layers
        #elif key == ord('v'):
        #    visualize = True

        ## Use i,j,k,l to adjust ROI window
        elif key == ord('i'):
            y0 = y0 - 5
        elif key == ord('k'):
            y0 = y0 + 5
        elif key == ord('j'):
            x0 = x0 - 5
        elif key == ord('l'):
            x0 = x0 + 5

        ## Quiet mode to hide gesture window
        elif key == ord('q'):
            quietMode = not quietMode
            print("Quiet Mode - {}".format(quietMode))

        ## Use s key to start/pause/resume taking snapshots
        ## numOfSamples controls number of snapshots to be taken PER gesture
        elif key == ord('s'):
            saveImg = not saveImg

            if gestname != '':
                saveImg = True
            else:
                print("Enter a gesture group name first, by pressing 'n'")
                saveImg = False

        ## Use n key to enter gesture name
        elif key == ord('n'):
            gestname = input("Enter the gesture folder name: ")
            try:
                os.makedirs(gestname)
            except OSError as e:
                # if directory already present
                if e.errno != 17:
                    print('Some issue while creating the directory named -' +
                          gestname)

            path = "./" + gestname + "/"

        #elif key != 255:
        #    print key

    # Release & destroy
    cap.release()
    cv2.destroyAllWindows()
    cv2.waitKey(0)
Example 11
def Main():
    global guessGesture, visualize, mod, binaryMode, x0, y0, width, height, saveImg, gestname, path, img_flag, cap

    font = cv2.FONT_HERSHEY_SIMPLEX
    size = 0.7
    fx = 10
    fy = 355
    fh = 18

    #Call CNN model loading callback
    while True:
        # ans = int(raw_input(banner))
        ans = 1
        if ans == 2:
            mod = myNN.loadCNN(-1)
            myNN.trainModel(mod)
            raw_input("Press any key to continue")
            break
        elif ans == 1:
            print "Will load default weight file"
            mod = myNN.loadCNN(0)
            break
        elif ans == 3:
            if not mod:
                w = int(raw_input("Which weight file to load (0 or 1)"))
                mod = myNN.loadCNN(w)
            else:
                print "Will load default weight file"

            img = int(raw_input("Image number "))
            layer = int(raw_input("Enter which layer to visualize "))
            myNN.visualizeLayers(mod, img, layer)
            raw_input("Press any key to continue")
            continue

        else:
            print "Get out of here!!!"
            return 0

    import os
    print os.getpid()

    ## Grab camera input
    cv2.namedWindow(
        'Original',
        cv2.WND_PROP_FULLSCREEN)  # Set Camera Size, cv2.WINDOW_NORMAL
    cv2.setWindowProperty('Original', cv2.WND_PROP_FULLSCREEN,
                          cv2.WINDOW_FULLSCREEN)

    import signal

    # MAC : SIGPROF 27
    def signalCameraOnHandler(signum, frame):
        print signum
        global img_flag
        global cap

        cap = cv2.VideoCapture(0)
        cv2.namedWindow('Original', cv2.WINDOW_NORMAL)
        img_flag = "CAM"

    # MAC : SIGINFO 29
    def signalMapHandler(signum, frame):
        print signum
        global img_flag
        global cap

        cap = cv2.VideoCapture('map.gif')
        cv2.namedWindow('Original', cv2.WINDOW_NORMAL)
        img_flag = "MAP"

    # MAC : SIGUSR1 30
    def signalSmileFaceHandler(signum, frame):
        print signum
        global img_flag
        global cap

        cap = cv2.VideoCapture('smile_glow.gif')
        cv2.namedWindow('Original', cv2.WINDOW_NORMAL)
        img_flag = "SMILE"

    # MAC : SIGUSR2 31
    def signalDefaultFaceHandler(signum, frame):
        print signum
        global img_flag
        global cap

        cap = cv2.VideoCapture('normal_glow.gif')
        cv2.namedWindow('Original', cv2.WINDOW_NORMAL)
        img_flag = "NORMAL"

    # Set Signal
    signal.signal(signal.SIGPROF, signalCameraOnHandler)
    signal.signal(signal.SIGINFO, signalMapHandler)
    signal.signal(signal.SIGUSR1, signalSmileFaceHandler)
    signal.signal(signal.SIGUSR2, signalDefaultFaceHandler)

    reset_flag = False

    thumbs_up_guess_stack = 0
    smile_face_stack = 0
    camera_page_stack = 0
    map_page_stack = 0

    while True:
        ret, frame = cap.read()

        # for end of gif
        if not ret:
            if img_flag == "NORMAL":
                cap = cv2.VideoCapture('normal_glow.gif')
                cv2.namedWindow('Original', cv2.WINDOW_NORMAL)

            elif img_flag == "SMILE":
                cap = cv2.VideoCapture('smile_glow.gif')
                cv2.namedWindow('Original', cv2.WINDOW_NORMAL)

            elif img_flag == "MAP":
                cap = cv2.VideoCapture('map.gif')
                cv2.namedWindow('Original', cv2.WINDOW_NORMAL)

            ret, frame = cap.read()

        if img_flag == "CAM":
            camera_page_stack += 1

            frame = cv2.flip(frame, 3)

            guess_res = ":)"

            if ret:
                if binaryMode:  # on
                    roi, guess_res = binaryMask(frame, x0, y0, width, height)
                else:
                    roi, guess_res = skinMask(frame, x0, y0, width, height)

            # detect thumbs-up frame
            if guess_res.split()[0] == "PEACE" or guess_res.split()[0] == "OK":
                camera_page_stack -= 1
                thumbs_up_guess_stack += 1
            else:
                thumbs_up_guess_stack = 0

            # print GUESS
            cv2.putText(frame,
                        str(thumbs_up_guess_stack) + " " + guess_res, (fx, fy),
                        font, 1, (0, 0, 255), 2, 1)
            cv2.putText(frame, "Show Thumbs-Up on TONY's hat camera!",
                        (fx, fy + fh), font, size, (0, 0, 255), 1, 1)

        elif img_flag == "SMILE":
            smile_face_stack += 1

            cap = cv2.VideoCapture('smile_glow.gif')

        elif img_flag == "NORMAL":
            cap = cv2.VideoCapture('normal_glow.gif')

        elif img_flag == "MAP":
            map_page_stack += 1
            cap = cv2.VideoCapture('map.gif')

        # for signal call
        if thumbs_up_guess_stack >= 6:
            # paging to smile
            os.system("kill -30 " + str(os.getpid()))
            reset_flag = True

        if smile_face_stack >= 30:
            # paging to map
            os.system("kill -29 " + str(os.getpid()))
            reset_flag = True

        if camera_page_stack >= 150:
            # paging to map
            os.system("kill -29 " + str(os.getpid()))
            reset_flag = True

        # map page timeout (in frames)
        if map_page_stack >= 200:
            # paging to camera
            os.system("kill -27 " + str(os.getpid()))
            reset_flag = True

        if reset_flag:
            thumbs_up_guess_stack = 0
            smile_face_stack = 0
            camera_page_stack = 0
            map_page_stack = 0
            reset_flag = False

        cv2.imshow('Original', frame)

        key = cv2.waitKey(10) & 0xff
        if key == 27:
            break

    # Release & destroy
    cap.release()
    cv2.destroyAllWindows()
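The page switching in this example raises signals on its own process by shelling out with os.system("kill -NN <pid>"). The same can be done in-process with os.kill; a minimal sketch (signal.SIGINFO exists only on BSD/macOS, matching the MAC comments above):

# Sketch: raise the same page-switching signals without spawning a shell.
import os
import signal

def page_to_camera():
    os.kill(os.getpid(), signal.SIGPROF)   # handled by signalCameraOnHandler

def page_to_map():
    os.kill(os.getpid(), signal.SIGINFO)   # handled by signalMapHandler (BSD/macOS only)

def page_to_smile():
    os.kill(os.getpid(), signal.SIGUSR1)   # handled by signalSmileFaceHandler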
Example 12
def Main():
    # pdb.set_trace()
    global guessGesture, visualize, mod, binaryMode, x0, y0, width, height, saveImg, gestname, path
    quietMode = False
    
    font = cv2.FONT_HERSHEY_SIMPLEX
    size = 0.5
    fx = 10
    fy = 355
    fh = 18
    
    #Call CNN model loading callback
    while True:
        ans = int(raw_input( banner))
        if ans == 2:
            # train the model
            mod = myNN.loadCNN(-1)
            myNN.trainModel(mod)
            raw_input("Press any key to continue")
            break
        elif ans == 1:
            # use the existing (pre-trained) model
            print "Will load default weight file"
            mod = myNN.loadCNN(0)
            break
        elif ans == 3:
            # visualize how one gesture image is processed by each layer
            if not mod:
                w = int(raw_input("Which weight file to load (0 or 1)"))
                mod = myNN.loadCNN(w)
            else:
                print "Will load default weight file"
            
            img = int(raw_input("Image number "))
            layer = int(raw_input("Enter which layer to visualize "))
            myNN.visualizeLayers(mod, img, layer)
            raw_input("Press any key to continue")
            continue
        
        else:
            print "Get out of here!!!"
            return 0
        
    ## Grab camera input
    cap = cv2.VideoCapture(0)
    cv2.namedWindow('Original', cv2.WINDOW_NORMAL)

    # set capture size to 640x480 (3 = CAP_PROP_FRAME_WIDTH, 4 = CAP_PROP_FRAME_HEIGHT)
    ret = cap.set(3,640)
    ret = cap.set(4,480)
    
    while(True):
        ret, frame = cap.read()
        max_area = 0
        
        frame = cv2.flip(frame, 3)
        
        if ret == True:
            # ! images can also be saved during this step
            if binaryMode == True:
                roi = binaryMask(frame, x0, y0, width, height)
            else:
                roi = skinMask(frame, x0, y0, width, height)

        cv2.putText(frame, 'Options:', (fx, fy), font, 0.7, (0, 255, 0), 2, 1)
        cv2.putText(frame, 'b - Toggle Binary/SkinMask', (fx, fy + fh), font, size, (0, 255, 0), 1, 1)
        cv2.putText(frame, 'g - Toggle Prediction Mode', (fx, fy + 2 * fh), font, size, (0, 255, 0), 1, 1)
        cv2.putText(frame, 'q - Toggle Quiet Mode', (fx, fy + 3 * fh), font, size, (0, 255, 0), 1, 1)
        cv2.putText(frame, 'n - To enter name of new gesture folder', (fx, fy + 4 * fh), font, size, (0, 255, 0), 1, 1)
        cv2.putText(frame, 's - To start capturing new gestures for training', (fx, fy + 5 * fh), font, size, (0, 255, 0), 1, 1)
        cv2.putText(frame, 'ESC - Exit', (fx, fy + 6 * fh), font, size, (0, 255, 0), 1, 1)

        ## If enabled will stop updating the main openCV windows
        ## Way to reduce some processing power :)
        if not quietMode:
            cv2.imshow('Original', frame)  # main image window
            cv2.imshow('ROI', roi)  # cropped gesture ROI window
        
        # Keyboard inputs
        key = cv2.waitKey(10) & 0xff
        
        ## Use Esc key to close the program
        if key == 27:
            break
        
        ## Use b key to toggle between binary threshold or skinmask based filters
        elif key == ord('b'):
            binaryMode = not binaryMode
            if binaryMode:
                print "Binary Threshold filter active"
            else:
                print "SkinMask filter active"
        
        ## Use g key to start gesture predictions via CNN
        elif key == ord('g'):
            guessGesture = not guessGesture
            print "Prediction Mode - {}".format(guessGesture)
        
        ## This option is not yet complete. So disabled for now
        ## Use v key to visualize layers
        # elif key == ord('v'):
        #    visualize = True

        ## Use i,j,k,l to adjust ROI window
        elif key == ord('i'):
            y0 = y0 - 5
        elif key == ord('k'):
            y0 = y0 + 5
        elif key == ord('j'):
            x0 = x0 - 5
        elif key == ord('l'):
            x0 = x0 + 5

        ## Quiet mode to hide gesture window
        elif key == ord('q'):
            quietMode = not quietMode
            print "Quiet Mode - {}".format(quietMode)

        ## Use s key to start/pause/resume taking snapshots
        ## numOfSamples controls number of snapshots to be taken PER gesture
        elif key == ord('s'):
            saveImg = not saveImg
            
            if gestname != '':
                # images are saved inside the binaryMask / skinMask step
                saveImg = True
            else:
                print "Enter a gesture group name first, by pressing 'n'"
                saveImg = False
        
        ## Use n key to enter gesture name
        elif key == ord('n'):
            # get the gesture name and create its folder; images will then be saved into it automatically
            gestname = raw_input("Enter the gesture folder name: ")
            try:
                os.makedirs(gestname)
            except OSError as e:
                # if directory already present
                if e.errno != 17:
                    print 'Some issue while creating the directory named -' + gestname
            
            path = "./"+gestname+"/"
        
        #elif key != 255:
        #    print key

    # Release & destroy
    cap.release()
    cv2.destroyAllWindows()