Example #1
def getGestureImg(cnt, img, th1, model):
    """Classify the gesture inside contour *cnt* and return its reference image.

    Draws the contour's bounding box onto *img*, crops that region, masks it
    with the corresponding region of *th1*, resizes the crop to 200x200 and
    converts it to grayscale before handing it to the classifier via
    ``st.predict``.  Element 1 of the prediction is treated as a 1-based
    class index and mapped to a letter (1 -> 'A', 2 -> 'B', ...).
    NOTE(review): the exact shape of ``st.predict``'s return value is not
    visible here — confirm index 1 holds the class id.

    Returns a tuple ``(reference_image, letter)`` where *reference_image*
    is loaded from the TrainData directory.
    """
    box_x, box_y, box_w, box_h = cv2.boundingRect(cnt)
    cv2.rectangle(img, (box_x, box_y), (box_x + box_w, box_y + box_h), (0, 255, 0), 2)
    # Crop to the bounding box, then keep only the pixels the mask allows.
    roi = img[box_y:box_y + box_h, box_x:box_x + box_w]
    roi = cv2.bitwise_and(roi, roi, mask=th1[box_y:box_y + box_h, box_x:box_x + box_w])
    roi = cv2.resize(roi, (200, 200))
    gray_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    prediction = st.predict(model, gray_roi)
    letter = chr(int(prediction[1]) + 64)
    ref_img = cv2.imread('TrainData\\' + letter + '_2.jpg')
    return ref_img, letter
Example #2
def getGestureImg(cnt, img, th1, model):
    """Classify the gesture inside contour *cnt* and return its reference image.

    Draws the contour's bounding box onto *img*, then builds a pencil-sketch
    style input: the cropped region is converted to grayscale, inverted,
    Gaussian-blurred and combined via the ``dodgeV2`` helper.  The result is
    masked with the matching region of *th1*, resized to 200x200 and passed
    to ``st.predict``.  Element 0 of the prediction is treated as a 1-based
    class index and mapped to a letter (1 -> 'A', 2 -> 'B', ...).
    NOTE(review): ``dodgeV2`` and ``st.predict`` are defined elsewhere —
    their exact semantics are assumed, not visible here.

    Returns a tuple ``(reference_image, letter)`` where *reference_image*
    is loaded from the DB directory.
    """
    rx, ry, rw, rh = cv2.boundingRect(cnt)
    cv2.rectangle(img, (rx, ry), (rx + rw, ry + rh), (0, 255, 0), 2)
    gray = cv2.cvtColor(img[ry:ry + rh, rx:rx + rw], cv2.COLOR_BGR2GRAY)
    inverted = 255 - gray
    blurred = cv2.GaussianBlur(inverted, ksize=(21, 21), sigmaX=0, sigmaY=0)
    # Dodge-blend grayscale with the blurred inverse to get a sketch effect.
    sketch = dodgeV2(gray, blurred)
    sketch = cv2.bitwise_and(sketch, sketch, mask=th1[ry:ry + rh, rx:rx + rw])
    sketch = cv2.resize(sketch, (200, 200))
    prediction = st.predict(model, sketch)
    letter = chr(int(prediction[0]) + 64)
    ref_img = cv2.imread('DB/' + letter + '_2.jpg')
    return ref_img, letter
Example #3
# Stream colour frames from a RealSense camera and display them.
# Press 's' to run the classifier on the current frame (result shown until
# any key is pressed); press 'e' to exit.
pipeline = rs.pipeline()
config = rs.config()
# 640x480 BGR colour stream at 15 FPS.
config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 15)
pipeline.start(config)

try:
    while True:
        frames = pipeline.wait_for_frames()
        color_frame = frames.get_color_frame()

        if not color_frame:
            continue

        # Convert realsense images to numpy arrays
        color_image = np.asanyarray(color_frame.get_data())

        cv2.imshow('Result', color_image)
        # Mask to the low 8 bits: on some platforms waitKey sets high bits,
        # which would make the ord() comparisons below never match.
        key = cv2.waitKey(10) & 0xFF
        if key == ord('s'):
            color_image = st.predict(color_image)

            cv2.imshow("Result", color_image)
            # Block until any key so the prediction stays visible.
            cv2.waitKey()
        if key == ord('e'):
            break

finally:
    # Always release the camera and close the display windows.
    pipeline.stop()
    cv2.destroyAllWindows()