Example #1
import os
import cv2
import numpy as np
import util as ut
import svm_train as st
import re
from picamera.array import PiRGBArray
from picamera import PiCamera
import io, time

print("Opening ASL Model!")
try:
    direct = cwd + "/ASLClassifier.dat"
    print(direct)
    model = st.load(direct)
    print("SVM Loaded successfully..")
except:
    model = st.trainSVM(17)

camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)
font = cv2.FONT_HERSHEY_SIMPLEX
text = " "

temp = 0
previouslabel = None
previousText = " "
label = None

for frame in camera.capture_continuous(
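
None of these examples ship the svm_train module itself, and its signature varies between forks (trainSVM(17), trainSVM(9, 20, 'TrainData2'), trainSVM(NUM_SAMPLES, FORMAT)). Below is a minimal sketch of what st.load and the one-argument st.trainSVM(num_classes) used here might look like, assuming HOG features and OpenCV's cv2.ml SVM; every detail is inferred from the call sites (including the 'TrainData/<label>_<index>.jpg' naming hinted at in Example #5), not taken from any of the repos.

# Hypothetical reconstruction of the svm_train helpers, inferred from usage.
import cv2
import numpy as np

def load(path):
    # OpenCV >= 3.2 exposes a static loader for trained SVMs
    return cv2.ml.SVM_load(path)

def trainSVM(num_classes, samples_per_class=20, folder='TrainData'):
    hog = cv2.HOGDescriptor()  # default 64x128 HOG window
    features, labels = [], []
    for label in range(1, num_classes + 1):
        for i in range(1, samples_per_class + 1):
            img = cv2.imread('%s/%d_%d.jpg' % (folder, label, i), 0)
            if img is None:
                continue  # skip missing samples
            img = cv2.resize(img, (64, 128))
            features.append(hog.compute(img).flatten())
            labels.append(label)
    svm = cv2.ml.SVM_create()
    svm.setType(cv2.ml.SVM_C_SVC)
    svm.setKernel(cv2.ml.SVM_LINEAR)
    svm.train(np.float32(features), cv2.ml.ROW_SAMPLE,
              np.int32(labels).reshape(-1, 1))
    return svm
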
Example #2
#Importing Opencv and Numpy
import cv2
import numpy as np

#Importing our dependencies
import util as ut
import svm_train as st
import hand_util as hu

import time

#create and train the SVM model on every run, since svm.load() is broken in OpenCV 3.1.0: https://github.com/Itseez/opencv/issues/4969
model = st.trainSVM(9, 20, 'TrainData2')

kernel = np.ones((5, 5), np.uint8)
#Camera and font initialization
cam = int(raw_input("Enter Camera Index : "))
cap = cv2.VideoCapture(cam)
font = cv2.FONT_HERSHEY_SIMPLEX

#The main event loop
while (cap.isOpened()):
    t = time.time()
    _, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    ret, th1 = cv2.threshold(gray.copy(), 130, 255, cv2.THRESH_BINARY)
    th1 = cv2.erode(th1, kernel, iterations=2)
    cv2.imshow('thresh', th1)
    _, contours, hierarchy = cv2.findContours(th1.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnt = ut.getMaxContour(contours, 4000)
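
util.getMaxContour is never shown on this page. From the call sites (a contour list plus a minimum area, with callers checking the result against None), a plausible implementation is:

# Plausible sketch of util.getMaxContour, inferred from the call sites.
def getMaxContour(contours, min_area=4000):
    best, best_area = None, min_area
    for c in contours:
        area = cv2.contourArea(c)
        if area > best_area:  # keep the largest contour above the floor
            best, best_area = c, area
    return best  # None when nothing qualifies
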
Example #3
        feature.append(feature_tmp)
        if lines[-1] == '+':
            label.append(1)
        elif lines[-1] == '-':
            label.append(-1)
    f.close()
    return np.mat(feature), np.mat(label).T

print ("step 1: load data...")
dataSet, labels = load_data('data.txt')
train_x = dataSet
train_y = labels
test_x = dataSet  # note: the model is evaluated on its own training data
test_y = labels

## step 2: training...
print("step 2: training...")
C = 0.001
toler = 0.001
maxIter = 500
svmClassifier = SVM.trainSVM(train_x, train_y, C, toler, maxIter, kernelOption=('linear', 0))

print("b: ", svmClassifier.b)
## step 3: testing
print("step 3: testing...")
accuracy = SVM.testSVM(svmClassifier, test_x, test_y)

## step 4: show the result
print("step 4: show the result...")
print('The classification accuracy is: %.3f%%' % (accuracy * 100))
SVM.showSVM(svmClassifier)
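
The top of load_data is cut off above. A reconstruction consistent with the visible tail, assuming whitespace-separated feature columns with a trailing '+'/'-' class marker on each line:

def load_data(filename):
    feature, label = [], []
    f = open(filename)
    for line in f.readlines():
        lines = line.strip().split()          # columns of one sample
        feature_tmp = [float(x) for x in lines[:-1]]
        feature.append(feature_tmp)
        if lines[-1] == '+':                  # positive class
            label.append(1)
        elif lines[-1] == '-':                # negative class
            label.append(-1)
    f.close()
    return np.mat(feature), np.mat(label).T
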
Example #4
#other dependencies
from pymouse import PyMouse
from pykeyboard import PyKeyboard
import moosegesture as mges

#PyMouse and PyKeyboard control the mouse and keyboard from Python
m1 = PyMouse()
k1 = PyKeyboard()

#capturing device
cam = int(raw_input("Enter Camera Index : "))
cap = cv2.VideoCapture(cam)

#training the svm
model = st.trainSVM(3, 40, 'TrainData')

#initializing values
thresh = 120
frame_count = 0
color = (0, 0, 255)
res = ut.get_screen_res()
w_screen = int(res['w']) + 200
h_screen = int(res['h']) + 200
font = cv2.FONT_HERSHEY_SIMPLEX

#loop 1 to calculate the mean threshold

while (cap.isOpened()):
    # for fps calc
    t = time.time()
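
The excerpt stops before the gesture-to-cursor mapping. A minimal sketch of how a hand centroid could be scaled from frame coordinates to the w_screen/h_screen values computed above, assuming a 640x480 capture; move_cursor and its parameters are illustrative, not from the repo:

def move_cursor(mouse, cx, cy, w_screen, h_screen, frame_w=640, frame_h=480):
    # scale the centroid from frame space to screen space
    x = int(cx * w_screen / float(frame_w))
    y = int(cy * h_screen / float(frame_h))
    mouse.move(x, y)  # PyMouse: warp the pointer to absolute coordinates

# usage once a contour centroid (cx, cy) is known:
# move_cursor(m1, cx, cy, w_screen, h_screen)
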
Example #5
import cv2
import numpy as np
import util as ut
import svm_train as st
import time
from pygame import mixer
import threading

model=st.trainSVM(5,20,'TrainData')
move_text={'0':'ZERO','1':'ONE','2':'TWO','3':'THREE','4':'FOUR','5':'FIVE','6':'SIX','7':'SEVEN','8':'EIGHT','9':'NINE'}
aud_fil=['0.mp3','1.mp3','2.mp3','3.mp3','4.mp3','5.mp3','6.mp3','7.mp3','8.mp3','9.mp3']
cam = 0
cap=cv2.VideoCapture(cam)
font = cv2.FONT_HERSHEY_SIMPLEX
mixer.init()

lock = threading.Lock()  # create once, outside the loop, so every iteration shares it

while(cap.isOpened()):
        move=''
        _,img=cap.read()
        gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        _,th1 = cv2.threshold(gray.copy(),150,255,cv2.THRESH_TOZERO)
        cv2.imshow('thresh',th1)
        _,contours,_ = cv2.findContours(th1.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        cnt=ut.getMaxContour(contours,4000)
        if cnt is not None:  # identity check: cnt may be a numpy array, so != None would raise
                _,res=ut.getGestureImg(cnt,img,th1,model)
                cv2.imshow('PredictedGesture',cv2.imread('TrainData/'+res+'_1.jpg'))
                move=move_text[res]
                mixer.music.load('Aud/'+aud_fil[int(res)])
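
The snippet is cut off right after mixer.music.load(); presumably the clip is then played on a worker thread so the capture loop keeps running, which would also explain the lock. A sketch under that assumption (play_audio is our name, not the repo's):

def play_audio():
    with lock:  # serialize access to the shared pygame mixer
        mixer.music.play()
        while mixer.music.get_busy():  # block this thread, not the camera loop
            time.sleep(0.05)

# after mixer.music.load(...):
# threading.Thread(target=play_audio).start()
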
Example #6
import cv2
import numpy as np
import util as ut
import svm_train as st 
import re
model=st.trainSVM(17)
#create and train the SVM model on every run, since svm.load() is broken in OpenCV 3.1.0: https://github.com/Itseez/opencv/issues/4969
cam=int(raw_input("Enter Camera number: "))
cap=cv2.VideoCapture(cam)
font = cv2.FONT_HERSHEY_SIMPLEX

def nothing(x) :
    pass

text= " "

temp=0
previouslabel=None
previousText=" "
label = None
while(cap.isOpened()):
	_,img=cap.read()
	cv2.rectangle(img,(900,100),(1300,500),(255,0,0),3) # bounding box which captures the ASL sign to be detected by the system
	img1=img[100:500,900:1300]
	img_ycrcb = cv2.cvtColor(img1, cv2.COLOR_BGR2YCR_CB)
	blur = cv2.GaussianBlur(img_ycrcb,(11,11),0)
	skin_ycrcb_min = np.array((0, 138, 67))
	skin_ycrcb_max = np.array((255, 173, 133))
	mask = cv2.inRange(blur, skin_ycrcb_min, skin_ycrcb_max)  # detecting the hand in the bounding box using skin detection
	contours,hierarchy = cv2.findContours(mask.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
	cnt=ut.getMaxContour(contours,4000)						  # using contours to capture the skin-filtered image of the hand
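
The same YCrCb skin-segmentation step recurs across these examples; factored into a standalone helper (same bounds as above, the function name is ours) it reads:

def skin_mask(roi_bgr):
    # segment skin in YCrCb space: blur first, then band-pass on Cr/Cb
    ycrcb = cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2YCR_CB)
    blur = cv2.GaussianBlur(ycrcb, (11, 11), 0)
    lo = np.array((0, 138, 67))
    hi = np.array((255, 173, 133))
    return cv2.inRange(blur, lo, hi)  # 255 where the pixel looks like skin
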
Example #7
def camera():
    print("Opening ASL Model!")
    cwd = os.getcwd()  # classifier file expected next to the script (assumes os is imported above)
    try:
        direct = cwd + "/ASLClassifier.dat"
        print(direct)
        model = st.load(direct)
        print("SVM Loaded successfully..")
    except Exception:  # fall back to training from scratch if the file is missing
        model = st.trainSVM(17)

    #cap=cv2.VideoCapture(0)
    #cap=PiCamera(0)
    camera = PiCamera()
    camera.resolution = (640, 480)
    camera.framerate = 32
    rawCapture = PiRGBArray(camera, size=(640, 480))
    time.sleep(0.1)
    #rgbFrame = PiRGBArray(camera, size = camera.resolution)
    #frame1 = captureProcessFrame(camera, rgbFrame, 5)
    #frameCount = 0
    font = cv2.FONT_HERSHEY_SIMPLEX
    text = " "

    temp = 0
    previouslabel = None  # Past label
    previousText = " "  # Past text
    label = None  # current label

    for frame in camera.capture_continuous(
            rawCapture, format="bgr",
            use_video_port=True):  # Capture pi camera frames
        stream = frame.array  # store frame input as an array
        rawCapture.truncate(0)  # clear the stream for the next frame
        img = stream  # store stream as temp img
        cv2.rectangle(img, (300, 300), (100, 100), (0, 255, 0),
                      0)  # draw the capture rectangle on screen
        img1 = img[100:300, 100:300]  # crop the region of interest
        img_ycrcb = cv2.cvtColor(img1,
                                 cv2.COLOR_BGR2YCR_CB)  # convert the ROI to the YCrCb color space
        blur = cv2.GaussianBlur(
            img_ycrcb, (11, 11), 0
        )  # Gaussian blur smooths frame noise before skin thresholding
        skin_ycrcb_min = np.array((0, 138, 67))  # color spaces min
        skin_ycrcb_max = np.array((255, 173, 133))  # color spaces max
        mask = cv2.inRange(
            blur, skin_ycrcb_min, skin_ycrcb_max
        )  # detecting the hand in the bounding box using skin detection
        imgres, contours, hierarchy = cv2.findContours(
            mask.copy(), cv2.RETR_EXTERNAL,
            cv2.CHAIN_APPROX_SIMPLE)  # find contours in the skin mask
        cnt = ut.getMaxContour(contours, 4000)  # keep the largest contour above the area threshold
        if cnt is not None:  # is there something there?
            gesture, label = ut.getGestureImg(
                cnt, img1, mask, model
            )  # passing the trained model for prediction and fetching the result
            if label is not None:  # does the prediction match a known label?
                if temp == 0:
                    previouslabel = label  # last label now becomes the label
                elif previouslabel == label:  # still the same gesture?
                    previouslabel = label  # keep tracking it
                    temp += 1  # increase the stable-gesture count
                else:  # gesture changed
                    temp = 0  # reset the count
                if temp == 40:  # stable for 40 consecutive frames?
                    if label == 'P':  # P inserts a sentence space
                        label = " "  # label becomes a space
                        text = text + label  # append it to the text
                    if label == 'Q':  # Q (QUIT) wipes the last word
                        words = re.split(" +", text)  # split the text
                        words.pop()  # drop the last word
                        text = " ".join(words)  # rebuild the text
                        #text=previousText
                print(text)  # print out your last gestures before quitting

            #cv2.imshow('PredictedGesture',gesture)				  # showing the best match or prediction
            cv2.putText(
                img, label, (50, 150), font, 8, (0, 125, 155),
                2)  # displaying the predicted letter on the main screen
            cv2.putText(img, text, (50, 450), font, 3, (0, 0, 255), 2)

        cv2.imshow('Frame', img)  # show on screen
        #cv2.imshow('Mask',mask)
        key = cv2.waitKey(1) & 0xFF  # wait for q to quit
        if key == ord("q"):  # is Q?
            #cap.release()
            cv2.destroyAllWindows()  # close OpenCV session
            camera.close()
            break  # stop while

    return render_template("index.html")
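
The temp/previouslabel bookkeeping above is a debounce: a prediction is committed only once it has been stable for 40 consecutive frames. The same idea as a small helper class (a sketch with our naming, not the repo's):

class LabelDebouncer:
    def __init__(self, hold_frames=40):
        self.hold = hold_frames
        self.last = None
        self.count = 0

    def update(self, label):
        # return the label exactly once, after hold_frames stable frames
        if label == self.last:
            self.count += 1
        else:
            self.last, self.count = label, 1
        return label if self.count == self.hold else None
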
Example #8
"""


import cv2
import numpy as np
import util as ut
import svm_train as st 
import time
import hand_util as hu



cam=int(raw_input("Enter Camera Index : "))
cap=cv2.VideoCapture(cam)
font = cv2.FONT_HERSHEY_SIMPLEX
model=st.trainSVM(3,40,'TrainData')
thresh=120
frame_count=0
color=(0,0,255)


while(cap.isOpened()):

	t=time.time()
	_,img=cap.read()
	gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	cv2.rectangle(img,(270,165),(370,315),color,3)
	fps=int(1/(time.time()-t))
	cv2.putText(img,"FPS: "+str(fps),(50,50), font,1,(255,255,255),2,cv2.LINE_AA)
	cv2.imshow('Frame',img)
Example #9
import ssl
from SimpleWebSocketServer import WebSocket, SimpleWebSocketServer, SimpleSSLWebSocketServer
from optparse import OptionParser
import base64
# import ASL as asl
import svm_train as st

# test
import os, sys
import cv2  # used by process() below
from PIL import Image

STABILIZATION_QTY = 20
NUM_SAMPLES = 15
FORMAT = 'JPG'

model, word_map = st.trainSVM(NUM_SAMPLES, FORMAT)


class SimpleEcho(WebSocket):

    count = 0
    temp = 0
    previouslabel = None
    previousText = " "
    label = None

    def process(self, img):
        cv2.imshow('original_image', img)
        # _,img=cap.read()
        # cv2.rectangle(img,(900,100),(1300,500),(255,0,0),3) # bounding box which captures ASL sign to be detected by the system
        # img1=img[100:500,900:1300]
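
process() already receives a decoded image, and the base64 import suggests frames arrive as base64 text over the WebSocket. One plausible decode step (an assumption; the actual handler is not shown, and numpy/cv2 are assumed imported):

def decode_frame(b64_text):
    # base64 text -> byte buffer -> BGR image (None if the payload is invalid)
    raw = base64.b64decode(b64_text)
    buf = np.frombuffer(raw, dtype=np.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)
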
Example #10
import cv2
import numpy as np
import util as ut
import svm_train as st
import re

model = st.trainSVM(3)
#create and train the SVM model on every run, since svm.load() is broken in OpenCV 3.1.0: https://github.com/Itseez/opencv/issues/4969
cam = 0
cap = cv2.VideoCapture(cam)
font = cv2.FONT_HERSHEY_SIMPLEX


def nothing(x):
    pass


text = " "

temp = 0
previouslabel = None
previousText = " "
label = None
while (cap.isOpened()):
    _, img = cap.read()
    cv2.rectangle(
        img, (900, 100), (1300, 500), (255, 0, 0),
        3)  # bounding box which captures the ASL sign to be detected by the system
    img1 = img[100:500, 900:1300]
    img_ycrcb = cv2.cvtColor(img1, cv2.COLOR_BGR2YCR_CB)
    blur = cv2.GaussianBlur(img_ycrcb, (11, 11), 0)
Example #11
import sys
import cv2
import numpy as np
import util as ut
import svm_train as st
import re

model = st.trainSVM(17)  # keep the trained model; the (truncated) code below presumably predicts with it
cap = cv2.VideoCapture(0)
font = cv2.FONT_HERSHEY_SIMPLEX


def nothing(x):
    pass


text = " "

temp = 0
previouslabel = None
previousText = " "
label = None
while (cap.isOpened()):
    _, img = cap.read()
    cv2.rectangle(
        img, (900, 100), (1300, 500), (255, 0, 0),
        3)  # bounding box which captures the ASL sign to be detected by the system
    img1 = img[100:500, 900:1300]
    img_ycrcb = cv2.cvtColor(img1, cv2.COLOR_BGR2YCrCb)  # convert the cropped ROI (img1), not the full frame
    blur = cv2.GaussianBlur(img_ycrcb, (11, 11), 0)
    skin_ycrcb_min = np.array((0, 138, 67))
Example #12
#Importing Opencv and Numpy
import cv2
import numpy as np

#Importing our dependencies
import util as ut
import svm_train as st 



import time


#create and train the SVM model on every run, since svm.load() is broken in OpenCV 3.1.0: https://github.com/Itseez/opencv/issues/4969
model=st.trainSVM(9,20,'TrainData2')
move_text={'1':'GRAB','2':'Bless','3':'Rock','4':'Stop','5':'ThumbsUp','6':'Victory','7':'Stop2','8':'Left','9':'Right'}

#Camera and font initialization
cam=int(raw_input("Enter Camera Index : "))
cap=cv2.VideoCapture(cam)
font = cv2.FONT_HERSHEY_SIMPLEX


#The main event loop
while(cap.isOpened()):
	move=''
	t=time.time()
	_,img=cap.read()
	gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	ret,th1 = cv2.threshold(gray.copy(),150,255,cv2.THRESH_TOZERO)
Example #13
import cv2
import numpy as np
import util as ut
import svm_train as st 
import time

model=st.trainSVM(9)
#create and train the SVM model on every run, since svm.load() is broken in OpenCV 3.1.0: https://github.com/Itseez/opencv/issues/4969
cam=int(raw_input("Enter Camera Index : "))
cap=cv2.VideoCapture(cam)
font = cv2.FONT_HERSHEY_SIMPLEX


while(cap.isOpened()):

	t=time.time()
	_,img=cap.read()
	gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
	ret,th1 = cv2.threshold(gray.copy(),20,255,cv2.THRESH_BINARY)
	_,contours,hierarchy = cv2.findContours(th1.copy(),cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
	cnt=ut.getMaxContour(contours,4000)
	if cnt is not None:  # identity check: cnt may be a numpy array, so != None would raise
		gesture=ut.getGestureImg(cnt,img,th1,model)
		cv2.imshow('PredictedGesture',gesture)
		
	fps=int(1/(time.time()-t))
	cv2.putText(img,"FPS: "+str(fps),(50,50), font,1,(255,255,255),2,cv2.LINE_AA)
	cv2.imshow('Frame',img)
	k = 0xFF & cv2.waitKey(10)
	if k == 27:
		break
Example #14
import cv2
import numpy as np
import util as ut
import svm_train as st
import re
model = st.trainSVM(1)  # pass the number of characters to train on, counting from A (e.g., A -> 1)
#create and train the SVM model on every run, since svm.load() is broken in OpenCV 3.1.0: https://github.com/Itseez/opencv/issues/4969

# cam=int(input("Enter Camera number: "))
cap = cv2.VideoCapture(0)  #camera 0 captures the images

font = cv2.FONT_HERSHEY_SIMPLEX


def nothing(x):
    pass


label = None

while (cap.isOpened()):
    # cnt = []
    # cv2.waitKey(5000)
    _, img = cap.read()

    cv2.rectangle(
        img, (300, 200), (800, 600), (255, 0, 0),
        3)  # bounding box which captures the ASL sign to be detected by the system
    # sleep(2000)

    img1 = img[200:600, 300:800]  # crop the rectangle contents (numpy indexing is [y1:y2, x1:x2])
Example #15
import cv2
import numpy as np
import util as ut
import svm_train as st
import re
model = st.trainSVM(26)

cam = int(input("Enter Camera number: "))
cap = cv2.VideoCapture(cam)
font = cv2.FONT_HERSHEY_SIMPLEX


def nothing(x):
    pass


text = " "

temp = 0
previouslabel = None
previousText = " "
label = None
while (cap.isOpened()):
    _, img = cap.read()
    img = cv2.flip(img, 1)
    cv2.rectangle(
        img, (450, 100), (640, 350), (255, 0, 0),
        2)  # bounding box which captures the ASL sign to be detected by the system
    img1 = img[100:350, 450:640]
    img_ycrcb = cv2.cvtColor(img1, cv2.COLOR_BGR2YCR_CB)
    blur = cv2.GaussianBlur(img_ycrcb, (11, 11), 0)
Example #16
from SimpleWebSocketServer import WebSocket, SimpleWebSocketServer, SimpleSSLWebSocketServer
from optparse import OptionParser
import base64
# import ASL as asl
import svm_train as st

# test
import os,sys
import cv2  # used by process() below
from PIL import Image


STABILIZATION_QTY=20
NUM_SAMPLES=15
FORMAT='JPG'

model,word_map=st.trainSVM(NUM_SAMPLES,FORMAT)


class SimpleEcho(WebSocket):

    count = 0
    temp=0
    previouslabel=None
    previousText=" "
    label = None

    def process(self,img):
        cv2.imshow('original_image',img)
        # _,img=cap.read()
        # cv2.rectangle(img,(900,100),(1300,500),(255,0,0),3) # bounding box which captures ASL sign to be detected by the system
        # img1=img[100:500,900:1300]