Code Example #1
def read_from_folder(dir_):
    """
    Loads data and preprocess and save the images
    """
    img_dir = '/home/aarav/Downloads/Compressed/' + dir_
    dataset_dir = '/home/aarav/Desktop/MajorProject/Dataset/' + dir_

    print("LOADING DATA FROM Digits: ", end="")

    for folder in os.listdir(img_dir):
        print(folder, end=' | ')

        # Make a sub-folder for each class (letter/digit)
        if not os.path.exists(dataset_dir + folder):
            os.makedirs(dataset_dir + folder)

        for image in os.listdir(img_dir + "/" + folder):
            print(image)
            if image.endswith('txt'):
                continue
            temp_img = cv2.imread(img_dir + '/' + folder + '/' + image)
            mask1 = segment(temp_img)

            handFound, hand, contours_of_hand = utils.get_my_hand(mask1)

            if handFound:
                print(hand.shape)
                cv2.imwrite(dataset_dir + folder + '/' + image, hand)

            break  # NOTE: only the first usable image in each folder is processed
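
A minimal driver for this preprocessing step might look like the sketch below; the two folder names are taken from the commented-out calls in Code Example #8, and the sketch assumes `segment` and `utils` are importable in this module.

if __name__ == '__main__':
    # Process both class hierarchies under .../Downloads/Compressed/
    read_from_folder('Digits/')
    read_from_folder('Letters/')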
Code Example #2
    def add_frame(self, image):

        # Skin-segment the frame, then blank out the detected face region so
        # that only the hand remains in the mask.
        mask = utils.segment(image)
        face, foundFace = utils.detect_face(image)
        mask = utils.eliminate_face(face, foundFace, mask)

        if self.__using_stabilization:
            mask = utils.stabilize(foundFace, self.__no_of_frames + 1, image,
                                   face, mask)
        hand, hand_contour = utils.get_my_hand(mask, True)

        hand_pose, direction = 'None', 'None'

        if hand_contour is not None:
            motion_detected = self.__motion.get_hand_motion(hand_contour)
        else:
            return

        # A stationary hand is classified as a pose; a moving hand yields a
        # motion direction instead.
        if not motion_detected:
            hand_pose = recognise_hand_pose(
                hand,
                directly_from_hand=True,
                model_path='Models/silatra_gesture_signs.sav')
        else:
            direction = motion_detected

        self.__observations.append((hand_pose, direction))
        self.__no_of_frames += 1
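
For context, a hedged usage sketch of this method in a live-feed loop follows. The enclosing class is not shown in this snippet, so `GestureRecognizer` is an assumed name for it; everything else is standard OpenCV.

import cv2

recognizer = GestureRecognizer()  # hypothetical name for the enclosing class
cap = cv2.VideoCapture(0)
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    recognizer.add_frame(frame)  # accumulates (hand_pose, direction) observations
cap.release()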
Code Example #3
def recognise_hand_pose(image,
                        directly_from_hand=False,
                        model_path='Models/silatra_digits_and_letters.sav',
                        using_stabilization=False,
                        no_of_frames=1):
    '''
    ### SiLaTra Hand Pose Recognition

    Classifies the hand pose in the input image.

    Inputs: (a) Mandatory parameter - the image on which hand pose classification is to be performed.

            (b) Optional parameters (use them only if you understand them):

                (1) directly_from_hand - boolean - Set this to True if `image` already contains the cropped hand region.
                (2) model_path - string - To use an alternate model, pass the path of its .sav file.
                (3) using_stabilization - boolean - Set this to True to use object stabilization. This is only useful when classifying hand poses from a continuous feed.
                (4) no_of_frames - integer - ONLY TO BE USED IF using_stabilization IS True; pass the index of the current frame in the continuous feed.
    '''

    import pickle
    from sklearn.neighbors import KNeighborsClassifier  # class of the pickled model

    if not directly_from_hand:
        mask = utils.segment(image)
        face, foundFace = utils.detect_face(image)
        mask = utils.eliminate_face(face, foundFace, mask)

        if using_stabilization:
            mask = utils.stabilize(foundFace, no_of_frames, image, face, mask)

        hand = utils.get_my_hand(mask)
        if hand is False:
            return 'No hand pose in image'
        features = utils.extract_features(hand)
    else:
        features = utils.extract_features(image)

    with open(model_path, 'rb') as model_file:
        classifier = pickle.load(model_file)
    hand_pose = classifier.predict([features])[0]

    return hand_pose
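
A minimal usage sketch for this function, assuming an arbitrary test image on disk (the file name is a placeholder, not from the project):

import cv2

frame = cv2.imread('sample_frame.png')
# Defaults load Models/silatra_digits_and_letters.sav and run the full
# segment -> face elimination -> feature extraction pipeline.
pose = recognise_hand_pose(frame)
print('Recognised hand pose:', pose)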
Code Example #4
def recognise_hand_pose(image,
                        directly_from_hand=False,
                        model_path='Models/digits_and_letters.sav',
                        using_stabilization=False,
                        no_of_frames=1):

    import pickle
    from sklearn.neighbors import KNeighborsClassifier

    if not directly_from_hand:
        mask = utils.segment(image)
        face, foundFace = utils.detect_face(image)
        mask = utils.eliminate_face(face, foundFace, mask)

        if using_stabilization:
            mask = utils.stabilize(foundFace, no_of_frames, image, face, mask)

        hand = utils.get_my_hand(mask)
        if hand is False:
            return 'No hand pose in image'
        features = utils.extract_features(hand)
    else:
        features = utils.extract_features(image)

    with open(model_path, 'rb') as model_file:
        classifier = pickle.load(model_file)
    hand_pose = classifier.predict([features])[0]

    return hand_pose
Code Example #5
File: ImgReceiver.py  Project: vrr-21/Silatra
    # pred = dsk.findSign(img1)

    # print("Received Sign:",pred)
    # addToQueue(pred)

    # pred = predictSign()
    # # pred = -1
    # print("Stable Sign:",pred)


    try:
        mask = silatra.segment(img_np)
        _, thresh = cv2.threshold(mask, 127, 255, 0)

        hand_contour = get_my_hand(thresh, return_only_contour=True)
        # hull = cv2.convexHull(hand_contour)
        # final_image = np.zeros(img_np.shape, np.uint8)
        # cv2.drawContours(final_image, [hand_contour], 0, (0, 255, 0), 2)
        
        # Centroid of the hand contour, from its image moments.
        M = cv2.moments(hand_contour)
        cx = int(M["m10"] / M["m00"])
        cy = int(M["m01"] / M["m00"])

        # First frame: no previous centroid yet, so start from the current one.
        if prev_x == 0 and prev_y == 0:
            prev_x, prev_y = cx, cy

        delta_x, delta_y, slope, direction = prev_x - cx, prev_y - cy, 0, 'None'

        if delta_x**2 + delta_y**2 > THRESHOLD**2:
            if delta_x == 0 and delta_y > 0: slope = 999    # inf
            elif delta_x == 0 and delta_y < 0: slope = -999  # -inf
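
The centroid step above can be isolated into a small helper; this sketch adds a guard for degenerate contours, where m00 is zero and the division would otherwise fail:

import cv2

def contour_centroid(contour):
    """Return the (cx, cy) centroid of a contour, or None if it has no area."""
    M = cv2.moments(contour)
    if M["m00"] == 0:
        return None
    return int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"])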
Code Example #6
            maxArea1 = w*h
            faceRect = (x,y,w,h)
            foundFace = True
           
    '''
    mask1 = FaceEliminator.eliminateFace(mask1, foundFace, faceRect)
    if displayWindows:
        cv2.imshow("Mask12", mask1)

    if displayWindows:
        cv2.imshow("Original Img", img_np)

    # contours_of_hand is unused here and should be removed
    handFound, hand, contours_of_hand = utils.get_my_hand(mask1)

    if recognitionMode == "SIGN":
        if handFound:
            if displayWindows:
                cv2.imshow("Your hand",hand)
                
            features = utils.extract_features(hand, gridSize)
            pred = utils.predictSign(classifier, features)
            print(pred)
        else:
            pred = -1
        utils.addToQueue(pred)
        pred = utils.getConsistentSign(displayWindows)

        # pred = -1
Code Example #7
            try:
                print(
                    ' ' * 160 +
                    '\rProcessing image: %3d, Label = %s, From Location: %s' %
                    (i, label, DATA_LOC),
                    end='\r')

                image = cv2.imread(DATA_LOC + str(label) + "/" + str(i) +
                                   '.png')

                # cv2.imread returns None for unreadable files; skip them.
                if image is None: continue

                mask, _, _ = silatra.segment(image)
                _, thresh = cv2.threshold(mask, 127, 255, 0)

                hand = get_my_hand(thresh)
                features = extract_features(hand, grid)

                # One CSV row: all features comma-separated, then the label.
                to_write_data = ','.join(str(feature) for feature in features)
                to_write_data += ',' + label + '\n'

                dump_file.write(to_write_data)
                total_images_parsed += 1
            except Exception as e:
                print(e)
                continue
total = (time.time() - start)
# print(' '*160+'\rTotal time required = %3.3fs' % (total))
print('Total images parsed: %d' % (total_images_parsed))
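
The dump file produced above is a plain CSV of features plus a trailing label, which pairs naturally with the pickled KNN models loaded by recognise_hand_pose() in Code Examples #3 and #4. A hedged training sketch follows; the CSV file name and k=5 are assumptions, not taken from the project.

import pickle
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier

data = pd.read_csv('features_dump.csv', header=None)  # assumed dump file name
X, y = data.iloc[:, :-1], data.iloc[:, -1]            # feature columns, then label

classifier = KNeighborsClassifier(n_neighbors=5)
classifier.fit(X, y)

with open('Models/silatra_digits_and_letters.sav', 'wb') as model_file:
    pickle.dump(classifier, model_file)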
Code Example #8
    This function returns a mask with white areas signifying skin and black areas otherwise.

    Returns: mask
    """

    import cv2
    from numpy import array, uint8

    blurred_img = cv2.GaussianBlur(src_img, (5, 5), 0)
    blurred_img = cv2.medianBlur(blurred_img, 5)
    blurred_img = cv2.cvtColor(blurred_img, cv2.COLOR_BGR2YCrCb)
    # Empirically chosen YCrCb bounds for skin tones.
    lower = array([0, 137, 100], uint8)
    upper = array([255, 200, 150], uint8)
    mask = cv2.inRange(blurred_img, lower, upper)
    open_kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (5, 5))
    close_kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (7, 7))
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, open_kernel)
    mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, close_kernel)
    return mask


#read_from_folder('Digits/')
#read_from_folder('Letters/')
mask1 = segment(test_image)
'''
src1_mask = cv2.cvtColor(mask1, cv2.COLOR_GRAY2BGR)  # change mask to a 3-channel image
out = cv2.subtract(src1_mask, test_image)
out = cv2.subtract(src1_mask, out)
'''
handFound, hand, contours_of_hand = utils.get_my_hand(test_image, mask1)
Code Example #9
import pickle
dir_path = os.path.dirname(os.path.realpath(__file__))
print('dir path is\n', dir_path)
sys.path.insert(0, dir_path+"/Modules")
sys.path.insert(0, dir_path)
import utils
import FaceEliminator
from keras.preprocessing import image
# Following modules are used specifically for Gesture recognition

currentModuleName = __file__.split(os.path.sep)[-1]
print('current module name\n', currentModuleName)


from tensorflow import keras
model = keras.models.load_model('/home/aarav/Desktop/MajorProject/Models/m.h5')

#test_image = cv2.imread('/home/aarav/Desktop/MajorProject/Dataset/Letters/a/100.png')

test_image = cv2.imread('/home/aarav/Desktop/1.png')


gray = cv2.cvtColor(test_image, cv2.COLOR_BGR2GRAY)
handFound, hand, contours_of_hand = utils.get_my_hand(gray)
hand = cv2.resize(hand, (64, 64))
hand = hand / 255  # normalise pixel values to [0, 1]
img = image.img_to_array(hand)
img = np.expand_dims(img, axis=0)
# predict_classes() was removed from recent Keras releases; taking the argmax
# of predict() is the equivalent.
pred = np.argmax(model.predict(img), axis=-1)
print(pred)
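
To turn the predicted index into a human-readable label, a mapping such as the one below is needed; the ordering here is an assumption and must match the class ordering used when m.h5 was trained.

import string

class_labels = list(string.ascii_lowercase)  # assumed: one class per letter
print('Predicted letter:', class_labels[int(pred[0])])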
Code Example #10
def getHandImage(src_img):
    # Segment the source image, then crop out the hand region.
    mask1 = segment(src_img)
    handFound, hand, contours_of_hand = utils.get_my_hand(mask1)
    return hand
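
A short usage sketch for this helper (the image path is a placeholder; the returned hand may be None when no hand is found, depending on the get_my_hand version in use):

import cv2

img = cv2.imread('sample_frame.png')
hand_img = getHandImage(img)
if hand_img is not None:
    cv2.imwrite('hand_only.png', hand_img)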