Example 1
def trainSignDetector(train_list, label_encoder):
    """
            train_list : list of users to use for training
            eg ["user_1", "user_2", "user_3"]
        """
    imageset, _ = helpers.getHandSet(train_list, 'Dataset/')

    # Load data for the multiclass classification task
    X_mul, Y_mul = get_data(train_list, imageset, 'Dataset/')

    print("Multiclass data loaded")

    Y_mul = label_encoder.fit_transform(Y_mul)

    # Train the multiclass classifier using scikit-learn classifiers.
    # TODO: Experiment with different models and hyperparameters to see what gives the best results.
    # For a comparison of some options, see
    # http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html

    model = SVC(kernel='linear', C=0.9, probability=True)

    signDetector = model.fit(X_mul, Y_mul)

    print("Sign Detector Trained")

    return signDetector
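
A minimal usage sketch for the function above, assuming the scikit-learn LabelEncoder, user IDs, and Dataset/ layout shown in the commented-out driver code of Example 2:

from sklearn.preprocessing import LabelEncoder

# Fit the encoder on the 24 static letters used throughout these examples.
label_encoder = LabelEncoder().fit([
    'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N',
    'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y'
])

# Train on a subset of the users listed in Example 2's driver code.
train_list = ['user_3', 'user_4', 'user_5']
signDetector = trainSignDetector(train_list, label_encoder)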
Example 2
def get_accuracy(test_list, label_encoder, signDetector):
    #initialize correct counter to 0
    correct = 0
    imageset, _ = helpers.getHandSet(test_list, 'Dataset/')
    #get test data
    X, Y = get_data(test_list, imageset, 'Dataset/')
    Y = label_encoder.transform(Y)
    #total is the number of examples in test data
    total = len(X)
    #for each example in test data
    for i in range(len(X)):
        #get the model's prediction for that example
        outp = label_encoder.transform(
            [helpers.predict(label_encoder, signDetector, X[i])])[0]
        #if the prediction matched the correct label, increment the correct counter
        if Y[i] == outp:
            correct += 1

    return correct / total


#label_encoder = LabelEncoder().fit(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N',
#       'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y'])
#
#user_list =['user_3', 'user_4','user_5','user_6','user_7','user_9','user_10']
#
#trainlistsize = len(user_list)//2
#
#train_list = user_list[:trainlistsize]
#test_list = user_list[trainlistsize:]
#
#signDetector = trainSignDetector(train_list, label_encoder)
#matrix = get_confusion_matrix(test_list, label_encoder, signDetector)
#accuracy = get_accuracy(test_list, label_encoder, signDetector)
Example 3
def get_accuracy(test_list, signDetector, handDetector, label_encoder):
    #get dictionary of form {filename: imagearray}
    imageset, _ = helpers.getHandSet(test_list, 'Dataset/')
    #total number of examples in the test set
    total = len(imageset)
    #number of examples correctly classified
    correct = 0
    #incrementor to keep track of progress
    inc = 0
    #for each example in the test set
    for key in imageset:
        inc += 1
        print(inc)
        #get the predicted label
        outp = label_encoder.transform([
            recognize_gesture(imageset[key], handDetector, signDetector,
                              label_encoder)[2]
        ])[0]
        #get the actual label
        label = label_encoder.transform([key.split('/')[2][0]])[0]
        #if predicted and actual labels match, increment number correct
        if outp == label:
            correct += 1
    #accuracy is number correct / total number
    return correct / total
Example 4
def load_binary_data(user_list, data_directory):
    # data1 holds the actual images; df holds the ground-truth bounding boxes
    data1, df = helpers.getHandSet(user_list, data_directory)

    # z is a pair: a list of HOG vectors and their hand / not-hand labels
    z = buildhandnothand_lis(df, data1)
    return data1, df, z[0], z[1]
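
The last two return values are presumably the features and labels for the binary hand / not-hand classifier that later examples expect as handDetector. A hedged sketch of how they might be consumed; the linear SVC (mirroring Example 1) and the user list are assumptions, not part of the original code:

from sklearn.svm import SVC

# Hypothetical training of the binary hand / not-hand detector on the HOG
# vectors and labels returned above; the SVC settings are assumptions.
images, boxes, hog_vecs, hog_labels = load_binary_data(['user_3'], 'Dataset/')
handDetector = SVC(kernel='linear', probability=True).fit(hog_vecs, hog_labels)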
Example 5
def get_confusion_matrix(test_list, label_encoder, signDetector):
    #initialize all matrix values to 0
    matrix = np.zeros((24, 24))
    imageset, _ = helpers.getHandSet(test_list, 'Dataset/')
    #get test data
    X, Y = get_data(test_list, imageset, 'Dataset/')
    Y = label_encoder.transform(Y)
    #for each example in test data
    for i in range(len(X)):
        #get the model's prediction for that example
        outp = label_encoder.transform(
            [helpers.predict(label_encoder, signDetector, X[i])])[0]
        #increment appropriate cell in matrix
        matrix[int(Y[i])][int(outp)] += 1

    return matrix
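
In the matrix above, the row index is the true class and the column index is the prediction, matching the way the cells are incremented. A small post-processing sketch (not part of the original code) that summarizes it as per-letter accuracy using the encoder's classes_ attribute:

import numpy as np

def per_class_accuracy(matrix, label_encoder):
    # Diagonal entries are correct predictions; row sums are per-class totals.
    totals = matrix.sum(axis=1)
    accuracies = np.divide(np.diag(matrix), totals,
                           out=np.zeros_like(totals), where=totals > 0)
    return dict(zip(label_encoder.classes_, accuracies))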
Example 6
def get_confusion_matrix(test_list, signDetector, handDetector, label_encoder):
    #initialize values to 0
    matrix = np.zeros((24, 24))

    #get dictionary of form {filename: imagearray}
    imageset, _ = helpers.getHandSet(test_list, 'Dataset/')
    #incrementor to keep track of progress
    inc = 0
    #for each example in the test set
    for key in imageset:
        inc += 1
        print(inc)
        #get the predicted label
        outp = label_encoder.transform([
            recognize_gesture(imageset[key], handDetector, signDetector,
                              label_encoder)[2]
        ])[0]
        #get the actual label
        label = label_encoder.transform([key.split('/')[2][0]])[0]
        #increment the appropriate cell in the confusion matrix
        matrix[label][outp] += 1
    return matrix