class AiManager:
    THRESHOLD = 0.8
    CLASS_NAMES = ['left', 'right', 'space']
    training = False

    def __init__(self, inception_location, train_data_location):
        self.inception = InceptionClassifier(inception_location)
        self.train_dict = self.load_images_into_dict(train_data_location)
        features, classes = self.get_features_and_classes_from_dict(self.train_dict)
        self.knn = KnnClassifier(features, classes)

    def get_features_and_classes_from_dict(self, train_dict):
        classes = []
        image_features = []
        for key in sorted(train_dict.keys()):
            for feature in train_dict[key]:
                image_features.append(feature)
                classes.append(key)
        return image_features, classes

    def load_images_into_dict(self, image_folder):
        train_dict = {'left': [], 'right': [], 'space': []}
        for dir in sorted(os.listdir(image_folder)):
            if os.path.isdir(os.path.join(image_folder, dir)):
                for file in os.listdir(os.path.join(image_folder, dir)):
                    if os.path.isfile(os.path.join(image_folder, dir, file)) and 'jpg' in file:
                        print('Calculating features for: {}'.format(
                            os.path.join(image_folder, dir, file)))
                        image = cv2.imread(os.path.join(image_folder, dir, file))
                        image_feature = self.inception.get_features_from_image(image)
                        train_dict.get(dir).append(image_feature)
        return train_dict

    def classify_gesture_on_image(self, image):
        start = time.time()
        image_features = self.inception.get_features_from_image(image)
        predicted_class, probability = self.knn.predict_proba_for_image_features(
            image_features, self.CLASS_NAMES)
        print('Time to analyze frame: {} ms'.format((time.time() - start) * 1000))
        print(predicted_class, probability)
        # if probability >= self.THRESHOLD:
        #     # press(predicted_class)
        #     return predicted_class
        return predicted_class, probability
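# Minimal usage sketch for AiManager, assuming cv2 and the project classes are
# importable in this module; the model path, training-image folder, and test frame
# below are hypothetical placeholders, not paths from the original project.
if __name__ == '__main__':
    manager = AiManager('models/inception.pb', 'training_images/')
    frame = cv2.imread('test_frame.jpg')  # stand-in for a frame grabbed from the webcam
    gesture, prob = manager.classify_gesture_on_image(frame)
    if prob >= AiManager.THRESHOLD:
        print('Detected gesture: {} ({:.2f})'.format(gesture, prob))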
def main(k, data_set_name=None):
    split_percentage = 0.7

    # Load dataset
    if data_set_name is not None:
        data_set = read_file_into_dataset(
            "C:\\Users\\Grant\\Documents\\School\\Winter 2016\\CS 450\\Prove01\\" + data_set_name)
        data_set = randomize_dataset(data_set)
    else:
        data_set_name = "iris"
        iris = datasets.load_iris()
        data_set = randomize_dataset(iris)

    data_set.data = normalize(data_set.data)
    data_sets = split_dataset(data_set, split_percentage)
    training_set = data_sets['train']
    testing_set = data_sets['test']

    # My Classifier
    knnClassifier = KnnClassifier()
    knnClassifier.k = k
    knnClassifier.train(training_set.data, training_set.target, training_set.target_names)
    predictions = knnClassifier.predict(testing_set.data)
    my_accuracy = get_accuracy(predictions, testing_set.target)

    # Better Classifier
    better_classifier = KNeighborsClassifier(n_neighbors=k)
    better_classifier.fit(training_set.data, training_set.target)
    predictions = better_classifier.predict(testing_set.data)
    better_accuracy = get_accuracy(predictions, testing_set.target)

    print("My results: " + str(my_accuracy) + "%")
    print("Better results: " + str(better_accuracy) + "%")

    results = ("k = " + str(k) + "\nMy results: " + str(my_accuracy) + "%\n" +
               "Better results: " + str(better_accuracy) + "%\n")
    write_to_results_file(
        os.getcwd() + os.sep + ".." + os.sep + str(k) + "-" + data_set_name + "_results.txt",
        results, k)
def run_knn_with_k_alpha_kfold(k, alpha, X_train, y_train, cv, seed=0, n_iters=3):
    scoring = {'acc': 'accuracy', 'f1': 'f1_macro'}
    results = {'acc': [], 'f1': [], 'times': []}
    clf = KnnClassifier(k=k, alpha=alpha)
    res = cross_validate(clf, X_train, y_train, cv=cv, scoring=scoring,
                         return_train_score=False, n_jobs=-1)
    results['acc'].append(np.mean(res['test_acc']))
    results['f1'].append(np.mean(res['test_f1']))
    results['times'].append(np.mean(res['score_time']))
    return results
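# Sketch of how the cv argument to run_knn_with_k_alpha_kfold might be built: a
# pre-materialized list of StratifiedKFold splits, mirroring what the functions below
# do internally. The synthetic dataset and the k/alpha values are illustrative only,
# and the project's KnnClassifier is assumed to be importable in this module.
if __name__ == '__main__':
    from sklearn.datasets import make_classification
    X_demo, y_demo = make_classification(n_samples=300, n_features=20,
                                         n_informative=10, n_classes=3,
                                         random_state=0)
    skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
    folds = list(skf.split(X_demo, y_demo))
    demo = run_knn_with_k_alpha_kfold(k=5, alpha=10, X_train=X_demo,
                                      y_train=y_demo, cv=folds)
    print('mean accuracy:', demo['acc'][0])
    print('mean macro F1:', demo['f1'][0])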
def run_knn_with_k_alpha(k, alpha, X_train, y_train, cv_k=5, seed=0, n_iters=3):
    scoring = {'acc': 'accuracy', 'f1': 'f1_macro'}
    results = {'acc': [], 'f1': [], 'times': []}
    clf = KnnClassifier(k=k, alpha=alpha)
    # shuffle=True is required by scikit-learn when random_state is set
    cv = StratifiedKFold(n_splits=cv_k, shuffle=True, random_state=seed)
    cv = list(cv.split(X_train, y_train))[:n_iters]
    res = cross_validate(clf, X_train, y_train, cv=cv, scoring=scoring,
                         return_train_score=False, n_jobs=-1)
    results['acc'].append(np.mean(res['test_acc']))
    results['f1'].append(np.mean(res['test_f1']))
    results['times'].append(np.mean(res['score_time']))
    return results
def crossval_incrementing_k(X, y, k_range, alpha, cv_k, seed=0, n_iters=3,
                            with_pca=True, quiet=True, times=1):
    # n_iters is the number of k-fold iterations that are actually run.
    scoring = {'acc': 'accuracy', 'f1': 'f1_macro'}
    results = {'acc': [], 'f1': [], 'times': []}
    for k in k_range:
        clf = KnnClassifier(k=k, alpha=alpha, with_pca=with_pca, quiet=quiet)
        # shuffle=True is required by scikit-learn when random_state is set
        cv = StratifiedKFold(n_splits=cv_k, shuffle=True, random_state=seed)
        cv = list(cv.split(X, y))[:n_iters]
        res_times = []
        for _ in range(times):
            res = cross_validate(clf, X, y, cv=cv, scoring=scoring,
                                 return_train_score=False, n_jobs=-1)
            res_times.append(np.mean(res['score_time']))
        results['acc'].append(np.mean(res['test_acc']))
        results['f1'].append(np.mean(res['test_f1']))
        results['times'].append(np.mean(res_times))
        print(k)
    return results
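# Sketch of a k sweep with crossval_incrementing_k, assuming the project's
# KnnClassifier accepts the alpha/with_pca options shown above; the digits dataset,
# the k range, and alpha=20 are illustrative stand-ins for the real data and
# hyperparameters.
if __name__ == '__main__':
    from sklearn.datasets import load_digits
    X_demo, y_demo = load_digits(return_X_y=True)
    ks = list(range(1, 21, 2))
    sweep = crossval_incrementing_k(X_demo, y_demo, ks, alpha=20, cv_k=5)
    best_k = ks[int(np.argmax(sweep['f1']))]
    print('best k by macro F1:', best_k)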
from knn_classifier import KnnClassifier
from cross_validation import CrossValidation
from preprocessor import Preprocessor
from hybrid_classifier import HybridClassifier
from rna_module import RnaModule
from knn_module import KnnModule
from evaluate_module import EvaluateModule
from dataSet import DataSet

dts = DataSet()
dts.setFilePath("bases/sub_bases/")

# KNN configuration
knn = KnnModule()
knn.setKNeighbors(1)
knn_classifier = KnnClassifier()
knn_classifier.setKnn(knn)

# Preprocessor for categorical attributes
preprocessor = Preprocessor()
preprocessor.setColumnsCategory(['protocol_type', 'service', 'flag'])

evaluate = EvaluateModule()

cross = CrossValidation()
# Set which cross-validation iteration this run is at
cross.setIteration(1)
cross.setPreprocessor(preprocessor)
# Keep feature columns whose first quartile is not 1 (the loop header is truncated
# in the original excerpt; it is reconstructed here as a pass over the feature columns)
index = []
for i in range(len(df.columns) - 1):
    if df[df.columns[i]].quantile(0.25) != 1:
        index.append(i)

features = df[df.columns[index]]
labels = df[df.columns[len(df.columns) - 1]]

# Replace the letter labels with numbers for ease of use
labels = labels.replace('A', 1).replace('B', 2).replace('C', 3).replace(
    'D', 4).replace('E', 5)

X = features.values
y = labels.values
X_train, X_test, Y_train, Y_test = train_test_split(features.values,
                                                    labels.values,
                                                    test_size=0.2)

# Run the knn classifier on the dataset
start = timeit.default_timer()
knnClf = KnnClassifier(X_train, Y_train)
predictions = knnClf.predict(X_test=X_test, k=41)
stop = timeit.default_timer()
print("Run Time: ", stop - start)

# Transform the list into an array
predictions = np.asarray(predictions)

# Evaluate accuracy (accuracy_score returns a fraction, so scale it to a percentage)
accuracy = accuracy_score(Y_test[0:100], predictions)
print('\nThe accuracy of our classifier is %d%%' % (accuracy * 100))
args = parser.parse_args()

classifier = InceptionClassifier(args.model)
train_dict = {'left': [], 'right': [], 'space': []}
threshold = 0.8
class_names = ['left', 'right', 'space']
training = False
safe_thread = None

features, classes = load_images_from_folder(args.folder)
knn = KnnClassifier(features, classes)

vs = WebcamStream(0)
vs.start()

classify_thread = threading.Thread(target=classify_stream)
classify_thread.daemon = True
classify_thread.start()

root = Tk()
root.bind('<Escape>', lambda e: stop())
panel = None

label = Label(root, text="Click the buttons to start and stop recording gestures.")
label.grid(row=0, columnspan=4)

lbl = Label(root, text="Gestures to go left")
lbl.grid(row=1, column=1, columnspan=2)