def train(train_data, train_labels):
    # Loading instances
    classes = 10
    hidden_layers = 50
    instance_nn = ClassNN(model_dir='/tmp/model', classes=classes, hidden_number=hidden_layers)

    print('Training...')
    instance_nn.train_model(train_data, train_labels)
    print('Done Training')
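# Note (added, not in the original source): ClassNN is a project-specific wrapper.
# From the calls made throughout these snippets, its assumed interface is:
#
#   nn = ClassNN(model_dir, classes, hidden_number)    # keyword form also takes learning_rate
#   nn.train_model(train_data, train_labels)           # optionally steps=..., label_names=...
#   accuracy = nn.eval_model(eval_data, eval_labels)   # returns a scalar accuracy
#   result = nn.predict_model(item)                    # dict with 'classes' and 'probabilities'
#   result = nn.predict_model_fast(item)               # same dict, used inside tight loops
#
# This is a reading of the usage below, not a definitive API description.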
def main():
    print('Initializing main function')
    # Initializing instances
    instance_pose = ClassOpenPose()

    folder_training_1 = '/home/mauricio/Pictures/Poses/Walk_Front'
    folder_training_2 = '/home/mauricio/Pictures/Poses/Vehicle'
    folder_training_3 = '/home/mauricio/Pictures/Poses/Tires'

    # One class index per folder (Walk_Front=0, Vehicle=1, Tires=2)
    data_1 = get_sets_folder(folder_training_1, 0, instance_pose)
    data_2 = get_sets_folder(folder_training_2, 1, instance_pose)
    data_3 = get_sets_folder(folder_training_3, 2, instance_pose)

    data_training = np.asarray(data_1[0] + data_2[0] + data_3[0], dtype=float)
    label_training = np.asarray(data_1[1] + data_2[1] + data_3[1], dtype=int)
    data_eval = np.asarray(data_1[2] + data_2[2] + data_3[2], dtype=float)
    label_eval = np.asarray(data_1[3] + data_2[3] + data_3[3], dtype=int)

    print(data_training)
    print(label_training)
    print('Len data_training: {0}'.format(data_training.shape))
    print('Len data_eval: {0}'.format(data_eval.shape))

    classes = 3
    hidden_neurons = 15
    model_dir = '/tmp/nnposes/'
    instance_nn = ClassNN(model_dir, classes, hidden_neurons)

    option = input('Press 1 to train the model, 2 to eval, otherwise to predict')
    if option == '1':
        print('Training the model')
        # Delete previous folder to avoid conflicts in the training process
        if os.path.isdir(model_dir):
            # Removing directory
            shutil.rmtree(model_dir)
        instance_nn.train_model(data_training, label_training)
    elif option == '2':
        print('Eval the model')
        instance_nn.eval_model(data_eval, label_eval)
    else:
        print('Not implemented!')

    print('Done!')
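# Illustrative sketch (added): get_sets_folder() is not shown in this excerpt. From the
# indexing above it is assumed to return a 4-tuple
# (train_data, train_labels, eval_data, eval_labels) for one folder/label pair.
# A minimal stand-in with an 80/20 split; extract_descriptor() below is hypothetical and
# stands for whatever pose-descriptor extraction the project performs with ClassOpenPose.
def get_sets_folder_sketch(folder_path, label, instance_pose, eval_fraction=0.2):
    import os

    descriptors = []
    for name in sorted(os.listdir(folder_path)):
        full_path = os.path.join(folder_path, name)
        # Hypothetical helper: turn one image into a pose descriptor vector.
        descriptors.append(extract_descriptor(instance_pose, full_path))

    cut = int(len(descriptors) * (1 - eval_fraction))
    train_part, eval_part = descriptors[:cut], descriptors[cut:]
    return (train_part, [label] * len(train_part),
            eval_part, [label] * len(eval_part))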
def train_model(train_data_np: np.ndarray, train_labels_np: np.ndarray,
                eval_data_np: np.ndarray, eval_labels_np: np.ndarray,
                instance_nn_train: ClassNN, steps):
    print('Training model into list')

    # Init training!
    # instance_train.update_batch_size(training_data_np.shape[0])
    start = time.time()
    instance_nn_train.train_model(train_data_np, train_labels_np, steps=steps)
    end = time.time()
    print('Training time: {0} s'.format(end - start))

    # Performing data evaluation
    eval_model(eval_data_np, eval_labels_np, instance_nn_train)
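# Illustrative sketch (added): the module-level eval_model() called above is not shown
# in this excerpt. Based on how the wrapper's eval_model is used elsewhere in these
# snippets (it returns a scalar accuracy), a minimal version could look like:
def eval_model_sketch(eval_data_np: np.ndarray, eval_labels_np: np.ndarray, instance_nn_train):
    # Delegate to the wrapper's own evaluation and report the accuracy it returns.
    accuracy = instance_nn_train.eval_model(eval_data_np, eval_labels_np)
    print('Accuracy: {0}'.format(accuracy))
    return accuracy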
def main():
    print('Initializing main function')
    print('Loading datasets')
    train_data, train_labels = ClassImageDataSet.load_train_mnist()
    eval_data, eval_labels = ClassImageDataSet.load_eval_mnist()

    print('PCA with training data')
    n_features = 18
    pca = PCA(n_components=n_features, svd_solver='randomized').fit(train_data)
    train_pca = pca.transform(train_data)

    n_classes = 10
    hidden_neurons = 100
    eval_pca = pca.transform(eval_data)

    print('Printing shapes')
    print(train_data.shape)
    print(train_pca.shape)

    model_dir = '/tmp/model_example_pca'
    classifier = ClassNN(model_dir, n_classes, hidden_neurons)

    var = input('Set 1 to train, 2 to predict. Otherwise to eval ')
    if var == '1':
        print('Training model')
        classifier.train_model(train_pca, train_labels)
    elif var == '2':
        print('Predict model')
        print('Total elements: ' + str(eval_pca.shape[0]))
        index = 1100
        eval_item = eval_pca[index]
        print(eval_item.shape)

        result = classifier.predict_model(eval_item)
        print('Result obtained: ' + str(result['classes']))
        print('Print probabilities')
        print(result['probabilities'])
        print('Real result: ' + str(eval_labels[index]))
    else:
        print('Evaluating model')
        classifier.eval_model(eval_pca, eval_labels)

    print('Done!')
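# Optional check (added): with scikit-learn's PCA, explained_variance_ratio_ shows how
# much variance the retained components keep, which helps justify n_features=18 before
# training. This helper only uses the standard scikit-learn/NumPy API.
def report_pca_variance(train_data, n_features=18):
    import numpy as np
    from sklearn.decomposition import PCA

    # Fit PCA exactly as in main() and report the cumulative explained variance.
    pca = PCA(n_components=n_features, svd_solver='randomized').fit(train_data)
    cumulative = np.cumsum(pca.explained_variance_ratio_)
    print('Variance kept by {0} components: {1:.3f}'.format(n_features, cumulative[-1]))
    return cumulative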
def classify_images(list_folder_data: list, type_desc: EnumDesc):
    # Count the distinct class indices present in list_folder_data
    classes_number = 0
    cont = True
    while cont:
        cont = False
        for folder_data in list_folder_data:
            if folder_data[2] == classes_number:
                classes_number += 1
                cont = True
                break

    hidden_number = 60
    learning_rate = 0.005
    steps = 20000

    # Initialize classifier instance
    nn_classifier = ClassNN(model_dir=ClassNN.model_dir_pose,
                            classes=classes_number,
                            hidden_number=hidden_number,
                            learning_rate=learning_rate)

    results = ClassLoadDescriptors.load_pose_descriptors(type_desc)

    training_data_np = results['trainingData']
    training_labels_np = results['trainingLabels']
    eval_data_np = results['evalData']
    eval_labels_np = results['evalLabels']
    training_files_np = results['trainingFiles']
    eval_files_np = results['evalFiles']
    label_names = results['labelNames']

    # Prompt for user input
    selection = input('Training selected {0}. '
                      'Press 1 to train, 2 to evaluate, 3 to predict, 4 to save csv, '
                      '5 to get confusion matrix: '.format(type_desc))

    if selection == '1':
        # Training
        nn_classifier.train_model(train_data=training_data_np,
                                  train_labels=training_labels_np,
                                  label_names=label_names,
                                  steps=steps)
        # Evaluate after training
        nn_classifier.eval_model(eval_data_np, eval_labels_np)
    elif selection == '2':
        # Evaluate
        nn_classifier.eval_model(eval_data_np, eval_labels_np)
    elif selection == '3':
        # Predict
        # Select data to eval
        data_eval = eval_data_np[0]
        label_eval = eval_labels_np[0]

        results = nn_classifier.predict_model(data_eval)
        print('Predict data np: {0}'.format(results))
        print('Expected data np: {0}'.format(label_eval))
    elif selection == '4':
        # Saving file in csv
        total_data = np.concatenate((training_data_np, eval_data_np), axis=0)
        total_labels = np.concatenate((training_labels_np, eval_labels_np), axis=0)
        total_files = np.concatenate((training_files_np, eval_files_np))

        # Add new axis to allow concatenation
        total_labels = total_labels[:, np.newaxis]
        total_files = total_files[:, np.newaxis]
        total_np = np.concatenate((total_data, total_labels), axis=1)

        print('Saving data to CSV in file {0}'.format(csv_dir))
        np.savetxt(csv_dir, total_np, delimiter=',', fmt='%10.10f')
        np.savetxt(csv_dir_files, total_files, delimiter=',', fmt='%s')

        print('Saving training data')
        # Concatenate with new axis
        total_np_train = np.concatenate((training_data_np, training_labels_np[:, np.newaxis]), axis=1)
        total_np_eval = np.concatenate((eval_data_np, eval_labels_np[:, np.newaxis]), axis=1)

        # Saving
        np.savetxt(csv_train, total_np_train, delimiter=',', fmt='%10.10f')
        np.savetxt(csv_eval, total_np_eval, delimiter=',', fmt='%10.10f')
        print('Done writing file in CSV')
    elif selection == '5':
        print('Getting confusion matrix')

        confusion_np = np.zeros((classes_number, classes_number))
        for i in range(eval_data_np.shape[0]):
            data = eval_data_np[i]
            expected = eval_labels_np[i]
            obtained = nn_classifier.predict_model_fast(data)
            class_prediction = obtained['classes']
            print('Class: {0}'.format(class_prediction))

            confusion_np[expected, class_prediction] += 1

        print('Confusion matrix')
        print(confusion_np)
        print('Labels: {0}'.format(label_names))
    else:
        raise Exception('Option not supported')
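# Cross-check (added): the manual confusion matrix built in option '5' can be verified
# with scikit-learn, which builds the same matrix from lists of expected and predicted
# labels (collect them in the loop above and pass them here).
def confusion_matrix_check(expected_labels, predicted_labels):
    from sklearn.metrics import confusion_matrix

    # Rows are expected classes, columns are predicted classes, matching the
    # confusion_np[expected, class_prediction] += 1 convention used above.
    return confusion_matrix(expected_labels, predicted_labels)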
def train_nn_cnn(training_list_poses, training_labels, eval_list_poses, eval_labels, option, base_data):
    print('Init NN Training')

    if option != Option.NN and option != Option.CNN:
        raise Exception('Option not valid: {0}'.format(option))

    # Training descriptors
    list_descriptors = list()
    for index_pose in range(len(training_list_poses)):
        list_poses = training_list_poses[index_pose]

        if option == Option.NN:
            descriptor = get_nn_descriptor(list_poses)
        else:
            descriptor = get_cnn_descriptor_pos(list_poses)

        list_descriptors.append(descriptor)

    training_descriptors_np = np.asanyarray(list_descriptors, dtype=float)
    training_labels_np = np.asanyarray(training_labels, dtype=int)

    # Eval descriptors
    list_descriptors = list()
    for index_pose in range(len(eval_list_poses)):
        list_poses = eval_list_poses[index_pose]

        if option == Option.NN:
            descriptor = get_nn_descriptor(list_poses)
        else:
            descriptor = get_cnn_descriptor_pos(list_poses)

        list_descriptors.append(descriptor)

    eval_descriptors_np = np.asanyarray(list_descriptors, dtype=float)
    eval_labels_np = np.asanyarray(eval_labels, dtype=int)

    # Initializing training instance
    classes = len(list_classes_classify)

    if option == Option.NN:
        hidden_number = 50
        instance_model = ClassNN(ClassNN.model_dir_action, classes, hidden_number)
    else:
        instance_model = ClassCNN(ClassCNN.model_dir_action, classes, cnn_image_height, cnn_image_height,
                                  depth, batch_size=32, train_steps=15000)

    print('Training model')
    instance_model.train_model(training_descriptors_np, training_labels_np)

    print('Model trained - Evaluating')
    accuracy = instance_model.eval_model(eval_descriptors_np, eval_labels_np)

    # Evaluating all elements
    for folder_info in list_classes:
        folder = folder_info['folderPath']

        for root, _, files in os.walk(folder):
            for file in files:
                full_path = os.path.join(root, file)

                if '_{0}_partialdata'.format(base_data) in full_path:
                    process_file_action(full_path, option, instance_model=instance_model, accuracy=accuracy)
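# Illustrative sketch (added): get_nn_descriptor() and get_cnn_descriptor_pos() are not
# shown in this excerpt. For the NN branch, the descriptor is assumed to be a fixed-length
# 1-D vector built from a list of poses; a purely hypothetical flattening version:
def get_nn_descriptor_sketch(list_poses):
    # Concatenate the per-pose feature vectors into one flat descriptor.
    # The real project function may normalize or subsample; this only fixes the shape idea
    # (all samples must yield the same length for np.asanyarray to form a 2-D array).
    return np.concatenate([np.ravel(np.asarray(pose, dtype=float)) for pose in list_poses])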
def train_bow(training_list_cls, training_cls_labels, validate_list_cls, validate_cls_labels,
              eval_list_actions, eval_labels, option: Option, base_data_1, base_data_2):
    print('Training BoW')

    # Generating BoW descriptors
    train_descriptors = list()
    for list_actions in training_list_cls:
        words = get_bow_descriptors(list_actions)
        train_descriptors.append(words)

    descriptors_np = np.asanyarray(train_descriptors, dtype=float)
    training_labels_np = np.asanyarray(training_cls_labels, dtype=int)

    # Generating instance_nn and training the model
    cls_number = len(list_classes)
    hidden_neurons = 20
    instance_nn = ClassNN(ClassNN.model_dir_activity, cls_number, hidden_neurons)
    instance_nn.train_model(descriptors_np, training_labels_np)

    # Validating model
    validate_descriptors = list()
    for list_actions in validate_list_cls:
        words = get_bow_descriptors(list_actions)
        validate_descriptors.append(words)

    validate_descriptors_np = np.asanyarray(validate_descriptors, dtype=float)
    validate_labels_np = np.asanyarray(validate_cls_labels, dtype=int)

    accuracy = instance_nn.eval_model(validate_descriptors_np, validate_labels_np)
    print('Local accuracy: {0}'.format(accuracy))

    # Evaluating
    eval_descriptors = list()
    for list_actions in eval_list_actions:
        words = get_bow_descriptors(list_actions)
        eval_descriptors.append(words)

    eval_descriptors_np = np.asanyarray(eval_descriptors, dtype=float)
    eval_labels_np = np.asanyarray(eval_labels, dtype=int)

    real_accuracy = instance_nn.eval_model(eval_descriptors_np, eval_labels_np)
    print('Real accuracy: {0}'.format(real_accuracy))

    # Confusion matrix over the evaluation set
    classes_number = len(list_classes)
    confusion_np = np.zeros((classes_number, classes_number))

    for i in range(eval_descriptors_np.shape[0]):
        data = eval_descriptors_np[i]
        expected = eval_labels_np[i]
        obtained = instance_nn.predict_model_fast(data)
        class_prediction = obtained['classes']
        print('Class: {0}'.format(class_prediction))

        confusion_np[expected, class_prediction] += 1

    print('Confusion matrix')
    print(confusion_np)

    apply_classifier(option, base_data_1, base_data_2, instance_nn=instance_nn,
                     accuracy=accuracy, real_accuracy=real_accuracy)
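# Illustrative sketch (added): get_bow_descriptors() is not shown in this excerpt. A
# bag-of-words descriptor over recognized actions is assumed to be a per-class histogram;
# a minimal hypothetical version, assuming each element of list_actions carries (or is)
# an integer action class index:
def get_bow_descriptors_sketch(list_actions, classes_number):
    words = np.zeros(classes_number, dtype=float)
    for action_cls in list_actions:
        words[int(action_cls)] += 1  # count occurrences of each action class
    # Normalizing makes sequences of different lengths comparable (a common BoW choice,
    # not necessarily what the project does).
    if words.sum() > 0:
        words /= words.sum()
    return words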