def investigate_dataset_size(db, feature_extraction_methods, args, proportions,
                             best_params, config_path, size_invest_path,
                             imgs_per_person=10):
    """Measure recognition accuracy as a function of training-set size.

    For each feature-extraction method (using its previously tuned best
    parameter), train on increasing proportions of the database, record the
    accuracy, dump the best result to a YAML config, and save a plot.

    Args:
        db: face database accepted by ``train_and_test_split``.
        feature_extraction_methods: callables whose ``__name__`` labels output.
        args: per-method dicts; first key maps the tuned parameter name,
            ``"const_args"`` holds fixed extra kwargs (or None).
        proportions: training-set fractions to evaluate.
        best_params: tuned parameter value per method (parallel to ``args``).
        config_path: YAML file the best results are written to.
        size_invest_path: directory where plots are saved.
        imgs_per_person: images per subject in the database, used to convert a
            proportion into a training-image count for the plot's x-axis.
            Default 10 preserves the original hard-coded behavior
            (ORL-style databases) -- TODO confirm against the dataset used.
    """
    config_dict = {}
    for feat_extr_meth, par, best_param in zip(feature_extraction_methods,
                                               args, best_params):
        # Extractor kwargs: the tuned parameter plus any fixed constants.
        arg_dict = {list(par.keys())[0]: best_param}
        if par["const_args"] is not None:
            arg_dict.update(par["const_args"])
        accuracy_values = []
        for proportion in proportions:
            X_train, y_train, X_test, y_test = train_and_test_split(
                db, proportion)
            recognizer = FaceRecognizer(feat_extr_meth, arg_dict)
            recognizer.fit(X_train, y_train)
            accuracy_values.append(find_accuracy(X_test, y_test, recognizer))
        method_name = feat_extr_meth.__name__
        # NOTE(review): the key is called 'best_param' to mirror
        # investigate_params' output schema, but here it stores the best
        # *proportion*. Kept as-is so existing config readers keep working.
        config_dict[method_name] = {
            'best_param': proportions[np.argmax(accuracy_values)],
            'best_accuracy': max(accuracy_values),
        }
        # Re-dump after each method so partial results survive interruption.
        with open(config_path, 'w') as f:
            yaml.dump(config_dict, f)
        # x-axis in absolute image counts (was a hard-coded factor of 10).
        train_imgs_count = [int(p * imgs_per_person) for p in proportions]
        file_path = os.path.join(size_invest_path, method_name)
        make_plot(train_imgs_count, accuracy_values, file_path, method_name,
                  "Dataset size investigation")
        print(f"Dataset size investigation for {method_name} is done")
def person_recognition_example(db, proportion, feature_extraction_methods,
                               args, images_path):
    """Show a query face and, per method, the training face it matched.

    Displays the first test image (scaled 3x), then for each method fits a
    recognizer, predicts on that image, and shows/saves the matched training
    image. Originals (unscaled) are written to ``images_path``.

    Args:
        db: face database accepted by ``train_and_test_split``.
        proportion: training-set fraction.
        feature_extraction_methods: callables whose ``__name__`` labels output.
        args: per-method kwargs dicts passed straight to ``FaceRecognizer``.
        images_path: directory where PNGs are written.
    """
    X_train, y_train, X_test, y_test = train_and_test_split(db, proportion)
    # BUG FIX: a 2-D numpy image's shape is (rows, cols) == (height, width).
    # The original unpacked it as (width, height), which swapped the
    # dimensions handed to cv2.resize for non-square images.
    img_height, img_width = X_test[0].shape
    new_width, new_height = img_width * 3, img_height * 3
    # cv2.resize expects dsize as (width, height).
    resized_img = cv2.resize(X_test[0], (new_width, new_height))
    show_image(resized_img, "Original image")
    cv2.imwrite(os.path.join(images_path, "Original image.png"), X_test[0])
    for feat_extr_meth, par in zip(feature_extraction_methods, args):
        recognizer = FaceRecognizer(feat_extr_meth, par)
        recognizer.fit(X_train, y_train)
        prediction, pred_number = recognizer.predict(X_test[0])
        method_name = feat_extr_meth.__name__
        resized_img = cv2.resize(X_train[pred_number],
                                 (new_width, new_height))
        show_image(resized_img, method_name)
        cv2.imwrite(os.path.join(images_path, f"{method_name}.png"),
                    X_train[pred_number])
def investigate_params(X_train, y_train, X_test, y_test,
                       feature_extraction_methods, args, param_graphs_path,
                       config_path):
    """Sweep one parameter per extraction method and pick the best value.

    For each method, evaluates every candidate value of its tuned parameter,
    records accuracy on the test split, dumps the best (value, accuracy) pair
    per method to a YAML config, and saves an accuracy-vs-parameter plot.

    Args:
        X_train, y_train, X_test, y_test: pre-split data and labels.
        feature_extraction_methods: callables whose ``__name__`` labels output.
        args: per-method dicts; the first key maps the tuned parameter name to
            its candidate values, ``"const_args"`` holds fixed extra kwargs
            (or None). Relies on dict insertion order putting the tuned
            parameter first.
        param_graphs_path: directory where plots are saved.
        config_path: YAML file the best results are written to.

    Returns:
        list: the best parameter value for each method, in input order.
    """
    best_params = []
    config_dict = {}
    for feat_extr_meth, par in zip(feature_extraction_methods, args):
        # Hoisted: the original re-derived these from `par` on every use
        # (and misspelled one as 'paameters').
        param_name = list(par.keys())[0]
        param_values = list(par.values())[0]
        accuracy_values = []
        for value in param_values:
            arg_dict = {param_name: value}
            if par["const_args"] is not None:
                arg_dict.update(par["const_args"])
            recognizer = FaceRecognizer(feat_extr_meth, arg_dict)
            recognizer.fit(X_train, y_train)
            accuracy_values.append(find_accuracy(X_test, y_test, recognizer))
        best_param = param_values[np.argmax(accuracy_values)]
        best_params.append(best_param)
        method_name = feat_extr_meth.__name__
        config_dict[method_name] = {
            'best_param': best_param,
            'best_accuracy': max(accuracy_values),
        }
        # Re-dump after each method so partial results survive interruption.
        with open(config_path, 'w') as f:
            yaml.dump(config_dict, f)
        file_path = os.path.join(param_graphs_path, method_name)
        make_plot(param_values, accuracy_values, file_path, method_name,
                  "Parameters investigation")
        print(f"Parameters investigation for {method_name} is done")
    return best_params