def main():
    """Train (or load) an SVM spoof classifier on the MSU-MFSD features and
    report ROC-AUC, equal error rate, accuracy and the confusion matrix on the
    test split."""
    # NOTE(review): `version` and `extension` are not defined in this function —
    # presumably module-level constants; verify before running.
    saved_classifier_filename = '../classifiers/msu_mfsd' + version + extension

    # load or recompute train features. If none, the train features are not loaded into memory
    load_train_features = True
    # retrain or load classifier
    load_classifier = False
    # load or recompute test features
    load_test_features = True

    # descriptor computer
    mlbp_feature_computer = feature_computer.FrameFeatureComputer(
        features.MultiScaleLocalBinaryPatterns((8, 1), (8, 2), (16, 2)))
    #mlbp_feature_computer = feature_computer.FrameFeatureComputer(features.LocalBinaryPatterns(8,1))

    (train_features, train_labels) = get_train_features_and_labels(
        load_train_features, mlbp_feature_computer)

    if not load_classifier:
        # BUG FIX: the original constructed a GridSearchCV here and immediately
        # rebound `clf` to a plain SVC before fitting, so the grid search was
        # dead code and only wasted an object construction; it has been removed.
        # Best parameters previously found by the search:
        # C = 5.77218597038 gamma = 9.38268773999 class_weight = balanced kernel = linear
        # (gamma is ignored by the linear kernel but kept for reproducibility)
        clf = svm.SVC(verbose=True, probability=True, C=5.772185, gamma=9.3826877,
                      class_weight='balanced', kernel='linear')
        clf.fit(train_features, train_labels)
        joblib.dump(clf, saved_classifier_filename)
    else:
        clf = joblib.load(saved_classifier_filename)

    (test_features, test_labels) = get_test_features_and_labels(
        load_test_features, mlbp_feature_computer)

    # binarize labels for the ROC plot ({-1, 1} -> {0, 1})
    test_labels_bin = label_binarize(test_labels, classes=[-1, 1])

    pred_labels = clf.predict(test_features)
    pred_confidences = clf.predict_proba(test_features)

    plot_roc_curve(test_labels_bin, pred_confidences)

    from sklearn.metrics import roc_curve, accuracy_score, confusion_matrix, roc_auc_score, auc
    from scipy.optimize import brentq
    from scipy.interpolate import interp1d

    roc_auc = roc_auc_score(test_labels, pred_confidences[:, 1])
    print('ROC area under the curve score {}'.format(roc_auc))

    # compute the equal error rate: the operating point where FPR == 1 - TPR
    fpr, tpr, _ = roc_curve(test_labels, pred_confidences[:, 1])
    eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal error rate {}'.format(eer))

    print('Accuracy score {}'.format(accuracy_score(test_labels, pred_labels)))
    print('Confusion matrix {}'.format(confusion_matrix(test_labels, pred_labels)))
def main():
    """Subject-disjoint cross-validation of a linear SVM spoof classifier on
    the MSU-USSA features; prints the mean/std EER across folds and each fold's
    confusion matrix."""
    # pickle filename (classifier persistence path; currently unused below)
    saved_classifier_filename = "../classifiers/msu_mfsd.pkl"
    # load or recompute train features. If none, the train features are not loaded into memory
    load_train_features = True
    # retrain or load classifier
    load_classifier = True
    # load or recompute test features
    load_test_features = True

    # descriptor computer
    mlbp_feature_computer = feature_computer.FrameFeatureComputer(
        features.MultiScaleLocalBinaryPatterns((8, 1), (8, 2), (16, 2))
    )
    # mlbp_feature_computer = feature_computer.FrameFeatureComputer(features.LocalBinaryPatterns(8,1))

    # real (genuine) samples come as one array; spoof samples arrive as one
    # array per attack directory, each index-aligned with the real array's folds
    (
        real_features,
        spoof_features_per_dir,
        labels_real,
        labels_spoof_per_dir,
    ) = get_features_and_labels(load_train_features, mlbp_feature_computer)

    # here I should do a cross validation on the features
    """
    param_grid = [
        {'C': [0.0001, 0.001, 0.01], 'kernel':['linear'], 'class_weight':['balanced', None]},
        {'C': [0.0001, 0.001, 0.01], 'kernel':['rbf'],'gamma':[0.0001, 0.001],
        'class_weight':['balanced', None]}
    ]
    """
    # one fold per subject group, precomputed from the dataset layout
    test_fold = dbfeatures.compute_msu_ussa_subjects_folds_arr()
    ps = PredefinedSplit(test_fold=test_fold)

    clf = svm.SVC(
        verbose=True,
        probability=True,
        C=0.0001,
        kernel="linear",
        class_weight="balanced",
    )

    folds_eer = []          # equal error rate of each fold
    threshes = []           # decision threshold at the EER point of each fold
    confusion_matrices = [] # confusion matrix of each fold

    # NOTE(review): iterating a PredefinedSplit object directly is the old
    # scikit-learn cross-validation API; recent versions require
    # `ps.split()` — confirm the pinned sklearn version.
    for train_index, test_index in ps:
        # split the features into current train and test folds
        train_features = real_features[train_index]
        test_features = real_features[test_index]
        train_labels = labels_real[train_index]
        test_labels = labels_real[test_index]
        # append the same fold split of every spoof directory to both sets
        for i in range(len(spoof_features_per_dir)):
            train_features = np.concatenate(
                (train_features, spoof_features_per_dir[i][train_index]), 0
            )
            test_features = np.concatenate(
                (test_features, spoof_features_per_dir[i][test_index]), 0
            )
            train_labels = np.concatenate(
                (train_labels, labels_spoof_per_dir[i][train_index]), 0
            )
            test_labels = np.concatenate(
                (test_labels, labels_spoof_per_dir[i][test_index]), 0
            )

        # train the classifier
        clf.fit(train_features, train_labels)

        # use the classifier to predict the labels for test_features
        pred_labels = clf.predict(test_features)

        # create the roc curve
        # NOTE(review): built from hard predictions, not decision scores, so the
        # curve has very few points and the EER interpolation is coarse.
        fpr, tpr, threshold = roc_curve(test_labels, pred_labels, pos_label=1)

        # compute the equal error rate (operating point where FPR == 1 - TPR)
        eer = brentq(lambda x: 1.0 - x - interp1d(fpr, tpr)(x), 0.0, 1.0)
        thresh = interp1d(fpr, threshold)(eer)

        folds_eer.append(eer)
        threshes.append(thresh)

        conf_mat = confusion_matrix(test_labels, pred_labels)
        confusion_matrices.append(conf_mat)

    # print the mean and standard deviation of equal error rate across the folds
    print(np.mean(folds_eer), np.std(folds_eer))
    for conf_mat in confusion_matrices:
        print(conf_mat)
def main():
    """Train (or load) a linear SVM spoof classifier and report ROC-AUC, equal
    error rate, accuracy and the confusion matrix on the test split.

    NOTE(review): `load_train_features`, `load_classifier`,
    `load_test_features` and `saved_classifier_filename` are not defined in
    this function — they must exist as module-level configuration or this
    raises NameError (the sibling script variants define them locally).
    Verify before running.
    """
    # descriptor computer
    # NOTE(review): constructed but never used in this variant — the other
    # variants pass it to get_train_features_and_labels(); kept in case the
    # construction is relied upon, but confirm intent.
    mlbp_feature_computer = feature_computer.FrameFeatureComputer(
        features.MultiScaleLocalBinaryPatterns((8, 1), (8, 2), (16, 2)))

    (train_features, train_labels) = get_train_features_and_labels(load_train_features)

    if not load_classifier:
        # Dead `param_grid` local removed: it was only consumed by the
        # commented-out GridSearchCV below.
        # param_grid = [
        #     {'C': [0.0001, 0.001, 0.01], 'kernel': ['linear'], 'class_weight': ['balanced', None]},
        #     {'C': [0.0001, 0.001, 0.01], 'kernel': ['rbf'], 'gamma': [0.0001, 0.001],
        #      'class_weight': ['balanced', None]},
        # ]
        #clf = GridSearchCV(svm.SVC(verbose=True, probability=True), param_grid, verbose=True, n_jobs=4)
        clf = svm.SVC(verbose=True, probability=True, C=0.001, kernel='linear',
                      class_weight='balanced')
        #clf = svm.SVC(verbose=True, probability=True, C = 0.001, kernel='rbf', gamma=0.1, class_weight='balanced')
        clf.fit(train_features, train_labels)
        #print("Best estimator found by grid search:")
        #print(clf.best_estimator_)
        joblib.dump(clf, saved_classifier_filename)
    else:
        clf = joblib.load(saved_classifier_filename)

    (test_features, test_labels) = get_test_features_and_labels(load_test_features)

    # binarize labels for the ROC plot ({-1, 1} -> {0, 1})
    test_labels_bin = label_binarize(test_labels, classes=[-1, 1])

    pred_labels = clf.predict(test_features)
    pred_confidences = clf.predict_proba(test_features)

    plot_roc_curve(test_labels_bin, pred_confidences)

    from sklearn.metrics import roc_curve, accuracy_score, confusion_matrix, roc_auc_score, auc
    from scipy.optimize import brentq
    from scipy.interpolate import interp1d

    roc_auc = roc_auc_score(test_labels, pred_confidences[:, 1])
    print('ROC area under the curve score {}'.format(roc_auc))

    # compute the equal error rate (operating point where FPR == 1 - TPR)
    fpr, tpr, _ = roc_curve(test_labels, pred_confidences[:, 1])
    eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
    print('Equal error rate {}'.format(eer))

    print('Accuracy score {}'.format(accuracy_score(test_labels, pred_labels)))
    print('Confusion matrix {}'.format(
        confusion_matrix(test_labels, pred_labels)))
def main():
    """Run the live pipeline: grab camera frames, detect and align faces,
    reject spoofed faces (red rectangle) and identify genuine ones, showing
    the annotated feed until 'q' is pressed or the stream ends."""
    # cameraFeed = cv2.VideoCapture(0)
    cameraFeed = cv2.VideoCapture(args.captureDevice)

    known_faces_rep = loadKnownFaces(args.knownFacesDir)

    face_spoof_validator = faceSpoofValidation.FaceSpoofValidator(
        features.MultiScaleLocalBinaryPatterns((8, 1), (8, 2), (16, 2)),
        "faceSpoofDetection/classifiers/powerful_classifier.pkl",
    )

    while True:
        start_frame_processing = time.time()

        ret, orig_frame = cameraFeed.read()
        # BUG FIX: check the read result before touching the frame — on a
        # failed read `orig_frame` is None and `.shape` raised AttributeError
        # before the old `ret == False` check was ever reached.
        if not ret:
            break
        print("Dimmension of the frame is {}".format(orig_frame.shape))

        start = time.time()
        # detect on a scaled-down copy for speed; boxes are mapped back later
        frame = cv2.resize(orig_frame, (0, 0), fx=args.scale, fy=args.scale)
        bbs = align.getAllFaceBoundingBoxes(frame)
        if args.verbose:
            print("Detecting faces took {}".format(time.time() - start))
        if bbs is None:
            continue

        start = time.time()
        aligned_faces = []
        for bb in bbs:
            aligned_faces.append(
                align.align(
                    args.imgDim,
                    frame,
                    bb,
                    landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE,
                ))
        if args.verbose:
            print("Aligning faces took {}".format(time.time() - start))

        start = time.time()
        for i, bb in enumerate(bbs):
            # map the bounding box corners back to the original (unscaled) frame
            ll = (
                int(round(bb.left() / args.scale)),
                int(round(bb.bottom() / args.scale)),
            )
            ur = (
                int(round(bb.right() / args.scale)),
                int(round(bb.top() / args.scale)),
            )
            face = aligned_faces[i]
            if not face_spoof_validator.validate_face(face):
                # genuine face: run identification and draw its result
                identify_face(face, known_faces_rep, orig_frame, ll, ur)
            else:
                # spoof attempt: mark with a red rectangle
                cv2.rectangle(orig_frame, ll, ur, color=(0, 0, 255), thickness=3)
        if args.verbose:
            print("Identifying faces took {}".format(time.time() - start))

        cv2.imshow("id", orig_frame)

        if args.verbose:
            print("Processing a frame took {}".format(time.time() - start_frame_processing))

        if cv2.waitKey(1) & 0xFF == ord("q"):
            break
# Script prologue: build the shared multi-scale LBP feature computer and load
# the per-database features used to train the combined ("powerful") classifier.
from sklearn.metrics import roc_curve, accuracy_score, confusion_matrix, roc_auc_score
from scipy.optimize import brentq
from scipy.interpolate import interp1d
import joblib

import idiap_classifier
import casia_classifier
import msu_mfsd_classifier
import msu_ussa_classifier
import feature_computer
from faceSpoofDetection import features

# multi-scale LBP descriptor shared by every per-database feature loader below
mlbp_feature_computer = feature_computer.FrameFeatureComputer(
    features.MultiScaleLocalBinaryPatterns((8, 1), (8, 2), (16, 2)))

# destination path for the combined classifier pickle
classifier_name = "powerful_classifier.pkl"
classifier_path = "../classifiers/" + classifier_name

# load the precomputed idiap features (True => load cached features from disk)
(
    idiap_train_features,
    idiap_train_labels,
) = idiap_classifier.get_train_features_and_labels(True, mlbp_feature_computer)
print("Loaded idiap training features")

(
    idiap_test_features,
    idiap_test_labels,
) = idiap_classifier.get_test_features_and_labels(True, mlbp_feature_computer)
print("Loaded idiap test features")
def main():
    """Train (or load) an SVM spoof classifier on the idiap features, holding
    out the last tenth of the training set for evaluation, and report ROC-AUC,
    equal error rate, accuracy and the confusion matrix."""
    saved_classifier_filename = "../classifiers/idiap.pkl"

    # load or recompute train features. If none, the train features are not loaded into memory
    load_train_features = True
    # retrain or load classifier
    load_classifier = False
    # load or recompute test features
    load_test_features = True

    # descriptor computer
    mlbp_feature_computer = feature_computer.FrameFeatureComputer(
        features.MultiScaleLocalBinaryPatterns((8, 1), (8, 2), (16, 2))
    )

    (train_features, train_labels) = get_train_features_and_labels(
        load_train_features, mlbp_feature_computer
    )

    # BUG FIX: the original sliced with `size / 10`, a float in Python 3, which
    # raises "TypeError: slice indices must be integers"; use integer division.
    # The max(..., 1) guard also avoids the `-0` slice bounds that a feature
    # set smaller than 10 would produce (test split = everything, train = []).
    size = len(train_features)
    n_holdout = max(size // 10, 1)
    test_features = train_features[-n_holdout:]
    test_labels = train_labels[-n_holdout:]
    train_features = train_features[:-n_holdout]
    train_labels = train_labels[:-n_holdout]

    if not load_classifier:
        """
        param_grid = [
            {'C':[0.0001, 0.001, 0.01], 'kernel':['linear'], 'class_weight':['balanced', None]},
            {'C':[0.0001, 0.001, 0.01], 'kernel':['rbf'],'gamma':[0.0001, 0.001],
            'class_weight':['balanced', None]}
        ]
        # C = 0.0001, kernel=linear, class_weight=balanced
        clf = GridSearchCV(svm.SVC(verbose=True, probability=True), param_grid, verbose=True)
        """
        # clf = svm.SVC(verbose=True, probability=True, C=0.0001, kernel='linear', class_weight='balanced')
        clf = svm.SVC(
            verbose=True,
            probability=True,
            C=0.0001,
            kernel="rbf",
            gamma=0.001,
            class_weight="balanced",
        )
        clf.fit(train_features, train_labels)
        # print("Best estimator found by grid search:")
        # print(clf.best_estimator_)
        # NOTE(review): the freshly trained classifier is NOT persisted, yet the
        # load_classifier branch expects a pickle on disk — confirm intent.
        # joblib.dump(clf, saved_classifier_filename)
    else:
        clf = joblib.load(saved_classifier_filename)

    # (test_features, test_labels) = get_test_features_and_labels(load_test_features)

    # binarize labels for the ROC plot ({-1, 1} -> {0, 1})
    test_labels_bin = label_binarize(test_labels, classes=[-1, 1])

    pred_labels = clf.predict(test_features)
    pred_confidences = clf.predict_proba(test_features)

    plot_roc_curve(test_labels_bin, pred_confidences)

    from sklearn.metrics import (
        roc_curve,
        accuracy_score,
        confusion_matrix,
        roc_auc_score,
        auc,
    )
    from scipy.optimize import brentq
    from scipy.interpolate import interp1d

    roc_auc = roc_auc_score(test_labels, pred_confidences[:, 1])
    print("ROC area under the curve score {}".format(roc_auc))

    # compute the equal error rate (operating point where FPR == 1 - TPR)
    fpr, tpr, _ = roc_curve(test_labels, pred_confidences[:, 1])
    eer = brentq(lambda x: 1.0 - x - interp1d(fpr, tpr)(x), 0.0, 1.0)
    print("Equal error rate {}".format(eer))

    print("Accuracy score {}".format(accuracy_score(test_labels, pred_labels)))
    print("Confusion matrix {}".format(confusion_matrix(test_labels, pred_labels)))