def extract_features_img(st):
    img = get_image(st)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # LBP histograms for the grayscale image and for each color channel.
    # Note: OpenCV loads images in BGR order, so channel 0 is blue, not red;
    # the XRed/XBlue names below may be swapped with respect to the actual channels.
    X_0 = lbp_features(gray, hdiv=1, vdiv=1, mapping='nri_uniform')
    XRed = lbp_features(img[:, :, 0], hdiv=1, vdiv=1, mapping='nri_uniform')
    XGreen = lbp_features(img[:, :, 1], hdiv=1, vdiv=1, mapping='nri_uniform')
    XBlue = lbp_features(img[:, :, 2], hdiv=1, vdiv=1, mapping='nri_uniform')
    X_lbp = np.concatenate((X_0, XRed, XGreen, XBlue))
    features = np.asarray(X_lbp)
    return features
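A minimal usage sketch, assuming `lbp_features` comes from pybalu's feature_extraction module, that `get_image` wraps `cv2.imread` (BGR), and that the `nri_uniform` mapping yields a 59-bin histogram per call; the file path is a placeholder.

# Hypothetical usage (path is a placeholder, not from the original code)
features = extract_features_img('images/sample.png')
print(features.shape)  # expected (236,): 4 concatenated 59-bin nri_uniform histograms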
def predict_from_scratch(filepath, data_filename, Xtrain=None, Ytrain=None, knn=None):
    if Xtrain is None or Ytrain is None:
        assert knn, "Please provide the model, or else the Xtrain and Ytrain data"
    # Load the saved values
    with open(data_filename) as f:
        data = json.load(f)
    hdiv = int(data['hdiv'])
    vdiv = int(data['vdiv'])
    local_p_clean = np.array(data['p_clean'])
    local_p_sfs = np.array(data['p_sfs'])
    # Compute the features
    im = imread(filepath)
    lbp = lbp_features(im, hdiv=hdiv, vdiv=vdiv)
    # Keep only the columns selected by Clean and then by SFS
    lbp_clean = np.array([lbp[i] for i in local_p_clean])
    lbp_sfs = np.array([lbp_clean[i] for i in local_p_sfs])
    lbp_sfs = lbp_sfs.reshape(1, -1)
    if not knn:
        knn = KNeighborsClassifier(n_neighbors=1)
        knn.fit(Xtrain, Ytrain)
    local_pred = knn.predict(lbp_sfs)
    print(f"The prediction is: {local_pred}")
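A hedged usage sketch: the file names are placeholders, and the JSON file is assumed to store the hdiv/vdiv used during training plus the Clean and SFS index lists, exactly as the function reads them.

# Hypothetical call with a pre-fitted classifier (paths are placeholders)
from sklearn.neighbors import KNeighborsClassifier

knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(Xtrain, Ytrain)  # Xtrain/Ytrain built from the selected training LBP features
predict_from_scratch('faces/face_001_08.png', 'training_data.json', knn=knn)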
def main(path, n=50):
    # Load the classifier we trained previously
    with open('T02/trained_classifiers/{}.p'.format(n), 'rb') as fp:
        classifier, selected_chars = pickle.load(fp)
    _width, _height, rows, _info = png.Reader(filename=path).read()
    img_lbp = [lbp_features(get_greyscale_matrix(rows), hdiv=1, vdiv=1)]
    Y_test = filter_matrix_by_columns(img_lbp, selected_chars)
    return classifier.predict(Y_test)
def create_info(train_0_path="T02/Training_0/", train_1_path="T02/Training_1/",
                test_0_path="T02/Testing_0/", test_1_path="T02/Testing_1/"):
    train = {0: [], 1: []}
    test = {0: [], 1: []}
    # All features from train
    if not os.path.exists('T02/train_lbps.p'):
        for img_name in os.listdir(train_0_path):
            if ".png" not in img_name:
                continue
            _width, _height, rows, _info = png.Reader(filename="{}{}".format(train_0_path, img_name)).read()
            train[0].append(lbp_features(get_greyscale_matrix(rows), vdiv=1, hdiv=1))
        for img_name in os.listdir(train_1_path):
            if ".png" not in img_name:
                continue
            _width, _height, rows, _info = png.Reader(filename="{}{}".format(train_1_path, img_name)).read()
            train[1].append(lbp_features(get_greyscale_matrix(rows), vdiv=1, hdiv=1))
        with open('T02/train_lbps.p', 'wb') as fp:
            pickle.dump(train, fp)
    # All features from test
    if not os.path.exists('T02/test_lbps.p'):
        for img_name in os.listdir(test_0_path):
            if ".png" not in img_name:
                continue
            _width, _height, rows, _info = png.Reader(filename="{}{}".format(test_0_path, img_name)).read()
            test[0].append(lbp_features(get_greyscale_matrix(rows), vdiv=1, hdiv=1))
        for img_name in os.listdir(test_1_path):
            if ".png" not in img_name:
                continue
            _width, _height, rows, _info = png.Reader(filename="{}{}".format(test_1_path, img_name)).read()
            test[1].append(lbp_features(get_greyscale_matrix(rows), vdiv=1, hdiv=1))
        with open('T02/test_lbps.p', 'wb') as fp:
            pickle.dump(test, fp)
def extract_features(dirpath, fmt):
    st = '*.' + fmt
    img_names = dirfiles(dirpath + '/', st)
    n = len(img_names)
    print(n)
    for i in range(n):
        img_path = img_names[i]
        img = get_image(dirpath + '/' + img_path)
        escala_grises = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # LBP of the grayscale image and of each channel
        # (OpenCV uses BGR order, so channel 0 is actually blue).
        X_0 = lbp_features(escala_grises, hdiv=1, vdiv=1, mapping='nri_uniform')
        rojo = lbp_features(img[:, :, 0], hdiv=1, vdiv=1, mapping='nri_uniform')
        verde = lbp_features(img[:, :, 1], hdiv=1, vdiv=1, mapping='nri_uniform')
        azul = lbp_features(img[:, :, 2], hdiv=1, vdiv=1, mapping='nri_uniform')
        # Haralick = haralick_features(img.astype(int))
        # hog = hog_features(i, v_windows=3, h_windows=3, n_bins=8)
        features = np.asarray(np.concatenate((X_0, rojo, verde, azul)))
        if i == 0:
            # Allocate the output matrix once we know the feature length
            m = features.shape[0]
            data = np.zeros((n, m))
            print('size of extracted features:')
            print(features.shape)
        data[i] = features
    return data
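A hedged sketch of how extract_features might feed a classifier; the directory names are placeholders, and the dirfiles/get_image helpers are assumed to behave as in the snippet above.

# Hypothetical training setup: one directory per class, labels built to match
import numpy as np
from sklearn.neighbors import KNeighborsClassifier

X0 = extract_features('Training_0', 'png')  # class-0 images (assumed layout)
X1 = extract_features('Training_1', 'png')  # class-1 images (assumed layout)
X = np.vstack((X0, X1))
y = np.concatenate((np.zeros(len(X0)), np.ones(len(X1))))

clf = KNeighborsClassifier(n_neighbors=1)
clf.fit(X, y)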
def local_binary_patterns(img, shape, mapping):
    pixel_w, pixels_h = shape
    return feature_extraction.lbp_features(img, hdiv=pixel_w, vdiv=pixels_h, mapping=mapping)
def extract_features_image(fxi, img=None, bw=None):
    # img: grayscale image, bw: binary image (segmented region)
    if fxi == 'lbp':
        X = lbp_features(img, region=bw, hdiv=1, vdiv=1, mapping='nri_uniform')
    elif fxi == 'lbp-ri':
        X = lbp_features(img, region=bw, hdiv=1, vdiv=1, mapping='uniform')
    elif fxi == 'gabor':
        nr = 8
        nd = 8
        X = gabor_features(img, region=bw, rotations=nr, dilations=nd)
    elif fxi == 'gabor-ri':
        nr = 8
        nd = 8
        Y = gabor_features(img, region=bw, rotations=nr, dilations=nd)
        # Rotation-invariant Gabor: sum the responses over the nr rotations of
        # each dilation, then keep the three global features at the end of Y.
        X = np.zeros((nd + 3,))
        for j in range(nd):
            X[j] = np.sum(Y[j * nr:(j + 1) * nr])
        X[nd] = Y[nr * nd]
        X[nd + 1] = Y[nr * nd + 1]
        X[nd + 2] = Y[nr * nd + 2]
    elif fxi == 'hog':
        X = hog_features(img, region=bw, v_windows=1, h_windows=1, n_bins=9,
                         normalize=False, labels=False, show=False)
    elif fxi == 'haralick-1':
        X = haralick_features(img, region=bw, distance=1)
    elif fxi == 'haralick-2':
        X = haralick_features(img, region=bw, distance=2)
    elif fxi == 'haralick-3':
        X = haralick_features(img, region=bw, distance=3)
    elif fxi == 'haralick-5':
        X = haralick_features(img, region=bw, distance=5)
    elif fxi == 'haralick-7':
        X = haralick_features(img, region=bw, distance=7)
    elif fxi == 'fourier':
        X = fourier_features(img, region=bw)
    elif fxi == 'basicint':
        X = basic_int_features(img, region=bw)
    elif fxi == 'clp':
        X = clp_features(img)
    elif fxi == 'contrast':
        X = contrast_features(img, region=bw)
    elif fxi == 'dct':
        X = dct_features(img, region=bw)
    elif fxi == 'basicgeo':
        X = basic_geo_features(bw)
    elif fxi == 'centroid':
        f = basic_geo_features(bw)
        X = f[0:2]
    elif fxi == 'fourierdes':
        X = fourier_des_features(bw)
    elif fxi == 'flusser':
        X = flusser_features(bw)
    elif fxi == 'gupta':
        X = gupta_features(bw)
    elif fxi == 'hugeo':
        X = hugeo_features(bw)
    elif fxi == 'ellipse':
        X = ellipse_features(bw)
    else:
        print('ERROR: ' + fxi + ' does not exist as a feature extraction method.')
    features = np.asarray(X)
    return features
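A hedged usage sketch: the descriptor names match the branches above, and img and bw are assumed to be a grayscale image and its binary mask as NumPy arrays.

# Hypothetical: concatenate several of the descriptors handled above into one vector
import numpy as np

descriptors = ['lbp', 'hog', 'haralick-3', 'basicgeo']  # any keys handled above
feature_vector = np.concatenate(
    [extract_features_image(fxi, img=img, bw=bw) for fxi in descriptors]
)
print(feature_vector.shape)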
for filename in os.listdir(directory):
    if filename.endswith(".png"):
        # For a file "face_xxx_nn.png", ID is xxx and nn is nn, both as int
        id_nn = filename[:-4].split("_")
        ID = int(id_nn[1])
        nn = int(id_nn[2])
        if ID % 2 == 1:  # keep only the odd IDs
            if nn <= 7:  # and only the first 7 images per ID
                i += 1
                # Read the image and extract its LBP features
                im = imread(f"{directory}/{filename}")
                lbp = lbp_features(im, hdiv=hdiv, vdiv=vdiv)
                # Store the results
                LBPs.append(lbp)
                IDs.append(ID)
                NNs.append(nn)
                if i % 10 == 0:
                    progressBar(i, 350, bar_length=20)
print(f"\nLBPs computed\nLBP Shape: {len(LBPs)}, {len(LBPs[0])}\n"
      f"IDs Shape: {len(IDs)}, 1")
# 2) Before feature selection, we first run a cleansing step with Clean.
# Then we apply SFS.
Xtrain, Xtest, Ytrain, Ytest = [], [], [], []
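A hedged sketch of the Clean + SFS step described in the comment above. It assumes pybalu's feature_selection module exposes clean() and sfs() returning index arrays (consistent with the p_clean / p_sfs values saved and reused in predict_from_scratch); the number of selected features is a placeholder.

# Assumed import location and signatures; verify against the installed pybalu version
import numpy as np
from pybalu.feature_selection import clean, sfs

X = np.array(LBPs)
y = np.array(IDs)

p_clean = clean(X)                      # indices of columns kept after cleansing
X_clean = X[:, p_clean]
p_sfs = sfs(X_clean, y, n_features=50)  # hypothetical number of features to select
X_sfs = X_clean[:, p_sfs]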