def preprocess(feature_abstract_method):
    # The raw train/test split was computed once and cached to CSV:
    # X_raw = raw_data.iloc[:, 1:]
    # y_raw = raw_data['label']
    # X_train, X_test, y_train, y_test = train_test_split(X_raw, y_raw, test_size=0.2)
    # X_train.to_csv('x_train.csv')
    # X_test.to_csv('x_test.csv')
    # y_train.to_csv('y_train.csv')
    # y_test.to_csv('y_test.csv')
    X_train = pd.read_csv('x_train.csv', index_col=0)
    X_test = pd.read_csv('x_test.csv', index_col=0)
    y_train = pd.read_csv('y_train.csv', index_col=0, header=None)
    y_test = pd.read_csv('y_test.csv', index_col=0, header=None)
    # Dispatch on the requested feature-extraction method
    if feature_abstract_method == 'LBP':
        X_train = LBP.lbp_extract(X_train)
        X_test = LBP.lbp_extract(X_test)
    elif feature_abstract_method == 'PCA':
        X_train, X_test = PCA.PCA_extract(X_train, X_test)
    elif feature_abstract_method == 'skeleton':
        X_train = SKELETON.skeleton_extract(X_train)
        X_test = SKELETON.skeleton_extract(X_test)
    elif feature_abstract_method == 'grid':
        X_train = GRID.grid_extract(X_train)
        X_test = GRID.grid_extract(X_test)
    elif feature_abstract_method == 'hog':
        X_train = HOG.hog_extract(X_train)
        X_test = HOG.hog_extract(X_test)
    return X_train, X_test, y_train, y_test
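# A minimal usage sketch, assuming the LBP/PCA/SKELETON/GRID/HOG modules and the
# cached CSVs exist as in the function above. The SVC classifier is illustrative,
# not part of the original pipeline.
from sklearn.svm import SVC

X_train, X_test, y_train, y_test = preprocess('hog')
clf = SVC(kernel='rbf')
clf.fit(X_train, y_train.values.ravel())  # ravel: y is read as a one-column frame
print("test accuracy:", clf.score(X_test, y_test.values.ravel()))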
def cargar_datos(PATH_POSITIVE, PATH_NEGATIVE):
    data = []
    clases = []
    # Positive samples (pedestrians)
    counter_positive_samples = 0
    for filename in os.listdir(PATH_POSITIVE):
        if filename.endswith(IMAGE_EXTENSION):
            filename = PATH_POSITIVE + filename
            img = cv2.imread(filename)
            hog = cv2.HOGDescriptor()
            descriptor_1 = hog.compute(img)
            lbp = LBP.LocalBinaryPattern(img)
            descriptor_2 = lbp.compute_lbp_clasic()
            # descriptor = lbp.compute_lbp_uniform()
            # Concatenate HOG and LBP into a single feature vector
            descriptor = np.concatenate((descriptor_1, descriptor_2))
            data.append(descriptor)
            clases.append(1)
            counter_positive_samples += 1
    print("Read " + str(counter_positive_samples) + " images of -> pedestrians")
    print(np.shape(data))
    # Negative samples (background)
    counter_negative_samples = 0
    for filename in os.listdir(PATH_NEGATIVE):
        if filename.endswith(IMAGE_EXTENSION):
            filename = PATH_NEGATIVE + filename
            img = cv2.imread(filename)
            hog = cv2.HOGDescriptor()
            descriptor_1 = hog.compute(img)
            lbp = LBP.LocalBinaryPattern(img)
            descriptor_2 = lbp.compute_lbp_clasic()
            # descriptor = lbp.compute_lbp_uniform()
            descriptor = np.concatenate((descriptor_1, descriptor_2))
            data.append(descriptor)
            clases.append(0)
            counter_negative_samples += 1
    print("Read " + str(counter_negative_samples) + " images of -> background")
    print(np.shape(data))
    return np.array(data), np.array(clases)
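# Illustrative follow-on, assuming IMAGE_EXTENSION and the two folders are defined
# by the surrounding script (the paths below are hypothetical). It trains a linear
# SVM on the concatenated HOG+LBP descriptors; the classifier is a sketch, not the
# project's own training code.
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC

data, clases = cargar_datos("data/pedestrians/", "data/background/")  # hypothetical paths
X_tr, X_te, y_tr, y_te = train_test_split(data.reshape(len(data), -1), clases, test_size=0.2)
svm = LinearSVC()
svm.fit(X_tr, y_tr)
print("held-out accuracy:", svm.score(X_te, y_te))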
def recognize(cf, img):
    # Load the datasets
    gallery_target = np.load("npy_db/gallery_target.npy")
    histogram_gallery_data = np.load("npy_db/histogram_gallery_data.npy")
    gallery_thresholds = np.load("npy_db/gallery_thresholds.npy")
    users = pd.read_csv('dataset_user.csv', index_col=[0])
    cf_list = users['Codice Fiscale']
    # Check whether the user exists
    if cf not in cf_list.tolist():
        print("USER NOT FOUND! The entered fiscal code is:", cf)
        return None, 0, None
    # Variables holding the patient's data and their index in dataset_user
    user = None
    index = cf_list.tolist().index(cf)
    # Normalize the input image so LBP can be applied and its histogram computed
    norm_image = cv2.normalize(img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    norm_image = norm_image.astype(np.uint8)
    lbp = LBP.Local_Binary_Pattern(1, 8, norm_image)
    hist = lbp.createHistogram(lbp.compute_lbp())
    # Best match between the input histogram and those of the user's gallery images
    val = topMatch(cf, gallery_target, histogram_gallery_data, hist)
    # If the maximum similarity reaches the patient's adaptive threshold, the identity is verified
    if val >= gallery_thresholds[index]:
        user = users.iloc[index]
    # Return the patient's information and their index in dataset_user
    return user, index, user
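# Hypothetical call, assuming npy_db/ and dataset_user.csv exist as loaded above;
# the fiscal code and image path are placeholders for illustration only.
probe = cv2.imread("probe_face.png", cv2.IMREAD_GRAYSCALE)  # hypothetical file
user, index, matched = recognize("CF_PLACEHOLDER", probe)
if matched is not None:
    print("identity verified for user at index", index)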
def descriptor(name, arg):
    model = None
    if name == "LBP":
        model = LBP(arg)
    elif name == "RAD":
        model = RAD(arg)
    return model
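# Minimal factory usage; the constructor argument value is illustrative, since the
# snippet above does not specify what LBP and RAD expect.
desc = descriptor("LBP", 8)
if desc is None:
    raise ValueError("unknown descriptor name")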
def identify(cf, img):
    # Load the datasets
    gallery_target = np.load("npy_db/gallery_target.npy")
    histogram_gallery_data = np.load("npy_db/histogram_gallery_data.npy")
    users = pd.read_csv('dataset_user.csv', index_col=[0])
    gallery_thresholds = np.load("npy_db/gallery_thresholds.npy")
    galley_users = list(dict.fromkeys(gallery_target))
    cf_list = users['Codice Fiscale']
    # Check whether the user exists
    if cf not in cf_list.tolist():
        print("USER NOT FOUND! The entered fiscal code is:", cf)
        return None, 0, None
    # Patient information looked up via cf
    index = cf_list.tolist().index(cf)
    user = users.iloc[index]
    # List of the patient's delegates
    delegati = ast.literal_eval(user["Delegati"])
    # If the patient has no delegates, stop here
    if len(delegati) == 0:
        print("The user has no delegates!")
        return None, 0, None
    # Best similarity so far and the identity of the matching delegate
    max_val = 0  # renamed from `max` to avoid shadowing the builtin
    identity = None
    # Normalize the input image so LBP can be applied and its histogram computed
    norm_image = cv2.normalize(img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    norm_image = norm_image.astype(np.uint8)
    lbp = LBP.Local_Binary_Pattern(1, 8, norm_image)
    hist = lbp.createHistogram(lbp.compute_lbp())
    # For each delegate
    for d in delegati:
        # Best score from comparing the histogram against the delegate's gallery images
        val = topMatch(d, gallery_target, histogram_gallery_data, hist)
        th_index = galley_users.index(d)
        # Compare the similarity with the best obtained so far and with the delegate's
        # threshold; if it beats both, update the running best
        if val > max_val and val >= gallery_thresholds[th_index]:
            max_val = val  # highest similarity so far
            identity = d   # delegate with the best score so far
    # If one of the delegates was recognized
    if identity is not None:
        indexd = cf_list.tolist().index(identity)
        recUser = users.iloc[indexd]
        # Return the patient's data, their index in dataset_user, and the delegate's information
        return user, index, recUser
    else:
        # Otherwise return None for the patient, 0 for the index, and None for the delegate
        return None, 0, None
def convert_image_to_hist(self, image):
    print(image)
    image = cv2.imread(image)
    norm_image = get_normalized(image)
    myLBP = LBP.Local_Binary_Pattern(1, 8, norm_image)
    new_img = myLBP.compute_lbp()
    hist = myLBP.createHistogram(new_img)
    return hist
def split_gallery_probe(self, data, target, cfs):
    num_user = self.num_user(target)
    # Count the number of templates per user
    unique, counts = np.unique(target, return_counts=True)
    occurrences = dict(zip(unique, counts))
    # Number of users kept out of the gallery (pn is a percentage)
    pn_user = round(num_user * pn / 100)
    # Template counter
    countTemp = 0
    # User counter
    countUser = 0
    gallery_target, gallery_data, pn_data, pn_target, pg_data, pg_target, histogram_gallery_data, \
        histogram_pg_data, histogram_pn_data = [], [], [], [], [], [], [], [], []
    # Number of templates for the first user; this assumes every user has the
    # same number of templates
    occ = occurrences[target[0]]
    # Number of templates reserved for the probe set (probe is a percentage)
    self.n_template_x_user = round(occ * probe / 100)
    # For each template
    for i, val in enumerate(target):
        name = cfs[countUser]
        norm_template = self.get_normalized_template(i, data)
        lbp = LBP.Local_Binary_Pattern(1, 8, norm_template)
        # If the template index is below the number of templates reserved for the
        # gallery and the user is among the gallery users, put it in the gallery
        if (countTemp < occ - self.n_template_x_user or occ == 1) and countUser < num_user - pn_user:
            gallery_data.append(norm_template)
            gallery_target.append(name)
            histogram_gallery_data.append(lbp.createHistogram(lbp.compute_lbp()))
        else:
            # Otherwise it goes into a probe set: pg if the user is in the gallery,
            # pn if not
            if countUser < num_user - pn_user:
                pg_data.append(norm_template)
                pg_target.append(name)
                histogram_pg_data.append(lbp.createHistogram(lbp.compute_lbp()))
            else:
                pn_data.append(norm_template)
                pn_target.append(name)
                histogram_pn_data.append(lbp.createHistogram(lbp.compute_lbp()))
        countTemp += 1
        # When the user's templates are exhausted, move on to the next user
        if countTemp == occ:
            countTemp = 0
            countUser += 1
    return gallery_data, gallery_target, pn_data, pn_target, pg_data, pg_target, \
        histogram_gallery_data, histogram_pg_data, histogram_pn_data
def __init__(self, model_name, folder_path=None):
    self.model_name = model_name
    self.folder_path = folder_path
    self.split_windows = False
    self.model = None
    if self.model_name == 'LBP':
        self.model = LBP.LocalBinaryPatterns(8, 1)
        self.split_windows = True
    elif self.model_name == 'HOG':
        self.model = HOG.Hog(orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2))
    elif self.model_name == 'CM':
        self.model = ColorMoments.ColorMoments()
        self.split_windows = True
    elif self.model_name == 'SIFT':
        self.model = SIFT.SIFT()
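# Hypothetical instantiation: the enclosing class name is not shown above, so
# `FeatureExtractor` is assumed here purely for illustration.
extractor = FeatureExtractor('LBP')
assert extractor.split_windows  # the LBP and CM models operate on image windows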
def microTextureVideo(self, pathVid):
    cap = cv2.VideoCapture(pathVid)
    val = False
    # Grab frames one at a time and convert them to grayscale
    while True:
        ret, frame = cap.read()
        try:
            vis = frame.copy()
        except Exception as e:
            print(str(e))
            break
        # Convert to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Crop the face region
        crop = detect_face(gray, vis)
        # Convert the cropped image to its LBP representation
        if crop is not None:
            myLBP = LBP.Local_Binary_Pattern(1, 8, crop)
        else:
            continue
        new_img = myLBP.compute_lbp()
        # Build the histogram of the computed LBP image
        hist = myLBP.createHistogram(new_img)
        # Load the trained, saved SVM model (the loop exits after the first
        # prediction, so this runs at most once)
        with open('modelSVM.pkl', 'rb') as f:
            clf = pickle.load(f)
        hist = hist.reshape(1, -1)
        # Ask the recovered classifier whether the face is real or a spoof
        value = clf.predict(hist)
        print(value)
        if value == 0:
            print("REAL")
            val = True
            break
        else:
            print("FAKE")
            val = False
            break
    cap.release()
    cv2.destroyAllWindows()
    return val
def main():
    ap = argparse.ArgumentParser(
        description='Run the local binary patterns algorithm using a basic 3x3 neighborhood.'
    )
    ap.add_argument('-i', '--input', dest='input', type=str, required=True,
                    help='file name with path of the input image')
    arguments = ap.parse_args()

    input_file = arguments.input  # e.g. 'data/simpsons/Test/bart116.jpg'
    if os.path.isfile(input_file):
        run = LBP.LBP(input_file)
        print("RUNNING algorithm developed")
        run.execute()
        # print("RUNNING scikit-image")
        # run.compare()
    else:
        print("File '{}' does not exist.".format(input_file))
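# Example invocation (the script filename is assumed; the image path comes from
# the comment in main() above):
#   python lbp_main.py -i data/simpsons/Test/bart116.jpg
if __name__ == '__main__':
    main()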
# (fragment: the opening of this statement is cut off; cv.imwrite is assumed from
# the path/image argument pair)
cv.imwrite(
    'C:/Users/rebeb/Documents/TU_Wien/Dipl/FID-300/FID-300/FID-300/test_images/results/SIFT/output_SIFT_matched_00197_alternative.jpg',
    outputMatched)
cv.imshow("keypoint mask", results)
cv.imshow("matched", outputMatched)
cv.waitKey(0)
cv.destroyAllWindows()

if LBPLearning:
    # patterns = LBP.threeLayeredLearning(images, masks)
    patterns = np.loadtxt(
        'C:/Users/rebeb/Documents/TU_Wien/Dipl/FID-300/FID-300/FID-300/test_images/training/discriminative_4_12_3.txt',
        delimiter=',')
    img = cv.imread(
        'C:/Users/rebeb/Documents/TU_Wien/Dipl/FID-300/FID-300/FID-300/test_images/hard/00197.jpg', 0)
    lbpImage = LBP.getLBPImage(img, 4, 12, 3)
    height, width = img.shape
    res = np.zeros((height, width), np.float32)
    print("calculating")
    # Compare each pixel's local LBP histogram against every learned pattern
    for x in range(width):
        for y in range(height):
            currentHistogram = lbpImage[y, x]
            currentHistogram = np.float32(currentHistogram)
            maxVal = 0
            for pattern in patterns:
                val = cv.compareHist(currentHistogram, np.float32(np.asarray(pattern)),
                                     cv.HISTCMP_CORREL)
                # if val > 0.75:
                #     res[y, x] = 1
                #     continue
methods = ["HOG", "LBP", "HOGLBP"]
method = methods[0]

if method == "HOG":
    print("__________________________________________________________________")
    print("Computing HOG...")
    X_train = compute_hog_set(X)
elif method == "LBP":
    print("__________________________________________________________________")
    print("Computing LBP...")
    # Calculate LBP values for each image
    lbp_images = []
    for i in range(0, len(X)):
        lbp_images.append(lbp.lbp_compute(X[i]))
    # Calculate histograms for each image
    uniform = False
    print("Computing Histograms... uniform:", str(uniform))
    histograms = []
    for i in range(0, len(lbp_images)):
        # IMPORTANT: set uniform to False or True for LBP/LBPU
        histograms.append(lbp.lbp_hist(lbp_images[i], step=8, win_size=16, uniform=uniform))
    X = [elem for histogram in histograms for elem in histogram]
    for i in range(0, len(histograms)):
        histograms[i] = np.concatenate(histograms[i])
test_images, test_classes = utils.load_data(TEST_IMAGES_FOLDER, IMG_EXTENSION)

print("Computing descriptors")
train_descriptors = utils.compute_lbp(train_images, uniform=True)
test_descriptors = utils.compute_lbp(test_images, uniform=True)
descriptors = np.vstack((train_descriptors, test_descriptors))
labels = np.concatenate((train_classes, test_classes))

print("Training the classifier")
classifier = utils.train(descriptors.astype(np.float32), labels, kernel=cv2.ml.SVM_POLY,
                         params={'degree': 2})
detector = LBP.LBPDetector(8, 8, 16, 16, 128, 64, 8, 8, classifier)

print("Detecting in Abbey Road")
abbey_road = cv2.imread("abbey_road.jpeg")
peds = detector.detect(abbey_road, [0.8, 1, 1.2])
for ped in peds:
    cv2.rectangle(abbey_road, (ped[1], ped[0]), (ped[3], ped[2]), (0, 0, 255), 1)
cv2.imwrite("abbey_road_dets_multiscale.jpeg", abbey_road)

print("Detecting in Pedestrians")
pedestrians = cv2.imread("street.jpg")
histograma = 0
for (i, rect) in enumerate(rects):
    if imagini.endswith(".jpg"):
        cv2.imshow("imaginegrey", img_data)
        (x, y, w, h) = face_utils.rect_to_bb(rect)
        b = cv2.rectangle(img_data, (x, y), (x + w, y + h), 10, 0)
        c = b[y:y + h, x:x + w]
        try:
            # Resize the face crop, run LBP on it, and save its histogram to disk
            c = cv2.resize(c, (154, 154))
            c1 = copy.deepcopy(c)
            lbp = LBP.LBP(c, c1)
            cnt_imagini_total += 1
            histograma = calcul_histograma.calc_hist(lbp)
            l_imag.append(folder)
            l_hist_imag.append(histograma)
            f = open(subfolder + "_" + imagini[0:-4] + "_" + "hist.txt", "w+")
            f.write(str(list(histograma)))
            f.close()
            lbp = 0
            folder = ""
        except Exception:
            # the fragment ends before the original except clause; a pass-through
            # is assumed here only so the block parses
            pass
def classifier(name, arg):
    model = None
    if name == "asdfasdfnlasdflas":  # placeholder name, never matched in practice
        model = LBP(arg)
    return model
# =======================
# now for the main script
# =======================

# first set basic display options
numpy.set_printoptions(precision=3)
#numpy.set_printoptions(linewidth=135)
matplotlib.pyplot.ion()

# then run some very basic tests.
# this is to catch silly bugs while developing the library being used.
print("testing basic MRF creation and message passing.")
mrf = LBP.MRF(7, 7, 3)
base_beliefs = numpy.ones(shape=(7, 7, 3))
mrf.init_base_belief(base_beliefs)
smoothness = numpy.ones(shape=(3, 3))
mrf.init_smoothness(smoothness)
mrf.pass_messages()
#print(mrf)

# as that must have worked, now test on the tsukuba pair.
# this will be much more involved,
# but is still a fairly basic application.
# most of the work here is in setting up the base belief array.
print("testing tsukuba pair stereo matching")
input_left = matplotlib.image.imread('tsukuba-imL.png')
def compute_lbp(img_list, uniform=False):
    lbp = LBP.LBPDescriptor(8, 8, 16, 16, 128, 64, uniform)
    lbp_list = [lbp.compute(img) for img in img_list]
    return np.array(lbp_list)
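# Minimal usage sketch; the image filenames are hypothetical, and the descriptor
# parameters are taken verbatim from the function above.
import cv2
import numpy as np

imgs = [cv2.imread(p, cv2.IMREAD_GRAYSCALE) for p in ["a.png", "b.png"]]  # hypothetical files
feats = compute_lbp(imgs, uniform=True)
print(feats.shape)  # one descriptor row per input image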