Code Example #1
def get_LBP_Features(trainingPath, testingPath, p, r):
	from localbinarypatterns import LocalBinaryPatterns
	from sklearn.utils import shuffle
	from imutils import paths  # assumed source of paths.list_files
	import numpy as np
	import cv2
	import time

	# initialize the local binary patterns descriptor along with the data and label lists
	desc = LocalBinaryPatterns(p, r)
	data = []
	labels = []
	test_data = []
	test_labels = []
	
	start_time = time.time()

	# loop over the training images
	for imagePath in paths.list_files(trainingPath, validExts=(".png",".ppm")):
		
		# load the image, convert it to grayscale, and describe it
		image = cv2.imread(imagePath)
		gray = np.matrix(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
		resized_image = cv2.resize(gray, (32, 32))
		hist = desc.describe(resized_image)
		hist = hist / max(hist)

		# extract the label from the image path, then update the
		# label and data lists
		labels.append(imagePath.split("/")[-2])
		data.append(hist)

	# loop over the testing images
	for imagePath in paths.list_files(testingPath, validExts=(".png",".ppm")):

		# load the image, convert it to grayscale, describe it, and classify it
		image = cv2.imread(imagePath)
		gray = np.matrix(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
		resized_image = cv2.resize(gray, (32, 32))
		hist = desc.describe(resized_image)
		hist = hist / max(hist)

		# extract the label from the image path, then update the
		# label and data lists
		test_labels.append(imagePath.split("/")[-2])
		test_data.append(hist)

	feature_extraction_runtime = (time.time() - start_time)

	data = np.array(data)
	labels = np.array(labels)
	test_data = np.array(test_data)
	test_labels = np.array(test_labels)

	data, labels = shuffle(data,labels)

	print "[INFO] LBP Features are ready!"
	print "Total image:", len(data) + len(test_data)
	print "Feature extraction runtime:", feature_extraction_runtime
	print "Average for one image:", feature_extraction_runtime / (len(data) + len(test_data))

	return (data, labels, test_data, test_labels)
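
All of the examples in this listing rely on a LocalBinaryPatterns helper class that is not shown. Below is a minimal sketch of what that helper commonly looks like (the widely used scikit-image based implementation); treat it as an assumption about the actual class, which may differ in detail.

# Sketch of the assumed LocalBinaryPatterns helper (scikit-image based).
import numpy as np
from skimage import feature

class LocalBinaryPatterns:
	def __init__(self, numPoints, radius):
		# number of circularly symmetric neighbour points and the radius
		self.numPoints = numPoints
		self.radius = radius

	def describe(self, image, eps=1e-7):
		# compute the uniform LBP representation of the image
		lbp = feature.local_binary_pattern(image, self.numPoints,
			self.radius, method="uniform")
		# histogram the patterns ("uniform" yields numPoints + 2 bins)
		(hist, _) = np.histogram(lbp.ravel(),
			bins=np.arange(0, self.numPoints + 3),
			range=(0, self.numPoints + 2))
		# normalize the histogram so that it sums to one
		hist = hist.astype("float")
		hist /= (hist.sum() + eps)
		return hist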
Code Example #2
def lbp_for_one_image(faces, image):
    if len(faces) == 0:
        print("No faces detected.")
        return
    elif len(faces) > 1:
        print("Sorry, more than one facial expression is not supported now.")
        return
    else:
        x, y, w, h = faces[0]
        img = image[y: y+h, x: x + w]   # face part
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        desc = LocalBinaryPatterns(8, 1)
        desc.describe(gray, 2)
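
A hypothetical usage sketch for lbp_for_one_image follows; the Haar-cascade detector and the input file name are assumptions, not part of the original snippet.

# Hypothetical usage: detect faces with a Haar cascade, then describe the face.
import cv2

face_cascade = cv2.CascadeClassifier(
    cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
image = cv2.imread("portrait.jpg")  # assumed input image
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
lbp_for_one_image(faces, image)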
Code Example #3
def ExtractFeatures(image):
    """
    Extract Features. 
        -> Features Extracted: 
            * LBP
            * Bfx_Basicint
            * Haralick
            * free Threshold Adjacency Statistics
            * Zernike Moments
            * HOG

    """
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    #LBP Features
    desc = LocalBinaryPatterns(12, 4)
    lbp_hist = desc.describe(gray_image)
    lbp_hist = lbp_hist.reshape(1, len(lbp_hist))

    #Bfx_Basicint
    R, _, _ = Bim_segbalu(gray_image)
    options = {'show': True, 'mask': 5}
    basicint, Xn = Bfx_basicint(gray_image, R, options)

    #Haralick features
    haralick = mahotas.features.haralick(gray_image).mean(0)
    haralick = haralick.reshape(1, len(haralick))

    #parameter free Threshold Adjacency Statistics
    pftas = mahotas.features.pftas(gray_image)
    pftas = pftas.reshape(1, len(pftas))

    #Zernike Moments
    zernike = mahotas.features.zernike_moments(gray_image, radius=2)
    zernike = zernike.reshape(1, len(zernike))

    #HOG [Fix Dimensionality]
    HOG = hog(gray_image,
              orientations=8,
              pixels_per_cell=(16, 16),
              cells_per_block=(1, 1),
              visualise=False)
    HOG = HOG.reshape(1, len(HOG))

    #Join Features
    features = np.concatenate(
        (lbp_hist, basicint, haralick, pftas, zernike, HOG), axis=1)

    return features
                "--testing",
                required=True,
                help="path to the tesitng images")
args = vars(ap.parse_args())

# initialize the local binary patterns descriptor along with
# the data and label lists
desc = LocalBinaryPatterns(24, 8)
data = []
labels = []
# loop over the training images
for imagePath in paths.list_images(args["training"]):
    # load the image, convert it to grayscale, and describe it
    image = cv2.imread(imagePath)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    hist = desc.describe(gray)

    # extract the label from the image path, then update the
    # label and data lists
    labels.append(imagePath.split(".")[-2])
    # labels.append(os.path.split(os.path.dirname(imagePath))[-1])

    data.append(hist)

# train a Linear SVM on the data
model = LinearSVC(C=100.0, random_state=42)
model.fit(data, labels)
# loop over the testing images
for imagePath in paths.list_images(args["testing"]):
    # load the image, convert it to grayscale, describe it,
    # and classify it
Code Example #5
def get_all_samples(path, p=24, r=8):
    # initialize the local binary patterns descriptor along with the data and label lists
    desc = LocalBinaryPatterns(p, r)
    data = []
    labels = []
    classSamplesList = []
    samples_amount_of_classes = []
    currentClass = None
    flag = False

    class_list = os.listdir(path)
    class_list.remove('.DS_Store')
    class_list.remove('Readme.txt')
    counter = len(class_list)

    lastClassPath = ''
    # loop over the training images
    for imagePath in paths.list_files(path, validExts=(".png", ".ppm")):
        if (flag == False):
            currentClass = imagePath.split("/")[-2]
            labels.append(currentClass)
            counter -= 1
            flag = True
        else:
            if imagePath.split("/")[-2] != currentClass:
                currentClass = imagePath.split("/")[-2]
                classSamplesList.append(np.transpose(np.array(data)))
                samples_amount_of_classes.append(len(data))
                data = []
                labels.append(currentClass)
                counter -= 1
        if counter == 0:
            lastClassPath = imagePath
            break

        # load the image, convert it to grayscale, and describe it
        image = cv2.imread(imagePath)
        gray = np.matrix(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
        resized_image = cv2.resize(gray, (32, 32))
        hist = desc.describe(resized_image)
        hist = hist / max(hist)

        # extract the label from the image path
        data.append(hist)

    data = []
    head, _ = os.path.split(lastClassPath)

    for imagePath in paths.list_files(head, validExts=(".png", ".ppm")):
        # load the image, convert it to grayscale, and describe it
        image = cv2.imread(imagePath)
        gray = np.matrix(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
        resized_image = cv2.resize(gray, (32, 32))
        hist = desc.describe(resized_image)
        hist = hist / max(hist)
        # extract the label from the image path
        data.append(hist)

    classSamplesList.append(np.transpose(np.array(data)))
    samples_amount_of_classes.append(len(data))

    all_samples = tuple(classSamplesList)
    all_samples = np.concatenate(all_samples, axis=1)
    """
    for i, val in enumerate(samples_amount_of_classes):
        print i, val
    raw_input()
    """
    return all_samples, labels, samples_amount_of_classes
Code Example #6
def main():
	# construct the argument parse and parse the arguments
	ap = argparse.ArgumentParser()
	ap.add_argument("-t", "--training", required=True,
		help="path to the training images")
	ap.add_argument("-e", "--testing", required=True, 
		help="path to the tesitng images")
	args = vars(ap.parse_args())

	# initialize the local binary patterns descriptor along with the data and label lists
	desc = LocalBinaryPatterns(24, 8)
	data = []
	labels = []

	# initialize the actual and predicted vectors
	y_act = []
	y_pred = []


	# loop over the training images
	for imagePath in paths.list_files(args["training"], validExts=(".png",".ppm")):
		
		# load the image, convert it to grayscale, and describe it
		image = cv2.imread(imagePath)
		gray = np.matrix(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
		resized_image = cv2.resize(gray, (32, 32))
		hist = desc.describe(resized_image)
		hist = hist / max(hist)

		# extract the label from the image path, then update the
		# label and data lists
		labels.append(imagePath.split("/")[-2])
		data.append(hist)


	# shuffle data and labels before training the model
	X = np.array(data)
	y = np.array(labels)
	#X, y = shuffle(X,y)

	# find the best parameters before training the model
	best_parameters = svc_param_selection(X, y)
	best_gamma = best_parameters.get("gamma")
	best_kernel = best_parameters.get("kernel")
	print(best_gamma, best_kernel)
	# raw_input()

	# train a Nu-SVM on the data with the selected kernel and gamma
	model = svm.NuSVC(nu=0.01, kernel=best_kernel, gamma=best_gamma)
	model.fit(X, y)


	# loop over the testing images
	for imagePath in paths.list_files(args["testing"], validExts=(".png",".ppm")):

		# load the image, convert it to grayscale, describe it, and classify it
		image = cv2.imread(imagePath)
		gray = np.matrix(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY))
		resized_image = cv2.resize(gray, (32, 32))
		hist = desc.describe(resized_image)
		hist = hist / max(hist)

		# prediction for each test image
		prediction = model.predict([hist])[0]

		# prepare data to append into lists
		inpt = str(imagePath.split("/")[-2])
		outpt = str(prediction)
		y_act.append(int(inpt)) 
		y_pred.append(int(outpt))


	# calculate match_count and accuracy to show the test result
	match_count = sum([int(y == y_) for y, y_ in zip(y_act, y_pred)])
	accuracy = float(match_count) / float(len(y_act))
	print("\nAccuracy: " + str(accuracy) + "\n")
Code Example #7
# Main path to the folder with leaf images
trainingMainPath = "/home/krzysztof/Dokumenty/SNR_grupa1/Folio Leaf Dataset/Folio"
# paths - all (full) paths to the leaf images
paths = metody.getListOfFiles(
    trainingMainPath)  # List of all files in the folder
desc = LocalBinaryPatterns(
    16, 2
)  # LBP descriptor object: 16 - number of sample points in the neighbourhood, 2 - neighbourhood radius
data = []
labels = []

licznik = 0  # For displaying extraction progress
allFiles = len(paths)

print("Ekstrakcja cech\n")
for imagePath in paths:
    image = cv2.imread(imagePath)  # Load the image
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # Convert to grayscale
    hist = desc.describe(gray)  # LBP applied to the image - returns a histogram

    labels.append(imagePath.split("/")[-2])  # Only the flower name
    data.append(hist)
    licznik += 1
    print("Completed: " + str(licznik / allFiles * 100) + " %\n")

data = np.array(data, dtype="float32")
labels = np.array(labels)
# Save the extracted features to an easily accessible file
with open('LBPdata16_2.pckl', 'wb') as f:
    pickle.dump([data, labels], f)
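
A hypothetical follow-up sketch shows how the pickled features could be reloaded and used to train a classifier; the split ratio and the choice of LinearSVC are assumptions.

# Hypothetical follow-up: reload LBPdata16_2.pckl and train a linear SVM.
import pickle
from sklearn.model_selection import train_test_split
from sklearn.svm import LinearSVC

with open('LBPdata16_2.pckl', 'rb') as f:
    data, labels = pickle.load(f)

X_train, X_test, y_train, y_test = train_test_split(
    data, labels, test_size=0.2, random_state=42)
model = LinearSVC(C=100.0, random_state=42)
model.fit(X_train, y_train)
print("Accuracy:", model.score(X_test, y_test))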
Code Example #8
    #     labels_real.append(im)
    #     data_real.append(hist)

    # Old detection approach

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    rects = detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5, minSize=(30, 30),
            flags=cv2.CASCADE_SCALE_IMAGE)
    for rect in rects:
        face_gray = gray[rect[1]:rect[1]+rect[3], rect[0]:rect[0]+rect[2]]
        pil_gray = Image.fromarray(face_gray)
        open_cv_image = np.array(pil_gray)
        
        open_cv_image = image_resize(open_cv_image, height = 150)

        hist = desc.describe(open_cv_image)

        # extract the label from the image path, then update the
        # label and data lists
        im = imagePath.split(os.path.sep)[-1]
        im = im[0:4]
        labels_real.append(im)
        data_real.append(hist)

fake_id  = 0

# loop over fake images
for imagePath in paths.list_images("frames_fakes/"):
    fake_id += 1
    # load the image, convert it to grayscale, and describe it
    image = cv2.imread(imagePath)
Code Example #9
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--training", required=True,
	help="path to the training images")
ap.add_argument("-e", "--testing", required=True, 
	help="path to the tesitng images")
args = vars(ap.parse_args())

# initialize the local binary patterns descriptor along with
# the data and label lists
desc = LocalBinaryPatterns(24, 8)
data = []
labels = []

# loop over the training images
for imagePath in paths.list_images(args["training"]):
	# load the image, convert it to grayscale, and describe it
	image = cv2.imread(imagePath)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	hist = desc.describe(gray)

	# extract the label from the image path, then update the
	# label and data lists
	labels.append(imagePath.split("/")[-2])
	data.append(hist)

# train a Linear SVM on the data
model = LinearSVC(C=100.0, random_state=42)
model.fit(data, labels)
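
The snippet above stops after training. A hedged sketch of the testing loop that typically follows is shown below; classifying each test image and overlaying the prediction is an assumption about the rest of the original script.

# Sketch of a testing loop that classifies each image with the trained model.
for imagePath in paths.list_images(args["testing"]):
	# load the image, convert it to grayscale, and describe it
	image = cv2.imread(imagePath)
	gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
	hist = desc.describe(gray)

	# classify the histogram and draw the predicted label on the image
	prediction = model.predict(hist.reshape(1, -1))[0]
	cv2.putText(image, str(prediction), (10, 30),
		cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 3)
	cv2.imshow("Image", image)
	cv2.waitKey(0)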
Code Example #10
from Bfx_basicint import Bfx_basicint
from Bim_segbalu import Bim_segbalu  # assumed module path for Bim_segbalu
from localbinarypatterns import LocalBinaryPatterns
from skimage.feature import hog
import numpy as np
import cv2
import mahotas
from mahotas.features import surf

#Load Image
filename = 'Cl_1_2_AB.png'
image = cv2.imread(filename)
image = image[0:32, 0:32]

#convert image to grayscale
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

#LBP Features
desc = LocalBinaryPatterns(12, 4)
lbp_hist = desc.describe(gray_image)
lbp_hist = lbp_hist.reshape(1, len(lbp_hist))

#Bfx_Basicint
R, _, _ = Bim_segbalu(gray_image)
options = {'show': True, 'mask': 5}
basicint, Xn = Bfx_basicint(gray_image, R, options)

#Haralick features
haralick = mahotas.features.haralick(gray_image).mean(0)
haralick = haralick.reshape(1, len(haralick))

#parameter free Threshold Adjacency Statistics
pftas = mahotas.features.pftas(gray_image)
pftas = pftas.reshape(1, len(pftas))
Code Example #11
class CloudFeatureHistogram:
    def __init__(self, doHSVhist=False):
        # store the number of points and radius
        #self.image = image
        self.doHSVhist = doHSVhist

        # initialize the local binary pattern descriptor
        self.desc = LocalBinaryPatterns(24, 8)

    def get_feature_histogram(self, image):
        # load the image, convert it to grayscale, describe it,
        # and classify it

        # TOO SMOOTH
        #filtered = cv2.bilateralFilter(image, 7, 75, 75)
        # Median blur : remove large outliers
        #filtered = cv2.medianBlur(image, 5)  DON'T DO THIS EITHER!!!
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        #gray = cv2.cvtColor(filtered, cv2.COLOR_BGR2GRAY)


        # Do a histogram equalization to bring out contrast
        # create a CLAHE object (Arguments are optional).
        # is this slow? - no, something else is making sliding windows slow
        clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
        claheGray = clahe.apply(gray)

        # show the new image
        # cv2.imshow('claheGray', claheGray)
        # cv2.waitKey(0)
        # cv2.destroyAllWindows()

        # local binary pattern histogram
        lbpHist = self.desc.describe(claheGray)

        logging.debug("lbpHist = %s", lbpHist)

        # if self.doHSVhist :
        #
        #     # HSV histogram
        #     # channels = [0, 1] because we need to process both H and S plane.
        #     # bins = [180, 256] 180 for H plane and 256 for S plane.
        #     # range = [0, 180, 0, 256]
        #     # Hue value lies between 0 and 180 & Saturation lies between 0 and 256.
        #     hsv = cv2.cvtColor(filtered, cv2.COLOR_BGR2HSV)
        #
        #     # hsvHistTest = cv2.calcHist([hsv], [1], None, [256], [0, 255])
        #     # cv2.normalize(hsvHistTest, hsvHistTest, 0, 255, cv2.NORM_MINMAX)
        #     #logging.debug("hsvHistTest = %s", hsvHistTest)
        #
        #     # only look at the saturation.  Saturation of white/gray < 0.15 * 255 = 38
        #     (hsvHist, _) = np.histogram(hsv[:,:,1].ravel(),
        #                              bins=np.arange(0, 255),
        #                              #bins=np.arange(0, 7),
        #                              range=(0, 255))
        #
        #     # normalize the histogram
        #     eps = 1e-7
        #     hsvHist = hsvHist.astype("float")
        #     hsvHist /= (hsvHist.sum() + eps)
        #
        #     logging.debug("hsvHist.sum() = %s", hsvHist.sum())
        #     logging.debug("hsvHist = %s", hsvHist)
        #
        #     # Now normalize hsvHist :
        #     # normalize the histogram
        #     # eps = 1e-7
        #     # hsvHist = hsvHist.astype("float")
        #     # hsvHist /= (hsvHist.sum() + eps)
        #
        #     #print "hsvHist = " + str(hsvHist)
        #     #print "hsvHist = " + str(hsvHist.flatten)
        #
        #
        # if self.doHSVhist:
        #     # Make a concatenated feature vector of color statistics and lbp
        #     # texture features
        #     # (horizontally stack the two numpy arrays) [1,2,3] [4,5,6]  ->  [1,2,3,4,5,6]
        #     hist = np.hstack([lbpHist, hsvHist])
        #
        #     #logging.debug("hist = %s", hist )

        #     return hist

        return lbpHist
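
A hypothetical usage sketch for CloudFeatureHistogram follows; the image file name is an assumption, and the histogram length assumes the scikit-image based describe() sketched after Code Example #1.

# Hypothetical usage of CloudFeatureHistogram on a single image.
import cv2

cfh = CloudFeatureHistogram()
image = cv2.imread("sky_patch.png")  # assumed input image
lbpHist = cfh.get_feature_histogram(image)
print(lbpHist.shape)  # (26,) for LocalBinaryPatterns(24, 8)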