def byte_image2(byte_code):
    """Return the LBP texture features of a byte-code image as a plain list.

    The byte code is first rendered to a 2-D image via ``byte_make_image``,
    then a local-binary-pattern histogram (radius 10, 10 points, zeros
    included) is computed over it.
    """
    # BUG FIX: the original initialized `img_feat = []` and then immediately
    # overwrote it — dead assignment removed.
    img = byte_make_image(byte_code)
    spoints = lbp(img, 10, 10, ignore_zeros=False)
    return spoints.tolist()
def byte_image2(byte_code):
    """Compute an LBP feature vector for *byte_code*.

    Renders the byte code to an image with ``byte_make_image`` and returns
    its local-binary-pattern histogram (radius 10, 10 points, zeros kept)
    converted to a Python list.
    """
    image = byte_make_image(byte_code)
    return lbp(image, 10, 10, ignore_zeros=False).tolist()
# BUG FIX: this snippet used `sys`, `mahotas`, `np`, and `lbp` without
# importing them — the missing imports are added here. `lbp(img, radius,
# n_points)` matches mahotas.features.lbp's signature.
import sys

import mahotas
import numpy as np
from mahotas.features import lbp
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegressionCV

# settings for LBP
radius = 2
n_points = 16  # 4116 dimensional lbp histogram

# X: one L2-normalized LBP histogram per image; Y: 0 = background, 1 = object.
X = []
Y = []
with open(sys.argv[1]) as f_in:
    for filename in f_in:
        img = mahotas.imread(filename.rstrip(), as_grey=True)
        # In-place resize to a fixed 100x100 so every histogram is comparable.
        img.resize(100, 100)
        lbp_hist = lbp(img, radius, n_points)
        lbp_hist /= np.linalg.norm(lbp_hist)
        X.append(lbp_hist)
        # Label from the file path: 'background' in the name means class 0.
        if 'background' in filename:
            Y.append(0)
        else:
            Y.append(1)

# Reduce the histogram dimensionality before fitting the classifier.
pca = PCA(n_components=40)
X = pca.fit_transform(X)

#clf = svm.SVC(kernel='linear')
clf = LogisticRegressionCV()
clf.fit(X, Y)
def worker(procnum, image_path, image_class, image_name):
    """Extract noise-residual features from one image for camera-model ID.

    The image is (optionally) center-cropped to 512x512, wavelet-denoised,
    and the residual (original - denoised) is split into R/G/B planes. For
    each plane the feature vector gets mean, variance, std, kurtosis, skew,
    followed by the plane's LBP histogram (radius 2, 10 points). If
    *image_class* is truthy its numeric label (via ``class_dict``) is
    appended; if *image_name* is truthy the name is appended last.

    Parameters
    ----------
    procnum : int-like
        Progress identifier used only for logging.
    image_path : str
        Path of the image to read.
    image_class : str or falsy
        Class name; truthy also triggers the 512x512 center crop.
    image_name : str or falsy
        Optional name appended to the feature row.

    Returns
    -------
    list
        Statistics + LBP features (+ optional label and name).
    """
    print("Calculando features da imagem {}".format(procnum))
    try:
        image = misc.imread(image_path)
    except FileNotFoundError:
        print("File {} not found.".format(image_path))
        # BUG FIX: was exit(0), which reports success on failure.
        exit(1)

    # Training images (those with a class) are cropped to a fixed center
    # patch; unlabeled ones are used whole.
    if image_class:
        croped_image = img_as_float(crop_center(image, 512, 512))
    else:
        croped_image = img_as_float(image)
    del image  # free the full-size image early

    denoised_image = denoise_wavelet(croped_image, multichannel=True)
    # Noise residual: what the denoiser removed carries the sensor pattern.
    noisy_image = np.array(croped_image - denoised_image)
    noisy_red = np.array(noisy_image[:, :, 0])
    noisy_green = np.array(noisy_image[:, :, 1])
    noisy_blue = np.array(noisy_image[:, :, 2])

    lbp_red_features = lbp(noisy_red, 2, 10)
    lbp_green_features = lbp(noisy_green, 2, 10)
    lbp_blue_features = lbp(noisy_blue, 2, 10)

    features = [
        # red band
        noisy_red.mean(),
        np.var(noisy_red),
        noisy_red.std(),
        kurtosis(noisy_red.flatten()),
        skew(noisy_red.flatten()),
        # green band
        noisy_green.mean(),
        np.var(noisy_green),
        noisy_green.std(),
        kurtosis(noisy_green.flatten()),
        skew(noisy_green.flatten()),
        # blue band
        noisy_blue.mean(),
        np.var(noisy_blue),
        # BUG FIX: was noisy_red.std() — copy-paste error in the blue band.
        noisy_blue.std(),
        kurtosis(noisy_blue.flatten()),
        skew(noisy_blue.flatten()),
    ]

    features.extend(lbp_red_features)
    features.extend(lbp_green_features)
    features.extend(lbp_blue_features)

    # image class
    if image_class:
        features.append(class_dict[image_class])
    if image_name:
        features.append(image_name)

    print("Features da imagem {} calculadas!".format(procnum))
    return features
def gabor_composite_lbp3(image):
    """Return the radius-3, 16-point LBP histogram of *image*'s Gabor composite."""
    composite = gabor_composite_from_thetas(image)
    return lbp(composite, radius=3, points=16)
import sys  # BUG FIX: sys.argv is read below but sys was never imported

import mahotas  # BUG FIX: mahotas.imread is used below but was never imported
import numpy as np
from mahotas.features import lbp  # BUG FIX: lbp() was called but never imported
from sklearn import svm
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegressionCV

# settings for LBP
radius = 2
n_points = 16  # 4116 dimensional lbp histogram

# X: one L2-normalized LBP histogram per image; Y: 0 = background, 1 = object.
X = []
Y = []
with open(sys.argv[1]) as f_in:
    for filename in f_in:
        img = mahotas.imread(filename.rstrip(), as_grey=True)
        # In-place resize to a fixed 100x100 so every histogram is comparable.
        img.resize(100, 100)
        lbp_hist = lbp(img, radius, n_points)
        lbp_hist /= np.linalg.norm(lbp_hist)
        X.append(lbp_hist)
        # Label from the file path: 'background' in the name means class 0.
        if 'background' in filename:
            Y.append(0)
        else:
            Y.append(1)

# Reduce the histogram dimensionality before fitting the classifier.
pca = PCA(n_components=40)
X = pca.fit_transform(X)

#clf = svm.SVC(kernel='linear')
clf = LogisticRegressionCV()
clf.fit(X, Y)
def lbp_r1(image):
    """Return the 8-point, radius-1 LBP histogram of *image*."""
    histogram = lbp(image, radius=1, points=8)
    return histogram
def lbp_r2(image):
    """Return the 12-point, radius-2 LBP histogram of *image*."""
    histogram = lbp(image, radius=2, points=12)
    return histogram
def lbp_r3(image):
    """Return the 16-point, radius-3 LBP histogram of *image*."""
    histogram = lbp(image, radius=3, points=16)
    return histogram
def compute(self, image):
    """Compute the LBP descriptor of *image* using this instance's
    ``radius`` and ``points`` settings (zeros are not ignored)."""
    descriptor = lbp(
        image,
        radius=self.radius,
        points=self.points,
        ignore_zeros=False,
    )
    return descriptor