    mode="RGB")  # NOTE(review): closes a `resized1 = imresize(...)` call that starts above this view
    # Resize the second stacked face (presumably channels 3:6 of the pair) the same way.
    resized2 = imresize(newim[3:6, :, :], (feature_width, feature_height), interp="bicubic", mode="RGB")
    # re-package into a new X entry (both resized faces stacked along the channel axis)
    newX = np.concatenate([resized1, resized2], axis=2)
    # the next line is important.
    # if you don't normalize your data, all predictions will be 0 forever.
    newX = newX / 255.0
    return newX

# Load the raw LFW pairs (deep-funneled variant); each X entry holds two stacked face images.
(X_train, y_train), (X_test, y_test) = lfw.load_data("deepfunneled")
# print(y_train[:20])

# the data, shuffled and split between train and test sets
X_train = np.asarray( [crop_and_downsample(x, downsample_size) for x in X_train])
X_test = np.asarray([crop_and_downsample(x, downsample_size) for x in X_test])

# Horizontally flip each face of the pair along axis 2 — assumes layout is
# (sample, height, width, channel) with face 1 in channels 0:3 and face 2 in 3:6;
# TODO(review): confirm against crop_and_downsample's output layout.
X_train_img1_flipped = np.flip(X_train[:, :, :, 0:3], 2)
X_train_img2_flipped = np.flip(X_train[:, :, :, 3:6], 2)
# Re-stack the flipped faces as an augmented copy of the training set.
X_train_extra = np.concatenate((X_train_img1_flipped, X_train_img2_flipped), axis=3)
# print (X_train_extra.shape)
import numpy as np
from lfw_fuel import lfw
from models import lenet
from clean import clean
from keras.callbacks import TensorBoard
from keras.callbacks import Callback
import matplotlib.pyplot as plt
from keras.models import load_model

# Load the data, shuffled and split between train and test sets
(X_train_orig, y_train_orig), (X_test_orig, y_test_orig) = lfw.load_data("deepfunneled")

# Preprocess the images
(X_train, y_train), (X_test, y_test) = clean(X_train_orig, y_train_orig,
                                             X_test_orig, y_test_orig)

# TensorBoard callback for the LeNet run; logs graph, histograms and images.
tb = TensorBoard(log_dir='./lenet_logs',
                 write_graph=True,
                 histogram_freq=1,
                 write_images=True,
                 embeddings_freq=0)


class LossHistory(Callback):
    """Keras callback that records the training loss after every batch.

    After fit(), `self.losses` holds one float (or None if Keras did not
    report a loss) per processed batch.
    """

    def on_train_begin(self, logs=None):
        # Reset the per-batch trace at the start of each fit() call.
        self.losses = []

    def on_batch_end(self, batch, logs=None):
        # FIX: `logs=None` instead of the mutable default `logs={}`
        # (mutable default arguments are shared across calls).
        logs = logs or {}
        self.losses.append(logs.get('loss'))
import numpy as np
from lfw_fuel import lfw
from models import vgg
from clean import clean
from keras.callbacks import TensorBoard
from keras.callbacks import Callback
import matplotlib.pyplot as plt
from keras.models import load_model

# Load the data, shuffled and split between train and test sets
(X_train_orig, y_train_orig), (X_test_orig, y_test_orig) = lfw.load_data("deepfunneled")

# Preprocess the images
(X_train, y_train), (X_test, y_test) = clean(X_train_orig, y_train_orig,
                                             X_test_orig, y_test_orig)

# TensorBoard callback for the VGG run; logs graph, histograms and images.
tb = TensorBoard(log_dir='./vgg_logs',
                 write_graph=True,
                 histogram_freq=1,
                 write_images=True,
                 embeddings_freq=0)


class LossHistory(Callback):
    """Keras callback that records the training loss after every batch.

    After fit(), `self.losses` holds one float (or None if Keras did not
    report a loss) per processed batch.
    """

    def on_train_begin(self, logs=None):
        # Reset the per-batch trace at the start of each fit() call.
        self.losses = []

    def on_batch_end(self, batch, logs=None):
        # FIX: `logs=None` instead of the mutable default `logs={}`
        # (mutable default arguments are shared across calls).
        logs = logs or {}
        self.losses.append(logs.get('loss'))
batch_size = 128
nb_classes = 2
nb_epoch = 12

# Side length of the downsampled face crops fed to the network.
feature_width = 32
feature_height = 32


def cropImage(im):
    """Center-crop a stacked LFW image pair and resize both faces.

    `im` is treated as an iterable of six (250, 250) channel planes —
    two RGB faces stacked (assumes lfw_fuel's layout; TODO confirm).
    Returns a (6, feature_height, feature_width) uint8 channels-first
    array: RGB of face 1 followed by RGB of face 2.
    """
    im2 = np.dstack(im).astype(np.uint8)
    # return centered 128x128 from original 250x250 (40% of area)
    newim = im2[61:189, 61:189]
    sized1 = imresize(newim[:, :, 0:3], (feature_width, feature_height),
                      interp="bicubic", mode="RGB")
    sized2 = imresize(newim[:, :, 3:6], (feature_width, feature_height),
                      interp="bicubic", mode="RGB")
    # Re-stack as six channel planes (channels-first).
    return np.asarray([sized1[:, :, 0], sized1[:, :, 1], sized1[:, :, 2],
                       sized2[:, :, 0], sized2[:, :, 1], sized2[:, :, 2]])


# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = lfw.load_data("deepfunneled")

# crop features
# BUG FIX: on Python 3, `map()` returns an iterator, so
# np.asarray(map(cropImage, X_train)) built a useless 0-d object array and
# the shape-based prints below would fail. Materialize with comprehensions.
X_train = np.asarray([cropImage(x) for x in X_train])
X_test = np.asarray([cropImage(x) for x in X_test])

# print shape of data while model is building
print("{1} train samples, {2} channel{0}, {3}x{4}".format(
    "" if X_train.shape[1] == 1 else "s", *X_train.shape))
print("{1} test samples, {2} channel{0}, {3}x{4}".format(
    "" if X_test.shape[1] == 1 else "s", *X_test.shape))

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

model = Sequential()
model.add(Convolution2D(32, 6, 3, 3, border_mode='full'))