Example #1
def main():
    """Train a k-NN classifier.
    """
    # construct the argument parser and parse the arguments
    args = argparse.ArgumentParser()
    args.add_argument("-d",
                      "--dataset",
                      required=True,
                      help="path to input dataset")
    args.add_argument("-k",
                      "--neighbors",
                      type=int,
                      default=1,
                      help="# of nearest neighbors for classification")
    args.add_argument(
        "-j",
        "--jobs",
        type=int,
        default=-1,
        help="# of jobs for k-NN distance (-1 uses all available cores)")
    args = vars(args.parse_args())

    # grab the list of images that we'll be describing
    print("[INFO] loading images...")
    image_paths = list(paths.list_images(args["dataset"]))

    # initialize the image preprocessor, load the dataset from disk,
    # and reshape the data matrix
    preprocessor = SimplePreprocessor(32, 32)
    loader = SimpleDatasetLoader(preprocessors=[preprocessor])
    (data, labels) = loader.load(image_paths, verbose=500)
    data = data.reshape((data.shape[0], 3072))

    # show some information on memory consumption of the images
    print("[INFO] features matrix: {:.1f}MB".format(data.nbytes /
                                                    (1024 * 1024.0)))

    # encode the labels as integers
    label_encoder = LabelEncoder()
    labels = label_encoder.fit_transform(labels)

    # partition the data into training and testing splits using 75% of
    # the data for training and the remaining 25% for testing
    (train_x, test_x, train_y, test_y) = train_test_split(data,
                                                          labels,
                                                          test_size=0.25,
                                                          random_state=42)

    # train and evaluate a k-NN classifier on the raw pixel intensities
    print("[INFO] evaluating k-NN classifier...")
    model = KNeighborsClassifier(n_neighbors=args["neighbors"],
                                 n_jobs=args["jobs"])
    model.fit(train_x, train_y)
    print(
        classification_report(test_y,
                              model.predict(test_x),
                              target_names=label_encoder.classes_))
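Example #1 (and most of the examples below) relies on two helpers that are not shown on this page: SimplePreprocessor, which resizes an image to a fixed width and height, and SimpleDatasetLoader, which reads images from disk, applies the preprocessors, and derives each label from the image's parent directory. A minimal sketch of what they might look like, assuming an OpenCV/NumPy stack and a .../{class_name}/{image}.jpg directory layout:

import os

import cv2
import numpy as np


class SimplePreprocessor:
    def __init__(self, width, height, inter=cv2.INTER_AREA):
        # target spatial dimensions and the interpolation method used when resizing
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        # resize to a fixed size, ignoring the original aspect ratio
        return cv2.resize(image, (self.width, self.height), interpolation=self.inter)


class SimpleDatasetLoader:
    def __init__(self, preprocessors=None):
        self.preprocessors = preprocessors if preprocessors is not None else []

    def load(self, imagePaths, verbose=-1):
        data, labels = [], []
        for (i, image_path) in enumerate(imagePaths):
            # load the image and take the class label from the directory name
            image = cv2.imread(image_path)
            label = image_path.split(os.path.sep)[-2]
            for preprocessor in self.preprocessors:
                image = preprocessor.preprocess(image)
            data.append(image)
            labels.append(label)
            # show progress every `verbose` images
            if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
                print("[INFO] processed {}/{}".format(i + 1, len(imagePaths)))
        return (np.array(data), np.array(labels))

With a 32x32 resize and 3 color channels, flattening each image yields the 32 * 32 * 3 = 3072-dimensional feature vectors that Example #1 reshapes the data matrix into.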
Example #2
def main():
    """Train ShallowNet on animals dataset.
    """
    # construct the argument parser and parse the arguments
    args = argparse.ArgumentParser()
    args.add_argument("-d", "--dataset", required=True, help="path to input dataset")
    args = vars(args.parse_args())

    # grab the list of images that we'll be describing
    print("[INFO] loading images...")
    image_paths = list(paths.list_images(args["dataset"]))

    # initialize the image preprocessors
    simple_preprocessor = SimplePreprocessor(32, 32)
    image_to_array_preprocessor = ImageToArrayPreprocessor()

    # load the dataset from disk then scale the raw pixel intensities to the range [0, 1]
    dataset_loader = SimpleDatasetLoader(preprocessors=[simple_preprocessor, image_to_array_preprocessor])
    (data, labels) = dataset_loader.load(image_paths, verbose=500)
    data = data.astype("float") / 255.0

    # partition the data into training and testing splits using 75% of
    # the data for training and the remaining 25% for testing
    (train_x, test_x, train_y, test_y) = train_test_split(data, labels, test_size=0.25, random_state=42)
    # convert the labels from integers to vectors
    label_binarizer = LabelBinarizer()
    train_y = label_binarizer.fit_transform(train_y)
    test_y = label_binarizer.transform(test_y)

    # initialize the optimizer and model
    print("[INFO] compiling model...")
    opt = SGD(lr=0.005)
    model = ShallowNet.build(width=32, height=32, depth=3, classes=3)
    model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    model_fit = model.fit(train_x, train_y, validation_data=(test_x, test_y), batch_size=32, epochs=100, verbose=1)

    # evaluate the network
    print("[INFO] evaluating network...")
    predictions = model.predict(test_x, batch_size=32)
    print(
        classification_report(test_y.argmax(axis=1), predictions.argmax(axis=1), target_names=["cat", "dog", "panda"])
    )

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(np.arange(0, 100), model_fit.history["loss"], label="train_loss")
    plt.plot(np.arange(0, 100), model_fit.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, 100), model_fit.history["acc"], label="train_acc")
    plt.plot(np.arange(0, 100), model_fit.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend()
    plt.show()
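ShallowNet.build is not defined on this page. In these examples it is a very small CNN: a single CONV => RELU block followed by a softmax classifier. A hedged sketch, assuming the older standalone Keras API that the surrounding code (SGD(lr=...), the acc/val_acc history keys) already uses:

from keras import backend as K
from keras.layers import Activation, Conv2D, Dense, Flatten
from keras.models import Sequential


class ShallowNet:
    @staticmethod
    def build(width, height, depth, classes):
        # default to "channels last" ordering, falling back to "channels first"
        # if that is what the backend is configured for
        input_shape = (height, width, depth)
        if K.image_data_format() == "channels_first":
            input_shape = (depth, height, width)

        # CONV => RELU => flatten => softmax classifier
        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding="same", input_shape=input_shape))
        model.add(Activation("relu"))
        model.add(Flatten())
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model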
Example #3
def pre_process_data(dimensions, image_paths):
	# construct the image generator for data augmentation
	aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
		height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
		horizontal_flip=True, fill_mode="nearest")
	# initialize the image preprocessors
	aap = AspectAwarePreprocessor(dimensions,dimensions)
	iap = ImageToArrayPreprocessor()
	# load the dataset from disk then scale the raw pixel intensities to
	# the range [0, 1]
	sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
	(data, labels) = sdl.load(image_paths, verbose=500)
	data = data.astype("float") / 255.0
	return data, labels
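AspectAwarePreprocessor is not shown here either. Unlike SimplePreprocessor it keeps the aspect ratio: it resizes along the shorter dimension and then center-crops to the target size. A sketch under that assumption, using imutils.resize:

import cv2
import imutils


class AspectAwarePreprocessor:
    def __init__(self, width, height, inter=cv2.INTER_AREA):
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        (h, w) = image.shape[:2]
        d_w = d_h = 0

        # resize along the shorter dimension and remember how much to crop
        if w < h:
            image = imutils.resize(image, width=self.width, inter=self.inter)
            d_h = int((image.shape[0] - self.height) / 2.0)
        else:
            image = imutils.resize(image, height=self.height, inter=self.inter)
            d_w = int((image.shape[1] - self.width) / 2.0)

        # center-crop, then force the exact target size to absorb any rounding
        (h, w) = image.shape[:2]
        image = image[d_h:h - d_h, d_w:w - d_w]
        return cv2.resize(image, (self.width, self.height), interpolation=self.inter)

Note that pre_process_data above also builds an ImageDataGenerator (aug) but never returns it, so a caller that wants augmentation has to construct its own generator, as Example #7 does.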
Example #4
def main():
    """Load pre-trained model from disk
    """
    # construct the argument parser and parse the arguments
    args = argparse.ArgumentParser()
    args.add_argument("-d",
                      "--dataset",
                      required=True,
                      help="path to input dataset")
    args.add_argument("-m",
                      "--model",
                      required=True,
                      help="path to pre-trained model")
    args = vars(args.parse_args())

    # initialize the class labels
    class_labels = ["cat", "dog", "panda"]

    # grab the list of images in the dataset then randomly sample indexes into the image paths list
    print("[INFO] sampling images...")
    image_paths = np.array(list(paths.list_images(args["dataset"])))
    idxs = np.random.randint(0, len(image_paths), size=(10, ))
    image_paths = image_paths[idxs]

    # initialize the image preprocessors
    simple_preprocessor = SimplePreprocessor(32, 32)
    image_to_array_preprocessor = ImageToArrayPreprocessor()

    # load the dataset from disk then scale the raw pixel intensities to the range [0, 1]
    dataset_loader = SimpleDatasetLoader(
        preprocessors=[simple_preprocessor, image_to_array_preprocessor])
    (data, _) = dataset_loader.load(image_paths)
    data = data.astype("float") / 255.0

    # load the pre-trained network
    print("[INFO] loading pre-trained network...")
    model = load_model(args["model"])

    # make predictions on the images
    print("[INFO] predicting...")
    preds = model.predict(data, batch_size=32).argmax(axis=1)
    # loop over the sample images
    for (i, image_path) in enumerate(image_paths):
        # load the example image, draw the prediction, and display it to our screen
        image = cv2.imread(image_path)
        cv2.putText(image, "Label: {}".format(class_labels[preds[i]]),
                    (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
        cv2.imshow("Image", image)
        cv2.waitKey(0)
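ImageToArrayPreprocessor is a thin wrapper around the Keras img_to_array utility, which converts an OpenCV image into the array layout the configured backend expects (channels first or channels last). A minimal sketch, assuming the standalone Keras API used throughout these examples:

from keras.preprocessing.image import img_to_array


class ImageToArrayPreprocessor:
    def __init__(self, data_format=None):
        # None defers to the data format configured in keras.json
        self.data_format = data_format

    def preprocess(self, image):
        # rearrange the image into the array layout Keras expects
        return img_to_array(image, data_format=self.data_format)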
Example #5
def main():
    """Run various regularization techniques.
    """
    # construct the argument parser and parse the arguments
    args = argparse.ArgumentParser()
    args.add_argument("-d",
                      "--dataset",
                      required=True,
                      help="path to input dataset")
    args = vars(args.parse_args())

    # grab the list of image paths
    print("[INFO] loading images...")
    image_paths = list(paths.list_images(args["dataset"]))

    # initialize the image preprocessor, load the dataset from disk,
    # and reshape the data matrix
    preprocessor = SimplePreprocessor(32, 32)
    loader = SimpleDatasetLoader(preprocessors=[preprocessor])
    (data, labels) = loader.load(image_paths, verbose=500)
    data = data.reshape((data.shape[0], 3072))

    # encode the labels as integers
    label_encoder = LabelEncoder()
    labels = label_encoder.fit_transform(labels)
    # partition the data into training and testing splits using 75% of
    # the data for training and the remaining 25% for testing
    (train_x, test_x, train_y, test_y) = train_test_split(data,
                                                          labels,
                                                          test_size=0.25,
                                                          random_state=5)

    # loop over our set of regularizers
    for regularizer in (None, "l1", "l2"):
        # train an SGD classifier using a softmax loss function and the
        # specified regularization function for 10 epochs
        print("[INFO] training model with `{}` penalty".format(regularizer))
        model = SGDClassifier(loss="log",
                              penalty=regularizer,
                              max_iter=10,
                              learning_rate="constant",
                              tol=1e-3,
                              eta0=0.01,
                              random_state=42)
        model.fit(train_x, train_y)
        # evaluate the classifier
        acc = model.score(test_x, test_y)
        print("[INFO] `{}` penalty accuracy: {:.2f}%".format(
            regularizer, acc * 100))
Example #6
def separate_data(imagePaths, classNames):
    print("[INFO] Separating data.......................")
    iap = ImageToArrayPreprocessor()
    aap = AspectAwarePreprocessor(WIDTH, HEIGHT)
    sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
    (data, labels) = sdl.load(imagePaths, verbose=500)
    data = data.astype("float") / 255.0

    # partition the data into training and testing splits using 75% of
    # the data for training and the remaining 25% for testing
    (trainX, testX, trainY, testY) = train_test_split(data, labels,
                                                      test_size=0.25, random_state=42)

    # convert the labels from integers to vectors
    trainY = LabelBinarizer().fit_transform(trainY)
    testY = LabelBinarizer().fit_transform(testY)

    if len(classNames) == 2:
        trainY = np.hstack((trainY, 1 - trainY))
        testY = np.hstack((testY, 1 - testY))

    return [trainX, testX, trainY, testY]
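The np.hstack step in separate_data handles a quirk of LabelBinarizer: with exactly two classes it returns a single 0/1 column, while categorical_crossentropy expects one column per class. Stacking that column next to its complement restores a proper two-column one-hot encoding, for example:

import numpy as np
from sklearn.preprocessing import LabelBinarizer

y = ["cat", "dog", "dog", "cat"]
binary = LabelBinarizer().fit_transform(y)   # shape (4, 1): [[0], [1], [1], [0]]
one_hot = np.hstack((binary, 1 - binary))    # shape (4, 2): [[0, 1], [1, 0], [1, 0], [0, 1]]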
Example #7
def main():
    """Run image classification
    """
    # construct the argument parser and parse the arguments
    args = argparse.ArgumentParser()
    args.add_argument("-d",
                      "--dataset",
                      required=True,
                      help="path to input dataset")
    args = vars(args.parse_args())

    # grab the list of images that we'll be describing, then extract
    # the class label names from the image paths
    print("[INFO] loading images...")
    image_paths = list(paths.list_images(args["dataset"]))
    class_names = [pt.split(os.path.sep)[-2] for pt in image_paths]
    class_names = [str(x) for x in np.unique(class_names)]

    # initialize the image preprocessors
    aspect_aware_preprocessor = AspectAwarePreprocessor(64, 64)
    image_to_array_preprocessor = ImageToArrayPreprocessor()

    # load the dataset from disk then scale the raw pixel intensities to the range [0, 1]
    sdl = SimpleDatasetLoader(
        preprocessors=[aspect_aware_preprocessor, image_to_array_preprocessor])
    (data, labels) = sdl.load(image_paths, verbose=500)
    data = data.astype("float") / 255.0

    # partition the data into training and testing splits using 75% of
    # the data for training and the remaining 25% for testing
    (train_x, test_x, train_y, test_y) = train_test_split(data,
                                                          labels,
                                                          test_size=0.25,
                                                          random_state=42)

    # convert the labels from integers to vectors
    train_y = LabelBinarizer().fit_transform(train_y)
    test_y = LabelBinarizer().fit_transform(test_y)

    # construct the image generator for data augmentation
    aug = ImageDataGenerator(
        rotation_range=30,
        width_shift_range=0.1,
        height_shift_range=0.1,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode="nearest",
    )

    # initialize the optimizer and model
    print("[INFO] compiling model...")
    opt = SGD(lr=0.05)
    model = MiniVGGNet.build(width=64,
                             height=64,
                             depth=3,
                             classes=len(class_names))
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    model_fit = model.fit_generator(
        aug.flow(train_x, train_y, batch_size=32),
        validation_data=(test_x, test_y),
        steps_per_epoch=len(train_x) // 32,
        epochs=100,
        verbose=1,
    )

    # evaluate the network
    print("[INFO] evaluating network...")
    predictions = model.predict(test_x, batch_size=32)
    print(
        classification_report(test_y.argmax(axis=1),
                              predictions.argmax(axis=1),
                              target_names=class_names))

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(np.arange(0, 100), model_fit.history["loss"], label="train_loss")
    plt.plot(np.arange(0, 100),
             model_fit.history["val_loss"],
             label="val_loss")
    plt.plot(np.arange(0, 100), model_fit.history["acc"], label="train_acc")
    plt.plot(np.arange(0, 100), model_fit.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend()
    plt.show()
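Example #7 uses fit_generator, which belongs to the older standalone Keras API (as do SGD(lr=...) and the acc/val_acc history keys in the plot). On TensorFlow 2.x Keras, model.fit accepts generators directly, so the training call would look roughly like the sketch below; note the history keys also become accuracy/val_accuracy there.

model_fit = model.fit(
    aug.flow(train_x, train_y, batch_size=32),
    validation_data=(test_x, test_y),
    steps_per_epoch=len(train_x) // 32,
    epochs=100,
    verbose=1,
)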
Example #8
import argparse

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
args = vars(ap.parse_args())

# grab the list of image paths
print("[INFO] loading images...")
imgPaths = list(paths.list_images(args["dataset"]))

# initialize the image preprocessor, load the dataset from disk,
# and reshape the data matrix
sp = SimplePreprocessor(32, 32)
sdl = SimpleDatasetLoader(preprocessors=[sp])
(data, labels) = sdl.load(imagePaths=imgPaths, verbose=500)
data = data.reshape((data.shape[0], 3072))

# encode the labels as integers
le = LabelEncoder()
labels = le.fit_transform(labels)

# partition the data into training and testing splits using 75%
# of the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.25,
                                                  random_state=5)

# let's apply a few different types of regularization when training our SGDClassifier
# loop over our set of regularizers
Example #9
ap.add_argument('-o',
                '--output',
                required=True,
                help='path to output for training monitor')
args = vars(ap.parse_args())

# grab the list of image paths
print('[INFO] loading images...')
image_paths = list(paths.list_images(args['dataset']))

# initialize the preprocessors
aap, iap = AspectAwarePreprocessor(64, 64), ImageToArrayPreprocessor()

# load images and scale them to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
data, labels = sdl.load(image_paths, verbose=500)
data = data.astype('float') / 255.

# partition data and label
trainX, testX, trainY, testY = train_test_split(data,
                                                labels,
                                                test_size=0.25,
                                                random_state=42)

# convert label to vector
le = LabelBinarizer()
trainY = le.fit_transform(trainY)
testY = le.transform(testY)
target_names = le.classes_

# monitor training
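The --output argument and the trailing "# monitor training" comment suggest this snippet is cut off just before a training-monitor callback is wired up. A hedged sketch of what typically follows in this style of script, assuming a TrainingMonitor callback class that plots loss/accuracy to the given figure path after every epoch:

import os
from pyimagesearch.callbacks import TrainingMonitor  # assumed module path

# build the output figure path and register the callback; the resulting list
# would then be passed as callbacks=callbacks to model.fit(...)
fig_path = os.path.sep.join([args['output'], '{}.png'.format(os.getpid())])
callbacks = [TrainingMonitor(fig_path)]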
Example #10
# grab the list of images in the dataset then
# randomly sample indexes into the image paths list
print("[INFO] sampling images...")
imagePaths = np.array(list(paths.list_images(args["dataset"])))
idxs = np.random.randint(0, len(imagePaths), size=(10, ))
imagePaths = imagePaths[idxs]

# initialize the image preprocessors
sp = SimplePreprocessor(32, 32)
iap = ImageToArrayPreprocessor()

# load the dataset from disk then scale the raw pixel intensities
# to the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths)
data = data.astype("float") / 255.0

# load the pre-trained network
print("[INFO] loading pre-trained network...")
model = load_model(args["model"])

# make predictions on the images
print("[INFO] predicting...")
preds = model.predict(data, batch_size=32).argmax(axis=1)

# loop over the sample images
for (i, imagePath) in enumerate(imagePaths):
    # load the example image, draw the prediction and display it
    # to our screen
    image = cv2.imread(imagePath)
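    # (continuation sketch: the loop is truncated here; Example #4 above shows the
    # same pattern in full, drawing the predicted label and displaying the image.
    # classLabels is assumed to be defined earlier in the script, as in Example #4.)
    cv2.putText(image, "Label: {}".format(classLabels[preds[i]]),
                (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
    cv2.imshow("Image", image)
    cv2.waitKey(0)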
Example #11
from imutils import paths
import argparse

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
args = vars(ap.parse_args())

# get the list of image paths
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))

# initialise the preprocessor, load dataset, and reshape data matrix
sp = SimplePreprocessor(32, 32)
sdl = SimpleDatasetLoader(preprocessors=[sp])
(data, labels) = sdl.load(imagePaths, verbose=1000)
data = data.reshape((data.shape[0], 3072))

# encode the labels as integers
le = LabelEncoder()
labels = le.fit_transform(labels)

# partition into train (75%) and test (25%) sets
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.25,
                                                  random_state=5)

# loop over our set of regularizers
for r in (None, "l1", "l2"):
    # train an SGD classifier using the softmax loss function and the specified
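    # regularization for 10 epochs (continuation sketch: the full loop appears in
    # Example #5 above, assuming SGDClassifier is imported from sklearn.linear_model)
    print("[INFO] training model with `{}` penalty".format(r))
    model = SGDClassifier(loss="log", penalty=r, max_iter=10,
                          learning_rate="constant", tol=1e-3, eta0=0.01,
                          random_state=42)
    model.fit(trainX, trainY)

    # evaluate the classifier
    acc = model.score(testX, testY)
    print("[INFO] `{}` penalty accuracy: {:.2f}%".format(r, acc * 100))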
Example #12
# initialize the class labels
class_labels = ['cat', 'dog', 'pandas']

# grab the list of image paths and randomly sample indexes into the image paths list
print('[INFO] sampling images...')
image_paths = np.array(list(paths.list_images(args['dataset'])))
idxs = np.random.randint(0, len(image_paths), size=10)
image_paths = image_paths[idxs]

# initialize the image preprocessors
sp = SimplePreprocessor(32, 32)
iap = ImageToArrayPreprocessor()

# load the dataset and scale the raw image to range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
data, labels = sdl.load(image_paths)
data = data.astype('float') / 255

# load the pre-trained network
print('[INFO] loading pre-trained network...')
model = load_model(args['model'])

# make predictions on the images
print('[INFO] predicting...')
preds = model.predict(data, batch_size=32).argmax(axis=1)

# visualize the results
# loop over the sample images
for ii, image_path in enumerate(image_paths):
    # load the image, draw the prediction and display it to our screen
    image = cv2.imread(image_path)
Example #13
import matplotlib.pyplot as plt
import numpy as np
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True, help="path to input dataset")
args = vars(ap.parse_args())

print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))

sp = SimplePreprocessor(32, 32)
iap = ImageToArrayPreprocessor()

sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

print("[INFO] compiling model...")
opt = SGD(lr=0.005)
model = ShallowNet.build(width=32, height=32, depth=3, classes=3)
model.compile(loss="categorical_crossentropy" , optimizer=opt,metrics=["accuracy"])
print("[INFO] training network...")

H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=100, verbose=1)

print("[INFO] evaluating network...")
Example #14
def main():
    """Fine tune VGG16
    """
    # construct the argument parser and parse the arguments
    args = argparse.ArgumentParser()
    args.add_argument("-d",
                      "--dataset",
                      required=True,
                      help="path to input dataset")
    args.add_argument("-m",
                      "--model",
                      required=True,
                      help="path to output model")
    args = vars(args.parse_args())

    # construct the image generator for data augmentation
    augmentation = ImageDataGenerator(
        rotation_range=30,
        width_shift_range=0.1,
        height_shift_range=0.1,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        fill_mode="nearest",
    )

    # grab the list of images that we'll be describing, then extract
    # the class label names from the image paths
    print("[INFO] loading images...")
    image_paths = list(paths.list_images(args["dataset"]))
    class_names = [pt.split(os.path.sep)[-2] for pt in image_paths]
    class_names = [str(x) for x in np.unique(class_names)]

    # initialize the image preprocessors
    aspect_aware_preprocessor = AspectAwarePreprocessor(224, 224)
    image_to_array_preprocessor = ImageToArrayPreprocessor()

    # load the dataset from disk then scale the raw pixel intensities to the range [0, 1]
    simple_dataset_loader = SimpleDatasetLoader(
        preprocessors=[aspect_aware_preprocessor, image_to_array_preprocessor])
    (data, labels) = simple_dataset_loader.load(image_paths, verbose=500)
    data = data.astype("float") / 255.0

    # partition the data into training and testing splits using 75% of
    # the data for training and the remaining 25% for testing
    (train_x, test_x, train_y, test_y) = train_test_split(data,
                                                          labels,
                                                          test_size=0.25,
                                                          random_state=42)
    # convert the labels from integers to vectors
    train_y = LabelBinarizer().fit_transform(train_y)
    test_y = LabelBinarizer().transform(test_y)

    # load the VGG16 network, ensuring the head FC layer sets are left off
    base_model = VGG16(weights="imagenet",
                       include_top=False,
                       input_tensor=Input(shape=(224, 224, 3)))

    # initialize the new head of the network, a set of FC layers followed by a softmax classifier
    head_model = FCHeadNet.build(base_model, len(class_names), 256)

    # place the head FC model on top of the base model -- this will
    # become the actual model we will train
    model = Model(inputs=base_model.input, outputs=head_model)

    # loop over all layers in the base model and freeze them so they
    # will *not* be updated during the training process
    for layer in base_model.layers:
        layer.trainable = False

    # compile our model (this needs to be done after setting our layers to be non-trainable)
    print("[INFO] compiling model...")
    opt = RMSprop(lr=0.001)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the head of the network for a few epochs (all other layers are frozen) -- this will
    # allow the new FC layers to start to become initialized with actual "learned" values
    # versus pure random
    print("[INFO] training head...")
    model.fit_generator(
        augmentation.flow(train_x, train_y, batch_size=32),
        validation_data=(test_x, test_y),
        epochs=25,
        steps_per_epoch=len(train_x) // 32,
        verbose=1,
    )

    # evaluate the network after initialization
    print("[INFO] evaluating after initialization...")
    predictions = model.predict(test_x, batch_size=32)
    print(
        classification_report(test_y.argmax(axis=1),
                              predictions.argmax(axis=1),
                              target_names=class_names))

    # now that the head FC layers have been trained/initialized, let's
    # unfreeze the final set of CONV layers and make them trainable
    for layer in base_model.layers[15:]:
        layer.trainable = True

    # for the changes to the model to take effect we need to recompile
    # the model, this time using SGD with a *very* small learning rate
    print("[INFO] re-compiling model...")
    opt = SGD(lr=0.001)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the model again, this time fine-tuning *both* the final set
    # of CONV layers along with our set of FC layers
    print("[INFO] fine-tuning model...")
    model.fit_generator(
        augmentation.flow(train_x, train_y, batch_size=32),
        validation_data=(test_x, test_y),
        epochs=100,
        steps_per_epoch=len(train_x) // 32,
        verbose=1,
    )
    # evaluate the network on the fine-tuned model
    print("[INFO] evaluating after fine-tuning...")
    predictions = model.predict(test_x, batch_size=32)
    print(
        classification_report(test_y.argmax(axis=1),
                              predictions.argmax(axis=1),
                              target_names=class_names))

    # save the model to disk
    print("[INFO] serializing model...")
    model.save(args["model"])
Example #15
data_dict = data.set_index('id')['breed'].to_dict()

# le = LabelEncoder()
# labels = le.fit_transform(labels)
# labels_inv = le.inverse_transform(labels)
# classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
# classNames = [str(x) for x in np.unique(classNames)]

# initialize the image preprocessors
aap = AspectAwarePreprocessor(config.INPUT_SIZE, config.INPUT_SIZE)
iap = ImageToArrayPreprocessor()

# load the dataset from disk then scale the raw pixel intensities to
# the range [0, 1]
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
(data, ids) = sdl.load(imagePaths, verbose=500)
labels = [data_dict[i] for i in ids]
data = data.astype("float") / 255.0

# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
	test_size=0.25, stratify=labels)

# convert the labels to vectors
trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)

# load the Xception network, ensuring the head FC layer sets are left off
baseModel = xception.Xception(weights="imagenet", include_top=False,
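	# (continuation sketch: the Xception call is truncated here; this closing argument
	# mirrors the VGG16 setup in Example #14 and assumes Input is imported from
	# keras.layers and that config.INPUT_SIZE matches the preprocessor above)
	input_tensor=Input(shape=(config.INPUT_SIZE, config.INPUT_SIZE, 3)))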
Example #16
from pyimagesearch.preprocessing import ImageToArrayPreprocessor
from pyimagesearch.preprocessing import AspectAwarePreprocessor
# SimpleDatasetLoader is used below; this module path is assumed to mirror the preprocessing package
from pyimagesearch.datasets import SimpleDatasetLoader
import numpy as np
from imutils import paths
from matplotlib import pyplot as plt
from keras import layers
import keras
import keras.backend as K
import cv2
from keras.models import Model, load_model

imagePaths = list(paths.list_images("train"))
aap = AspectAwarePreprocessor(256, 256)
#iap = ImageToArrayPreprocessor()
sdl = SimpleDatasetLoader(preprocessors=[aap])
(data, labels) = sdl.load(imagePaths, verbose=500)
print("[INFO] training data loaded")
# print(data.shape)
# plt.imshow(data[0])
# plt.show()

width = 256
height = 256
channels = 3

input_layer = layers.Input(name='input', shape=(height, width, channels))

# Encoder
x = layers.Conv2D(32, (5, 5),
                  strides=(1, 1),
                  padding='same',
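                  # (continuation sketch: the Conv2D call is truncated here; the
                  # activation and layer name below are purely illustrative)
                  activation='relu',
                  name='encoder_conv1')(input_layer)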
Example #17
ap.add_argument("-k", "--neighbors", type=int, default=1, 
    help="# of nearest neighbors for classification")
ap.add_argument("-j", "--jobs", type=int, default=1, 
    help="# of jobs for kNN distance (-1 uses all available cores)")

args = vars(ap.parse_args())

# Grab the list of images that we will be describing
print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))

# Initialize the image preprocessor, load the dataset from disk,
# and reshape the data matrix
sp = SimplePreprocessor(32, 32)
sdl = SimpleDatasetLoader(preprocessors=[sp])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.reshape(data.shape[0], 3072)

# Show some information on memory consumption of the images
print("[INFO] features matrix: {:.1f}MB".format(
    data.nbytes / (1024 * 1024.0)
))

# Encode the labels as integers
le = LabelEncoder()
labels = le.fit_transform(labels)

# Partition the data into training and testing splits using 75% of 
# the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels, test_size=0.25, random_state=42)
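# (continuation sketch: the k-NN training and evaluation code appears in full in
# Example #1, assuming KNeighborsClassifier and classification_report are imported
# from sklearn)
print("[INFO] evaluating k-NN classifier...")
model = KNeighborsClassifier(n_neighbors=args["neighbors"], n_jobs=args["jobs"])
model.fit(trainX, trainY)
print(classification_report(testY, model.predict(testX), target_names=le.classes_))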