def main():
    args = option()

    # grab the list of image paths
    print("[INFO] loading images ...")
    imagePaths = list(paths.list_images(args["dataset"]))

    # initialize the image preprocessor
    sp = SimplePreprocessor(32, 32)

    # load dataset from disk
    sdl = SimpleDatasetLoader(preprocessors=[sp])
    (data, labels) = sdl.load(imagePaths, verbose=500)

    # reshape the data matrix
    data = data.reshape((data.shape[0], 3072))

    # split training:75%, test:25%
    (trainX, testX, trainY, testY) = train_test_split(data,
                                                      labels,
                                                      test_size=0.25,
                                                      random_state=42)

    # loop over set of regularizers
    for r in (None, "l1", "l2"):

        # train a SGD classifier using a softmax loss function and
        # specified regularization function for 10 epochs
        print("[INFO] training model with '{}' penalty".format(r))
        model = SGDClassifier(loss="log_loss",  # named "log" in scikit-learn < 1.1
                              penalty=r,
                              max_iter=10,
                              learning_rate="constant",
                              eta0=0.01,
                              random_state=42)
        model.fit(trainX, trainY)

        # evaluate the classifier
        acc = model.score(testX, testY)
        print("[INFO] '{}' penalty accuracy: {:.2f}%".format(r, acc * 100))
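Example #1 relies on the SimplePreprocessor and SimpleDatasetLoader helper classes, which are not shown here. A minimal sketch of what they might look like, inferred from how they are called above (the implementation details are assumptions):

import os

import cv2
import numpy as np

class SimplePreprocessor:
    def __init__(self, width, height, inter=cv2.INTER_AREA):
        # store the target image width, height, and interpolation method
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        # resize the image to a fixed size, ignoring the aspect ratio
        return cv2.resize(image, (self.width, self.height),
                          interpolation=self.inter)

class SimpleDatasetLoader:
    def __init__(self, preprocessors=None):
        self.preprocessors = preprocessors if preprocessors is not None else []

    def load(self, imagePaths, verbose=-1):
        data, labels = [], []

        for (i, imagePath) in enumerate(imagePaths):
            # assume paths in the format /path/to/dataset/{class}/{image}.jpg
            image = cv2.imread(imagePath)
            label = imagePath.split(os.path.sep)[-2]

            # apply each preprocessor to the image in turn
            for p in self.preprocessors:
                image = p.preprocess(image)

            data.append(image)
            labels.append(label)

            # show an update every `verbose` images
            if verbose > 0 and i > 0 and (i + 1) % verbose == 0:
                print("[INFO] processed {}/{}".format(i + 1, len(imagePaths)))

        return (np.array(data), np.array(labels))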
Example #2
def main():
    args = option()

    # initialize class labels
    classLabels = ["cat", "dog", "panda"]

    # grab the list of images in the test dataset
    print("[INFO] sampling images...")
    imagePaths = np.array(list(paths.list_images(args["datatest"])))

    # initialize the image preprocessors
    sp = SimplePreprocessor(32, 32)
    iap = ImageToArrayPreprocessor()

    # load the dataset from disk
    sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
    (data, labels) = sdl.load(imagePaths)

    # scale the raw pixel intensities to the range [0, 1]
    data = data.astype("float") / 255.0

    # load pre-trained network
    print("[INFO] loading pre-trained network...")
    model = load_model(args["model"])

    # make predictions on the images
    print("[INFO] predicting...")
    preds = model.predict(data).argmax(axis=1)

    # loop over sample images
    for (i, imagePath) in enumerate(imagePaths):
        # load the example image
        image = cv2.imread(imagePath)

        # draw the prediction
        cv2.putText(image, "Label: {}".format(classLabels[preds[i]]), (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

        # save the result
        cv2.imwrite("Chapter13/Image_Predict_{}.png".format(i), image)
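Example #2 chains an ImageToArrayPreprocessor after the resizer. A plausible minimal implementation is a thin wrapper around Keras' img_to_array; the dataFormat constructor argument is an assumption:

from tensorflow.keras.preprocessing.image import img_to_array

class ImageToArrayPreprocessor:
    def __init__(self, dataFormat=None):
        # None defers to the image_data_format set in the Keras config
        self.dataFormat = dataFormat

    def preprocess(self, image):
        # rearrange the image dimensions into a Keras-compatible array
        return img_to_array(image, data_format=self.dataFormat)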
Example #3
def main():
    args = option()

    # grab the list of images
    print("[INFO] loading images...")
    imagePaths = list(paths.list_images(args["dataset"]))

    # initialize image preprocessor
    sp = SimplePreprocessor(32, 32)
    sdl = SimpleDatasetLoader(preprocessors=[sp])

    # load dataset from disk
    (data, labels) = sdl.load(imagePaths, verbose=500)

    # reshape the data matrix
    data = data.reshape((data.shape[0], 3072))

    # show info on memory consumption of images
    print("[INFO] feature matrix: {:.1f}MB".format(data.nbytes /
                                                   (1024 * 1024)))

    # convert labels from strings to vectors
    lb = LabelBinarizer()
    labels = lb.fit_transform(labels)

    # split training: 75%, test: 25%
    (trainX, testX, trainY, testY) = train_test_split(data,
                                                      labels,
                                                      test_size=0.25,
                                                      random_state=42)

    # train and evaluate a k-NN classifier on the raw pixel intensities
    print("[INFO] evaluating k-NN classifier ...")
    model = KNeighborsClassifier(n_neighbors=args["neighbors"],
                                 n_jobs=args["jobs"])
    model.fit(trainX, trainY)
    prediction = model.predict(testX)
    print(classification_report(testY, prediction, target_names=lb.classes_))
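Every example calls an option() helper that is not shown. A hedged sketch of an argparse-based version covering the keys Example #3 reads ("dataset", "neighbors", "jobs"); the flag names and defaults are assumptions:

import argparse

def option():
    # parse the command line arguments and return them as a dictionary
    ap = argparse.ArgumentParser()
    ap.add_argument("-d", "--dataset", required=True,
                    help="path to the input dataset")
    ap.add_argument("-k", "--neighbors", type=int, default=1,
                    help="number of nearest neighbors for classification")
    ap.add_argument("-j", "--jobs", type=int, default=-1,
                    help="number of jobs for k-NN distance (-1 uses all cores)")
    return vars(ap.parse_args())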
Example #4
def main():
    args = option()

    # construct the image generator for data augmentation
    aug = ImageDataGenerator(rotation_range=30,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode="nearest")

    # grab the list of images
    print("[INFO] loading images...")
    imagePaths = list(paths.list_images(args["dataset"]))

    classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
    classNames = [str(x) for x in np.unique(classNames)]

    # initialize the image preprocessors
    aap = AspectAwarePreprocessor(64, 64)
    iap = ImageToArrayPreprocessor()

    # load the dataset from disk
    sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
    (data, labels) = sdl.load(imagePaths, verbose=500)
    # scale the raw pixel intensities to the range [0, 1]
    data = data.astype("float") / 255.0

    # split training: 75%, testing: 25%
    (trainX, testX, trainY, testY) = train_test_split(data,
                                                      labels,
                                                      test_size=0.25,
                                                      random_state=42)

    # convert the labels from strings to vectors
    lb = LabelBinarizer()
    trainY = lb.fit_transform(trainY)
    testY = lb.transform(testY)

    # initialize the optimizer and model
    print("[INFO] compiling model...")
    opt = SGD(learning_rate=0.05)  # "lr" in older Keras versions
    model = MiniVGGNet.build(width=64,
                             height=64,
                             depth=3,
                             classes=len(classNames))
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    # fit_generator is deprecated in TF2; model.fit accepts generators
    H = model.fit(aug.flow(trainX, trainY, batch_size=32),
                  validation_data=(testX, testY),
                  steps_per_epoch=len(trainX) // 32,
                  epochs=100,
                  verbose=1)

    # evaluate the network
    print("[INFO] evaluating network...")
    predictions = model.predict(testX, batch_size=32)
    print(
        classification_report(testY.argmax(axis=1),
                              predictions.argmax(axis=1),
                              target_names=classNames))

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(np.arange(0, 100), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, 100), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, 100), H.history["accuracy"], label="train_acc")
    plt.plot(np.arange(0, 100), H.history["val_accuracy"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend()
    plt.savefig(args["output"])
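Example #4 swaps in an AspectAwarePreprocessor so the 64x64 resize does not distort the images. A minimal sketch, assuming the usual approach of resizing along the shorter dimension and then center-cropping (imutils.resize preserves the aspect ratio):

import cv2
import imutils

class AspectAwarePreprocessor:
    def __init__(self, width, height, inter=cv2.INTER_AREA):
        self.width = width
        self.height = height
        self.inter = inter

    def preprocess(self, image):
        (h, w) = image.shape[:2]
        (dW, dH) = (0, 0)

        # resize along the shorter dimension, then compute the crop offsets
        if w < h:
            image = imutils.resize(image, width=self.width, inter=self.inter)
            dH = int((image.shape[0] - self.height) / 2.0)
        else:
            image = imutils.resize(image, height=self.height, inter=self.inter)
            dW = int((image.shape[1] - self.width) / 2.0)

        # center-crop, then force the exact target size to absorb rounding
        (h, w) = image.shape[:2]
        image = image[dH:h - dH, dW:w - dW]
        return cv2.resize(image, (self.width, self.height),
                          interpolation=self.inter)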
Example #5
# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")

# grab the list of images
print("[INFO] loading images...")
imagePaths = list(paths.list_images('Dataset Leaf'))

classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
classNames = [str(x) for x in np.unique(classNames)]

# initialize the image preprocessors
sp = SimplePreprocessor(224, 224)
iap = ImageToArrayPreprocessor()

sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

# partition the data into training:75% and testing:25%
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.25,
                                                  random_state=42)

# convert the labels from strings to vectors
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

# load the VGG16 network, ensuring the head FC layer
# sets are left off
Example #6
def main():
    args = option()

    # grab list of images
    print("[INFO] loading images...")
    imagePaths = list(paths.list_images(args["dataset"]))

    # initialize the image preprocessors
    sp = SimplePreprocessor(32, 32)
    iap = ImageToArrayPreprocessor()

    # load the dataset from disk
    sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
    (data, labels) = sdl.load(imagePaths, verbose=500)

    # scale the raw pixel intensities to the range [0, 1]
    data = data.astype("float") / 255.0

    # split training: 75%, testing: 25%
    (trainX, testX, trainY, testY) = train_test_split(data,
                                                      labels,
                                                      test_size=0.25,
                                                      random_state=42)

    # convert the labels from strings to vectors
    lb = LabelBinarizer()
    trainY = lb.fit_transform(trainY)
    testY = lb.transform(testY)

    # initialize the optimizer and model
    print("[INFO] compiling model...")
    opt = SGD(learning_rate=0.005)
    model = ShallowNet.build(width=32, height=32, depth=3, classes=3)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    H = model.fit(trainX,
                  trainY,
                  validation_data=(testX, testY),
                  batch_size=32,
                  epochs=100,
                  verbose=1)

    # evaluate the network
    print("[INFO] evaluating network...")
    preds = model.predict(testX)
    print(
        classification_report(testY.argmax(axis=1),
                              preds.argmax(axis=1),
                              target_names=["cat", "dog", "panda"]))

    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(np.arange(0, 100), H.history["loss"], label="train_loss")
    plt.plot(np.arange(0, 100), H.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, 100), H.history["accuracy"], label="train_acc")
    plt.plot(np.arange(0, 100), H.history["val_accuracy"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend()
    plt.savefig(args["output"])
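ShallowNet here is, as the name suggests, a very small network. A minimal sketch consistent with the build() call in Example #6, assuming a single CONV => RELU block feeding a softmax classifier (the filter count is an assumption):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Activation, Flatten, Dense

class ShallowNet:
    @staticmethod
    def build(width, height, depth, classes):
        # CONV => RELU => FC => softmax, assuming channels-last ordering
        model = Sequential()
        model.add(Conv2D(32, (3, 3), padding="same",
                         input_shape=(height, width, depth)))
        model.add(Activation("relu"))
        model.add(Flatten())
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model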
Example #7
def main():
    args = option()

    # construct the image generator for data augmentation
    aug = ImageDataGenerator(rotation_range=30,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode="nearest")

    # grab the list of images
    print("[INFO] loading images...")
    imagePaths = list(paths.list_images(args['dataset']))

    classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
    classNames = [str(x) for x in np.unique(classNames)]

    # initialize the image preprocessors
    sp = SimplePreprocessor(224, 224)
    iap = ImageToArrayPreprocessor()

    sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
    (data, labels) = sdl.load(imagePaths, verbose=500)
    data = data.astype("float") / 255.0

    # partition the data into training:75% and testing:25%
    (trainX, testX, trainY, testY) = train_test_split(data,
                                                      labels,
                                                      test_size=0.25,
                                                      random_state=42)

    # convert the labels from strings to vectors
    lb = LabelBinarizer()
    trainY = lb.fit_transform(trainY)
    testY = lb.transform(testY)

    # load the VGG16 network, ensuring the head FC layer
    # sets are left off
    baseModel = VGG16(weights="imagenet",
                      include_top=False,
                      input_tensor=Input(shape=(224, 224, 3)))

    # initialize the new head of the network, a set of FC layers
    # followed by a softmax classifier
    headModel = FCHeadNet.build(baseModel, len(classNames), 256)

    # place the head FC model on top of the base model; this
    # becomes the actual model we will train
    model = Model(inputs=baseModel.input, outputs=headModel)

    # loop over all layers in the base model and freeze them so they
    # will not be updated during the training process
    for layer in baseModel.layers:
        layer.trainable = False

    # compile our model (this needs to be done after setting our
    # layers to be non-trainable)
    print("[INFO] compiling model...")
    opt = RMSprop(learning_rate=0.001)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the head of the network for a few epochs (all other
    # layers are frozen)
    print("[INFO] training head...")
    #model.fit_generator(aug.flow(trainX, trainY, batch_size=32),
    #                    validation_data=(testX, testY), epochs=20,
    #                    steps_per_epoch=len(trainX) // 32, verbose=1)
    H = model.fit(trainX,
                  trainY,
                  validation_data=(testX, testY),
                  batch_size=32,
                  epochs=20,
                  verbose=1)

    # evaluate the network after initialization
    print("[INFO] evaluating after initialization...")
    predictions = model.predict(testX, batch_size=32)
    print(
        classification_report(testY.argmax(axis=1),
                              predictions.argmax(axis=1),
                              target_names=classNames))

    # unfreeze the final set of CONV layers and make them trainable
    for layer in baseModel.layers[15:]:
        layer.trainable = True

    # re-compile the model
    print("[INFO] re-compiling model...")
    opt = SGD(learning_rate=0.001)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the model again
    print("[INFO] fine-tuning model...")
    #model.fit_generator(aug.flow(trainX, trainY, batch_size=32),
    #                validation_data=(testX, testY), epochs=30,
    #                steps_per_epoch=len(trainX) // 32, verbose=1)
    H = model.fit(trainX,
                  trainY,
                  validation_data=(testX, testY),
                  batch_size=32,
                  epochs=32,
                  verbose=1)

    # save the network to disk
    print("[INFO] serializing network...")
    model.save(args["model"])

    # evaluate the network
    print("[INFO] evaluating after fine-tuning...")
    predictions = model.predict(testX, batch_size=32)
    print(
        classification_report(testY.argmax(axis=1),
                              predictions.argmax(axis=1),
                              target_names=classNames))
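FCHeadNet.build(baseModel, classes, D) is assumed to attach a small fully connected head to the frozen VGG16 body. A hedged sketch consistent with how it is called in Example #7 (the dropout rate and layer layout are assumptions):

from tensorflow.keras.layers import Dense, Dropout, Flatten

class FCHeadNet:
    @staticmethod
    def build(baseModel, classes, D):
        # flatten the base model's final volume, then add
        # FC => RELU => dropout => softmax as the new classifier head
        headModel = baseModel.output
        headModel = Flatten(name="flatten")(headModel)
        headModel = Dense(D, activation="relu")(headModel)
        headModel = Dropout(0.5)(headModel)
        headModel = Dense(classes, activation="softmax")(headModel)
        return headModel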