from preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from preprocessing.simplepreprocessor import SimplePreprocessor
from datasets.SimpleDatasetLoader import SimpleDatasetLoader
from keras.models import load_model
from imutils import paths
import numpy as np


def PredictNumber():

    classlabels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

    print("[INFO] sampling imags")

    imagePaths = np.array(list(paths.list_images("media")))

    idxs = np.random.randint(0, len(imagePaths), size=(10,))
    print(idxs)
    imagePaths = imagePaths[idxs]
    sp = SimplePreprocessor(28, 28)
    iap = ImageToArrayPreprocessor()

    sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
    (data, labels) = sdl.load(imagePaths)
    data = data.astype("float") / 255.0

    print("[INFO] loading pre-trained network...")
    model = load_model("model/lenet_model.h5")

    print("[INFO] predicting....")

    preds = model.predict(data, batch_size=32).argmax(axis=1)

    return classlabels[preds[0]]
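# Example invocation (assumes media/ holds sample digit images and the
# trained model exists at model/lenet_model.h5, as in the function body):
if __name__ == "__main__":
    print("[INFO] predicted digit:", PredictNumber())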
Example #2
from keras.optimizers import SGD
from preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from preprocessing.simplepreprocessor import SimplePreprocessor
from datasets.SimpleDatasetLoader import SimpleDatasetLoader
from conv import MiniVGGNet
from imutils import paths
import numpy as np
import cv2
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from Grafricacion import graficacion

data = []
epocas = 100

print("[INFO] preprocessing images ...")
imagePaths = list(paths.list_images("NumerosLetras"))
sp = SimplePreprocessor(32, 32, inter=cv2.INTER_CUBIC)
iap = ImageToArrayPreprocessor()
sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
(data, labels) = sdl.load(imagePaths, gray=1)
data = data.astype("float") / 255.0
lb = LabelBinarizer()
classLabels = [
    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E',
    'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
    'U', 'V', 'W', 'X', 'Y', 'Z'
]

print("[INFO] partition data ...")
(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.2,
                                                  random_state=45)
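# The example is truncated here. Its own imports (SGD, MiniVGGNet,
# graficacion) and the epocas variable suggest a continuation along these
# lines -- a sketch only, with assumed hyperparameters:
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

opt = SGD(lr=0.01, momentum=0.9)
model = MiniVGGNet.build(width=32, height=32, depth=1,
                         classes=len(classLabels))
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])
H = model.fit(trainX, trainY, validation_data=(testX, testY),
              batch_size=32, epochs=epocas, verbose=1)
graficacion(H, epocas)  # plotting helper; exact signature assumed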
Example #3
import argparse

from imutils import paths
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer

from preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from preprocessing.simplepreprocessor import SimplePreprocessor
from datasets.SimpleDatasetLoader import SimpleDatasetLoader

argument_parser = argparse.ArgumentParser()
argument_parser.add_argument('-d',
                             '--dataset',
                             required=True,
                             help='Path to input dataset.')
argument_parser.add_argument('-m',
                             '--model',
                             required=True,
                             help='Path to the output model.')

arguments = vars(argument_parser.parse_args())

print('[INFO] Loading images...')
image_paths = list(paths.list_images(arguments['dataset']))

simple_preprocessor = SimplePreprocessor(32, 32)
image_to_array_preprocessor = ImageToArrayPreprocessor()

simple_dataset_loader = SimpleDatasetLoader(
    preprocessors=[simple_preprocessor, image_to_array_preprocessor])
data, labels = simple_dataset_loader.load(image_paths, verbose=500)
data = data.astype('float') / 255.0

X_train, X_test, y_train, y_test = train_test_split(data,
                                                    labels,
                                                    test_size=.25,
                                                    random_state=42)

y_train = LabelBinarizer().fit_transform(y_train)
y_test = LabelBinarizer().fit_transform(y_test)

print('[INFO] Compiling model...')
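# The listing breaks off after this print. A hypothetical continuation --
# the network (ShallowNet here) and the hyperparameters are assumptions;
# only the --model output path comes from the parser above:
from keras.optimizers import SGD

model = ShallowNet.build(width=32, height=32, depth=3,
                         classes=y_train.shape[1])
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.005),
              metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_test, y_test),
          batch_size=32, epochs=100, verbose=1)

print('[INFO] Serializing model...')
model.save(arguments['model'])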
Example #4
import numpy as np
import argparse
import os

from imutils import paths
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from keras.optimizers import SGD
from preprocessing.aspectawarepreprocessor import AspectAwarePreprocessor  # module path assumed
from preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from datasets.SimpleDatasetLoader import SimpleDatasetLoader
from conv import MiniVGGNet

ap = argparse.ArgumentParser()
ap.add_argument("-d", "--dataset", required=True,
help="Path to the dataset")
args = vars(ap.parse_args())

print("[INFO] loading images...")
imagePaths = list(paths.list_images(args["dataset"]))
classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
classNames = [str(x) for x in np.unique(classNames)]

aap = AspectAwarePreprocessor(64, 64)
iap = ImageToArrayPreprocessor()
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

(trainX, testX, trainY, testY) = train_test_split(data, labels,
                                                  test_size=0.25, random_state=42)

trainY = LabelBinarizer().fit_transform(trainY)
testY = LabelBinarizer().fit_transform(testY)

print("[INFO] compling model...")
opt = SGD(lr=0.05)
model = MiniVGGNet.build(width=64, height=64, depth=3,
                         classes=len(classNames))
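# This example also stops after the model is built; the usual next steps
# (loss, epoch count, and batch size below are assumptions) would be:
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])
model.fit(trainX, trainY, validation_data=(testX, testY),
          batch_size=32, epochs=100, verbose=1)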
Example #5
from preprocessing.simplepreprocessor import SimplePreprocessor
from preprocessing.meanpreprocessor import MeanPreprocessor
from preprocessing.croppreprocessor import CropPreprocessor
from preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from hdf5.hdf5datasetgenerator import HDF5DatasetGenerator
from utils.rank import rank5_accuracy
from keras.models import load_model
import numpy as np
import progressbar
import json

import config  # project settings (DATASET_MEAN, IMAGE_SIZE, MODEL_PATH, TEST_HDF5, BATCH_SIZE); module path assumed

means = json.loads(open(config.DATASET_MEAN).read())

sp = SimplePreprocessor(config.IMAGE_SIZE, config.IMAGE_SIZE)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
cp = CropPreprocessor(config.IMAGE_SIZE, config.IMAGE_SIZE)
iap = ImageToArrayPreprocessor()

print("[INFO] loading model...")
model = load_model(config.MODEL_PATH)

print("[INFO] predicting on test data...")

testGen = HDF5DatasetGenerator(config.TEST_HDF5,
                               config.BATCH_SIZE,
                               preprocessors=[mp],
                               classes=3)
predictions = []

widgets = [
    "Evaluating: ",
    progressbar.Percentage(), " ",
    progressbar.Bar(), " ",
    progressbar.ETA(),
]
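# The example breaks off mid-list. The unused cp/iap preprocessors suggest
# it was heading toward a 10-crop evaluation; a sketch, with the generator's
# attribute names (numImages, generator(passes=1), db) assumed:
pbar = progressbar.ProgressBar(maxval=testGen.numImages // config.BATCH_SIZE,
                               widgets=widgets).start()

for (i, (images, labels)) in enumerate(testGen.generator(passes=1)):
    for image in images:
        # crop each image into 10 patches and convert each to a Keras array
        crops = cp.preprocess(image)
        crops = np.array([iap.preprocess(c) for c in crops], dtype="float32")

        # average the predictions over the 10 crops
        predictions.append(model.predict(crops).mean(axis=0))

    pbar.update(i)

pbar.finish()
(rank1, rank5) = rank5_accuracy(predictions, testGen.db["labels"])
print("[INFO] rank-1: {:.2f}%".format(rank1 * 100))
print("[INFO] rank-5: {:.2f}%".format(rank5 * 100))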
Example #6
import os

import numpy as np
from imutils import paths
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import classification_report
from keras.preprocessing.image import ImageDataGenerator
from keras.applications import VGG16
from keras.layers import Input
from keras.models import Model
from keras.optimizers import RMSprop, SGD
from preprocessing.simplepreprocessor import SimplePreprocessor
from preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from datasets.SimpleDatasetLoader import SimpleDatasetLoader
from conv import FCHeadNet  # project-local head network; module path assumed


def main():
    args = option()  # option() is the script's own CLI-argument helper

    # construct the image generator for data augmentation
    aug = ImageDataGenerator(rotation_range=30,
                             width_shift_range=0.1,
                             height_shift_range=0.1,
                             shear_range=0.2,
                             zoom_range=0.2,
                             horizontal_flip=True,
                             fill_mode="nearest")

    # grab the list of images
    print("[INFO] loading images...")
    imagePaths = list(paths.list_images(args['dataset']))

    classNames = [pt.split(os.path.sep)[-2] for pt in imagePaths]
    classNames = [str(x) for x in np.unique(classNames)]

    # initialize the image preprocessors
    sp = SimplePreprocessor(224, 224)
    iap = ImageToArrayPreprocessor()

    sdl = SimpleDatasetLoader(preprocessors=[sp, iap])
    (data, labels) = sdl.load(imagePaths, verbose=500)
    data = data.astype("float") / 255.0

    # partition the data: 75% for training, 25% for testing
    (trainX, testX, trainY, testY) = train_test_split(data,
                                                      labels,
                                                      test_size=0.25,
                                                      random_state=42)

    # convert the labels from integers to vectors
    trainY = LabelBinarizer().fit_transform(trainY)
    testY = LabelBinarizer().fit_transform(testY)

    # load the VGG16 network, ensuring the head FC layer
    # sets are left off
    baseModel = VGG16(weights="imagenet",
                      include_top=False,
                      input_tensor=Input(shape=(224, 224, 3)))

    # initialize the new head of the network, a set of FC layers
    # followed by a softmax classifier
    headModel = FCHeadNet.build(baseModel, len(classNames), 256)

    # place the head FC model on top of the base model; this
    # becomes the actual model we will train
    model = Model(inputs=baseModel.input, outputs=headModel)

    # loop over all layers in the base model and freeze them so they
    # will not be updated during the training process
    for layer in baseModel.layers:
        layer.trainable = False

    # compile our model (this needs to be done after setting our
    # layers to be non-trainable)
    print("[INFO] compiling model...")
    opt = RMSprop(lr=0.001)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the head of the network for a few epochs (all other
    # layers are frozen)
    print("[INFO] training head...")
    #model.fit_generator(aug.flow(trainX, trainY, batch_size=32),
    #                    validation_data=(testX, testY), epochs=20,
    #                    steps_per_epoch=len(trainX) // 32, verbose=1)
    H = model.fit(trainX,
                  trainY,
                  validation_data=(testX, testY),
                  batch_size=32,
                  epochs=20,
                  verbose=1)

    # evaluate the network after initialization
    print("[INFO] evaluating after initialization...")
    predictions = model.predict(testX, batch_size=32)
    print(
        classification_report(testY.argmax(axis=1),
                              predictions.argmax(axis=1),
                              target_names=classNames))

    # unfreeze the final set of CONV layers and make them trainable
    for layer in baseModel.layers[15:]:
        layer.trainable = True

    # re-compile the model
    print("[INFO] re-compiling model...")
    opt = SGD(lr=0.001)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the model again
    print("[INFO] fine-tuning model...")
    #model.fit_generator(aug.flow(trainX, trainY, batch_size=32),
    #                validation_data=(testX, testY), epochs=30,
    #                steps_per_epoch=len(trainX) // 32, verbose=1)
    H = model.fit(trainX,
                  trainY,
                  validation_data=(testX, testY),
                  batch_size=32,
                  epochs=32,
                  verbose=1)

    # save the network to disk
    print("[INFO] serializing network ...")
    model.save(args["model"])

    # evaluate the network
    print("[INFO] evaluating after fine-tuning...")
    predictions = model.predict(testX, batch_size=32)
    print(
        classification_report(testY.argmax(axis=1),
                              predictions.argmax(axis=1),
                              target_names=classNames))
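# The script presumably ends with the usual entry-point guard:
if __name__ == "__main__":
    main()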
Example #7
import json

from keras.models import load_model

from dogs_vs_cats.config import dogs_vs_cat_config as config
from inout.hdf5datasetgenerator import HDF5DatasetGenerator
from preprocessing.croppreprocessor import CropPreprocessor
from preprocessing.imagetoarraypreprocessor import ImageToArrayPreprocessor
from preprocessing.meanpreprocessor import MeanPreprocessor
from preprocessing.simplepreprocessor import SimplePreprocessor
from utils.ranked import rank5_accuracy

with open(config.DATASET_MEAN, 'r') as f:
    means = json.loads(f.read())

simple_preprocessor = SimplePreprocessor(227, 227)
mean_preprocessor = MeanPreprocessor(means['R'], means['G'], means['B'])
crop_preprocessor = CropPreprocessor(227, 227)
image_to_array_preprocessor = ImageToArrayPreprocessor()

print('[INFO] Loading model...')
model = load_model(config.MODEL_PATH)

print('[INFO] Predicting on test data (no crops)...')
test_generator = HDF5DatasetGenerator(config.TEST_HDF5, 64, preprocessors=[simple_preprocessor, mean_preprocessor,
                                                                           image_to_array_preprocessor], classes=2)
predictions = model.predict_generator(test_generator.generator(), steps=test_generator.num_images // 64,
                                      max_queue_size=10)

rank1, _ = rank5_accuracy(predictions, test_generator.db['labels'])
print(f'[INFO] rank-1: {rank1 * 100:.2f}%')
test_generator.close()

# Let's now test with oversampling
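# A sketch of the oversampled pass this comment announces: re-open the test
# set with mean subtraction only, take the 10 crops of each image, and
# average each image's predictions over its crops. generator(passes=1) and
# the batch handling are assumptions, not from the original:
import numpy as np

test_generator = HDF5DatasetGenerator(config.TEST_HDF5, 64,
                                      preprocessors=[mean_preprocessor],
                                      classes=2)
predictions = []

for images, labels in test_generator.generator(passes=1):
    for image in images:
        # 10-crop oversampling: four corners + center, plus their flips
        crops = crop_preprocessor.preprocess(image)
        crops = np.array([image_to_array_preprocessor.preprocess(c)
                          for c in crops], dtype='float32')
        predictions.append(model.predict(crops).mean(axis=0))

rank1, _ = rank5_accuracy(predictions, test_generator.db['labels'])
print(f'[INFO] rank-1 (with crops): {rank1 * 100:.2f}%')
test_generator.close()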