Code Example #1
# import the necessary packages
from pyimagesearch.nn.conv import LeNet
from keras.utils import plot_model

# initialize LeNet and then write the network architecture
# visualization graph to disk
model = LeNet.build(28, 28, 1, 10)
plot_model(model, to_file="lenet.png", show_shapes=True)
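
Every example in this listing calls LeNet.build from the pyimagesearch.nn.conv module, whose source is not reproduced here. For reference, a minimal sketch of what such a build method could look like in Keras is shown below; the filter counts and layer sizes are assumptions, not the module's actual values.

# minimal LeNet-style build sketch (assumed layout, not the actual
# pyimagesearch implementation)
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Activation, Flatten, Dense
from keras import backend as K

class LeNet:
    @staticmethod
    def build(width, height, depth, classes):
        # order the input dimensions according to the backend's image format
        input_shape = (height, width, depth)
        if K.image_data_format() == "channels_first":
            input_shape = (depth, height, width)

        model = Sequential()
        # first CONV => RELU => POOL block
        model.add(Conv2D(20, (5, 5), padding="same", input_shape=input_shape))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # second CONV => RELU => POOL block
        model.add(Conv2D(50, (5, 5), padding="same"))
        model.add(Activation("relu"))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
        # fully connected layer followed by the softmax classifier
        model.add(Flatten())
        model.add(Dense(500))
        model.add(Activation("relu"))
        model.add(Dense(classes))
        model.add(Activation("softmax"))
        return model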
Code Example #2
if K.image_data_format() == "channels_first":
    data = data.reshape(data.shape[0], 3, 32, 32)
else:
    data = data.reshape(data.shape[0], 32, 32, 3)

(trainX, testX, trainY, testY) = train_test_split(data,
                                                  labels,
                                                  test_size=0.25,
                                                  random_state=42)

# convert the labels from integers to vectors, fitting the binarizer
# on the training labels only so both splits share the same encoding
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

print("[INFO] compiling model...")
opt = SGD(lr=0.01)
model = LeNet.build(width=32, height=32, depth=3, classes=3)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

print("[INFO] training network...")
H = model.fit(trainX,
              trainY,
              validation_data=(testX, testY),
              batch_size=32,
              epochs=100,
              verbose=1)

print("[INFO] serializing network...")
model.save("./trained_lenet_animals.hdf5")
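
The HDF5 file written by model.save can later be reloaded for inference with keras.models.load_model. A minimal sketch follows; the input image path and the resize-to-32x32 preprocessing are assumptions chosen to mirror the training script above.

# reload the serialized network and classify a single new image
# (hypothetical input path; preprocessing mirrors the training data)
import cv2
import numpy as np
from keras.models import load_model

model = load_model("./trained_lenet_animals.hdf5")
image = cv2.imread("example.jpg")
image = cv2.resize(image, (32, 32)).astype("float") / 255.0
image = np.expand_dims(image, axis=0)  # add the batch dimension
pred = model.predict(image).argmax(axis=1)[0]
print("[INFO] predicted class index: {}".format(pred))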
Code Example #3
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)

# partition the data into training and testing splits using 75% of
# the data for training and the remaining 25% for testing
(trainX, testX, trainY, testY) = train_test_split(data,
	labels, test_size=0.25, random_state=42)

# convert the labels from integers to vectors
lb = LabelBinarizer().fit(trainY)
trainY = lb.transform(trainY)
testY = lb.transform(testY)

# initialize the model
print("[INFO] compiling model...")
model = LeNet.build(width=28, height=28, depth=1, classes=9)
opt = SGD(lr=0.01)
model.compile(loss="categorical_crossentropy", optimizer=opt,
	metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit(trainX, trainY, validation_data=(testX, testY),
	batch_size=32, epochs=15, verbose=1)

# evaluate the network
print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1),
	predictions.argmax(axis=1), target_names=lb.classes_))
Code Example #4
def main():
    """Train Smile CNN.
    """
    # construct the argument parser and parse the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-d",
                        "--dataset",
                        required=True,
                        help="path to input dataset of faces")
    parser.add_argument("-m",
                        "--model",
                        required=True,
                        help="path to output model")
    args = vars(parser.parse_args())
    # initialize the list of data and labels
    data = []
    labels = []

    # loop over the input images
    for image_path in sorted(list(paths.list_images(args["dataset"]))):
        # load the image, pre-process it, and store it in the data list
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = imutils.resize(image, width=28)
        image = img_to_array(image)
        data.append(image)
        # extract the class label from the image path and update the labels list
        label = image_path.split(os.path.sep)[-3]
        label = "smiling" if label == "positives" else "not_smiling"
        labels.append(label)

    # scale the raw pixel intensities to the range [0, 1]
    data = np.array(data, dtype="float") / 255.0
    labels = np.array(labels)

    # convert the labels from integers to vectors
    label_encoder = LabelEncoder().fit(labels)
    labels = np_utils.to_categorical(label_encoder.transform(labels), 2)

    # account for skew in the labeled data
    class_totals = labels.sum(axis=0)
    class_weight = class_totals.max() / class_totals

    # partition the data into training and testing splits using 80% of
    # the data for training and the remaining 20% for testing
    (train_x, test_x, train_y, test_y) = train_test_split(data,
                                                          labels,
                                                          test_size=0.20,
                                                          stratify=labels,
                                                          random_state=42)
    # initialize the model
    print("[INFO] compiling model...")
    model = LeNet.build(width=28, height=28, depth=1, classes=2)
    model.compile(loss="binary_crossentropy",
                  optimizer="adam",
                  metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    model_fit = model.fit(
        train_x,
        train_y,
        validation_data=(test_x, test_y),
        class_weight=class_weight,
        batch_size=64,
        epochs=15,
        verbose=1,
    )
    # evaluate the network
    print("[INFO] evaluating network...")
    predictions = model.predict(test_x, batch_size=64)
    print(
        classification_report(test_y.argmax(axis=1),
                              predictions.argmax(axis=1),
                              target_names=label_encoder.classes_))

    # save the model to disk
    print("[INFO] serializing network...")
    model.save(args["model"])

    # plot the training + testing loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(np.arange(0, 15), model_fit.history["loss"], label="train_loss")
    plt.plot(np.arange(0, 15), model_fit.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, 15), model_fit.history["acc"], label="acc")
    plt.plot(np.arange(0, 15), model_fit.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend()
    plt.show()
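
In the smile example above, class_totals.max() / class_totals up-weights the under-represented class so the loss does not favor the majority label. With hypothetical one-hot label counts (not the dataset's actual figures), the computation works out as follows.

# illustrate the class-weight computation with hypothetical counts
import numpy as np

labels = np.array([[1, 0]] * 9000 + [[0, 1]] * 3000)  # 9000 vs. 3000 samples
class_totals = labels.sum(axis=0)                     # -> array([9000, 3000])
class_weight = class_totals.max() / class_totals      # -> array([1., 3.])
print(class_weight)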
Code Example #5
def main():
    """Train and evaluate LeNet on MNIST dataset.
    """
    # grab the MNIST dataset (if this is your first time using this
    # dataset then the 11MB download may take a minute)
    print("[INFO] accessing MNIST...")
    ((train_data, train_labels), (test_data, test_labels)) = mnist.load_data()

    # if we are using "channels first" ordering, then reshape the
    # design matrix such that the matrix is:
    # num_samples x depth x rows x columns
    if K.image_data_format() == "channels_first":
        train_data = train_data.reshape((train_data.shape[0], 1, 28, 28))
        test_data = test_data.reshape((test_data.shape[0], 1, 28, 28))
    # otherwise, we are using "channels last" ordering, so the design
    # matrix shape should be: num_samples x rows x columns x depth
    else:
        train_data = train_data.reshape((train_data.shape[0], 28, 28, 1))
        test_data = test_data.reshape((test_data.shape[0], 28, 28, 1))

    # scale data to the range of [0, 1]
    train_data = train_data.astype("float32") / 255.0
    test_data = test_data.astype("float32") / 255.0
    # convert the labels from integers to vectors
    label_binarizer = LabelBinarizer()
    train_labels = label_binarizer.fit_transform(train_labels)
    test_labels = label_binarizer.transform(test_labels)

    # initialize the optimizer and model
    print("[INFO] compiling model...")
    opt = SGD(lr=0.01)
    model = LeNet.build(width=28, height=28, depth=1, classes=10)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
    # train the network
    print("[INFO] training network...")
    model_fit = model.fit(train_data,
                          train_labels,
                          validation_data=(test_data, test_labels),
                          batch_size=128,
                          epochs=20,
                          verbose=1)

    # evaluate the network
    print("[INFO] evaluating network...")
    predictions = model.predict(test_data, batch_size=128)
    print(
        classification_report(
            test_labels.argmax(axis=1),
            predictions.argmax(axis=1),
            target_names=[str(x) for x in label_binarizer.classes_]))
    # plot the training loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(np.arange(0, 20), model_fit.history["loss"], label="train_loss")
    plt.plot(np.arange(0, 20), model_fit.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, 20), model_fit.history["acc"], label="train_acc")
    plt.plot(np.arange(0, 20), model_fit.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend()
    plt.show()
Code Example #6
File: train_model.py Project: lykhahaha/Mine
# scale raw pixel to [0, 1]
data = np.array(data, dtype='float')/255
labels = np.array(labels)

# partition data into 75% and 25%
trainX, testX, trainY, testY = train_test_split(data, labels, test_size=0.25, random_state=42)

# convert labels to vectors
le = LabelBinarizer()
trainY = le.fit_transform(trainY)
testY = le.transform(testY)

# initialize optimizer and model
print('[INFO] compiling model...')
model = LeNet.build(width=28, height=28, depth=1, classes=len(le.classes_))
opt = SGD(lr=0.01)
model.compile(optimizer=opt, loss='categorical_crossentropy', metrics=['accuracy'])

# train network
print('[INFO] training network...')
H = model.fit(trainX, trainY, validation_data=(testX, testY), batch_size=32, epochs=15, verbose=2)

# evaluating network
print('[INFO] evaluating model...')
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1), predictions.argmax(axis=1), target_names=le.classes_))

# save model to disk
print('[INFO] serializing network...')
model.save(args['model'])
Code Example #7
def main():
    """Train LeNet model on the image captcha dataset
    """
    # construct the argument parser and parse the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-d",
                        "--dataset",
                        required=True,
                        help="path to input dataset")
    parser.add_argument("-m",
                        "--model",
                        required=True,
                        help="path to output model")
    args = vars(parser.parse_args())

    # initialize the data and labels
    data = []
    labels = []

    # loop over the input images
    for image_path in paths.list_images(args["dataset"]):
        # load the image, pre-process it, and store it in the data list
        image = cv2.imread(image_path)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        image = preprocess(image, 28, 28)
        image = img_to_array(image)
        data.append(image)

        # extract the class label from the image path and update the labels list
        label = image_path.split(os.path.sep)[-2]
        labels.append(label)

    # scale the raw pixel intensities to the range [0, 1]
    data = np.array(data, dtype="float") / 255.0
    labels = np.array(labels)

    # partition the data into training and testing splits using 75% of
    # the data for training and the remaining 25% for testing
    (train_x, test_x, train_y, test_y) = train_test_split(data,
                                                          labels,
                                                          test_size=0.25,
                                                          random_state=42)
    # convert the labels from integers to vectors
    label_binarizer = LabelBinarizer().fit(train_y)
    train_y = label_binarizer.transform(train_y)
    test_y = label_binarizer.transform(test_y)

    # initialize the model
    print("[INFO] compiling model...")
    model = LeNet.build(width=28, height=28, depth=1, classes=9)
    opt = SGD(lr=0.01)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])

    # train the network
    print("[INFO] training network...")
    model_fit = model.fit(train_x,
                          train_y,
                          validation_data=(test_x, test_y),
                          batch_size=32,
                          epochs=15,
                          verbose=1)

    # evaluate the network
    print("[INFO] evaluating network...")
    predictions = model.predict(test_x, batch_size=32)
    print(
        classification_report(test_y.argmax(axis=1),
                              predictions.argmax(axis=1),
                              target_names=label_binarizer.classes_))
    # save the model to disk
    print("[INFO] serializing network...")
    model.save(args["model"])

    # plot the training + testing loss and accuracy
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(np.arange(0, 15), model_fit.history["loss"], label="train_loss")
    plt.plot(np.arange(0, 15), model_fit.history["val_loss"], label="val_loss")
    plt.plot(np.arange(0, 15), model_fit.history["acc"], label="acc")
    plt.plot(np.arange(0, 15), model_fit.history["val_acc"], label="val_acc")
    plt.title("Training Loss and Accuracy")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend()
    plt.show()
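
Example #7 depends on a preprocess helper that is not included in the snippet. A plausible sketch is given below, assuming the helper resizes each captcha digit along its larger dimension and then pads it so the output is exactly width x height pixels; the exact padding strategy is an assumption.

# assumed sketch of the preprocess helper used in Code Example #7
import cv2
import imutils

def preprocess(image, width, height):
    # resize along the larger dimension so the digit fits inside
    # the target width x height box
    (h, w) = image.shape[:2]
    if w > h:
        image = imutils.resize(image, width=width)
    else:
        image = imutils.resize(image, height=height)

    # pad the resized image so its dimensions match the target,
    # then resize once more to absorb any rounding error
    pad_w = int((width - image.shape[1]) / 2.0)
    pad_h = int((height - image.shape[0]) / 2.0)
    image = cv2.copyMakeBorder(image, pad_h, pad_h, pad_w, pad_w,
                               cv2.BORDER_REPLICATE)
    image = cv2.resize(image, (width, height))
    return image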
Code Example #8
File: lenet_mnist.py Project: lykhahaha/Mine
    testX = testX.reshape((len(testX), 28, 28, 1))

# scale it to range [0, 1]
trainX = trainX.astype('float32') / 255
testX = testX.astype('float32') / 255
target_names = [str(x) for x in list(range(10))]

# convert label to vector
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

# initialize the optimizer and network
print('[INFO] compiling the network...')
opt = SGD(lr=0.01)
model = LeNet.build(width=28, height=28, depth=1, classes=len(target_names))
model.compile(opt, loss='categorical_crossentropy', metrics=['accuracy'])

# training the network
print('[INFO] training the network...')
H = model.fit(trainX,
              trainY,
              validation_data=(testX, testY),
              batch_size=128,
              epochs=20,
              verbose=2)

# evaluate the network
print('[INFO] evaluating the network...')
preds = model.predict(testX, batch_size=128)
print(
    classification_report(testY.argmax(axis=1),
                          preds.argmax(axis=1),
                          target_names=target_names))
Code Example #9
def main():
    """Visualize network architecture.
    """
    # initialize LeNet and then write the network architecture visualization graph to disk
    model = LeNet.build(28, 28, 1, 10)
    plot_model(model, to_file="lenet.png", show_shapes=True)
Code Example #10
File: lenet_train.py Project: abishek21/TamilLens
# fit the binarizer on the training labels only, then apply the
# same encoding to the test labels
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")

# initialize the optimizer and model
print("[INFO] compiling model...")
opt = SGD(lr=0.05)
model = LeNet.build(width=64, height=64, depth=3, classes=len(classNames))

model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])

# construct the callback to save only the *best* model to disk
# based on the validation loss
checkpoint = ModelCheckpoint("./checkpoint_dataagu/checkpoint.hdf5",
                             monitor="val_loss", mode="min",
                             save_best_only=True, verbose=1)
callbacks = [checkpoint]

# train the network on the augmented data
# (without augmentation, one could call model.fit(trainX, trainY, ...) directly)
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=32),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // 32,
                        epochs=100, callbacks=callbacks, verbose=1)