# import the necessary packages (assumed: the standard scikit-learn/Keras
# imports this training script relies on)
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from pyimagesearch.nn.conv.lenet import LeNet
from keras.optimizers import SGD
import numpy as np

# scale the raw pixel intensities to the range [0, 1] ("data" and
# "labels" are assumed to have been loaded earlier)
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)

# partition the data into 75% training and 25% testing splits
(trainX, testX, trainY, testY) = train_test_split(data, labels,
	test_size=0.25, random_state=42)

# convert the labels from integers to one-hot vectors
lb = LabelBinarizer().fit(trainY)
trainY = lb.transform(trainY)
testY = lb.transform(testY)

# initialize and compile the model
print("[INFO] compiling the model...")
opt = SGD(lr=0.01)
model = LeNet.build(width=28, height=28, depth=1, classes=9)
model.compile(loss="categorical_crossentropy", optimizer=opt,
	metrics=["accuracy"])

# train the network
print("[INFO] training the model...")
H = model.fit(trainX, trainY, validation_data=(testX, testY),
	batch_size=32, epochs=15, verbose=1)

# evaluate the network (the original print statement was truncated;
# the classification report below is the standard completion)
print("[INFO] evaluating the network...")
predictions = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1),
	predictions.argmax(axis=1), target_names=lb.classes_))
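Since fit returns a History object, the training curves can be plotted once training finishes. A minimal sketch, assuming matplotlib is installed and that the older Keras history keys ("acc"/"val_acc") apply here, as they do when SGD(lr=...) is the accepted spelling:

# plot the training loss and accuracy over the 15 epochs
import matplotlib.pyplot as plt

plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, 15), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, 15), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, 15), H.history["acc"], label="train_acc")
plt.plot(np.arange(0, 15), H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.savefig("plot.png")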
# import the necessary packages
from pyimagesearch.nn.conv.lenet import LeNet
from keras.utils import plot_model

# build LeNet and write a visualization of the architecture to disk
model = LeNet.build(28, 28, 1, 10)
plot_model(model, to_file="lenet.png", show_shapes=True,
	show_layer_names=True)
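Note that plot_model depends on the graphviz binary and the pydot Python package being installed. When they are not available, the same architecture can still be inspected as text; a minimal sketch:

# print a text summary of the same LeNet architecture -- a
# lighter-weight alternative to plot_model that needs no
# graphviz/pydot dependency
from pyimagesearch.nn.conv.lenet import LeNet

model = LeNet.build(28, 28, 1, 10)
model.summary()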
# import the necessary packages
from pyimagesearch.nn.conv.lenet import LeNet
from keras.applications import VGG16
from keras.layers import Input, Dense
from keras.models import Model
import argparse

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--include-top", type=int, default=1,
	help="whether or not to include the top of the CNN")
args = vars(ap.parse_args())

# load the VGG16 network, ensuring the head FC layer sets are
# left off
baseModel = VGG16(weights="imagenet", include_top=False,
	input_tensor=Input(shape=(224, 224, 3)))

# build LeNet as the body of the new network
leModel = LeNet.build(64, 64, 3, 10)

# initialize the new head of the network: a single FC layer
# (an FCHeadNet-style head could be substituted instead)
output = Dense(100)(leModel.output)
#headModel = FCHeadNet.build(baseModel, 4, 256)

# place the new head on top of the body -- this will become the
# actual model we will train
model = Model(inputs=leModel.input, outputs=output)

# loop over the layers in the network and display them to the
# console
for (i, layer) in enumerate(model.layers):
	print("[INFO] {}\t{}".format(i, layer.__class__.__name__))
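If the goal is fine-tuning in the usual sense (training a new head while the pre-trained weights stay fixed), the body's layers are typically frozen before compiling. A minimal sketch against the baseModel loaded above; the head shown here is hypothetical (256 hidden units, 10 classes), standing in for the commented-out FCHeadNet call:

# build a hypothetical FC head on top of the frozen VGG16 body
from keras.layers import Dense, Flatten
from keras.models import Model

headModel = Flatten()(baseModel.output)
headModel = Dense(256, activation="relu")(headModel)
headModel = Dense(10, activation="softmax")(headModel)
model = Model(inputs=baseModel.input, outputs=headModel)

# freeze every layer in the body so its weights are not updated
# during the initial training phase -- only the new head learns
for layer in baseModel.layers:
	layer.trainable = False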