# Load CIFAR-10 and scale pixel intensities into [0, 1].
((trainX, trainY), (testX, testY)) = cifar10.load_data()
trainX = trainX.astype("float") / 255.0
testX = testX.astype("float") / 255.0

# One-hot encode the integer labels: fit on train, reuse on test.
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

# Human-readable class names for labels 0-9.
labelNames = [
    "airplane", "automobile", "bird", "cat", "deer",
    "dog", "frog", "horse", "ship", "truck",
]

print("[INFO] compiling model...")
# SGD with Nesterov momentum; the decay term assumes a 40-epoch run.
opt = SGD(lr=0.01, decay=0.01 / 40, momentum=0.9, nesterov=True)
model = MiniVGGNet.build(width=32, height=32, depth=3, classes=10)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

# Checkpoint filename template: zero-padded epoch plus validation loss.
fname = os.path.sep.join(
    [args["weights"], "weights={epoch:03d}-{val_loss:.4f}.hdf5"])
# Keep only the best weights (lowest validation loss) on disk; see the
# Keras ModelCheckpoint docs for the monitor/mode semantics.
checkpoint = ModelCheckpoint(fname,
                             monitor="val_loss",
                             mode="min",
                             save_best_only=True,
                             verbose=1)
callbacks = [checkpoint]
print("[INFO] training our network MiniVGG...")
# Reshape flat image vectors into 28x28 single-channel tensors.
data = data.reshape(data.shape[0], 28, 28, 1)

# Scale into [0, 1] and hold out 25% of the samples for validation.
(trainX, testX, trainY, testY) = train_test_split(data / 255.0,
                                                  dataset.target.astype("int"),
                                                  test_size=0.25,
                                                  random_state=42)

# One-hot encode the labels: fit on train, reuse on test.
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

print("[INFO] compiling model...")
# The learning rate is driven by the step_decay schedule callback,
# so plain SGD decay is intentionally left at its default here.
callbacks = [LearningRateScheduler(step_decay)]
opt = SGD(lr=0.01, momentum=0.9, nesterov=True)
model = MiniVGGNet.build(width=28, height=28, depth=1, classes=10)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])
print(model.summary())

print("[INFO] training network miniVGG...")
H = model.fit(trainX, trainY,
              validation_data=(testX, testY),
              batch_size=64,
              epochs=70,
              callbacks=callbacks,
              verbose=1)
model.save(args["model"])
# Hyper-parameters; both are baked into the TensorBoard run name below.
lr = 0.01
bs = 32

checkpoint_filepath = "./train/best_model.h5"
log_dir = f"./logs/fit/miniVGGNET2_data_augm_weighted_{lr}_bs{bs}"

# TensorBoard logging, stepwise LR schedule, and best-model checkpointing.
callbacks = [
    tf.keras.callbacks.TensorBoard(log_dir=log_dir, histogram_freq=1),
    tf.keras.callbacks.LearningRateScheduler(stepwise_scheduler, verbose=1),
    tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_filepath,
                                       monitor="val_loss",
                                       verbose=1,
                                       save_best_only=True),
]

# Two-class MiniVGG trained with plain SGD and a binary loss.
model = MiniVGGNet.build(width, height, depth, 2)
optimizer = tf.keras.optimizers.SGD(learning_rate=lr)
model.compile(optimizer=optimizer,
              loss="binary_crossentropy",
              metrics=["accuracy"])

# Train on augmented batches; class_weight compensates for class imbalance.
# NOTE(review): width/height/depth/epochs/classes_weight come from elsewhere
# in the file — confirm they are defined before this chunk runs.
h = model.fit(data_augm.flow(x=X_train, y=y_train, batch_size=bs),
              steps_per_epoch=len(X_train) // bs,
              epochs=epochs,
              verbose=1,
              validation_data=(X_test, y_test),
              shuffle=True,
              callbacks=callbacks,
              class_weight=classes_weight)
# Pull the pre-split train/validation tensors out of the dataset wrapper.
trainX = data.train.images
trainY = data.train.labels
testX = data.valid.images
# BUG FIX: was `data.vaild.labels` (typo) — it would raise AttributeError.
testY = data.valid.labels

# One-hot encode the labels: fit the binarizer on train, reuse on test.
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

print("[INFO] compiling model...")
# SGD with Nesterov momentum; the decay term assumes a 70-epoch run.
opt = SGD(lr=0.01, decay=0.01 / 70, momentum=0.9, nesterov=True)
model = MiniVGGNet.build(width=48, height=48, depth=2, classes=2)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])
print(model.summary())

# BUG FIX: the log line said "Lenet-5", but the model built above is MiniVGGNet.
print("[INFO] training network MiniVGGNet")
# NOTE(review): `callbacks` is not defined in this chunk — it must be created
# earlier in the file; confirm it exists before this runs.
H = model.fit(trainX, trainY,
              validation_data=(testX, testY),
              batch_size=64,
              epochs=70,
              callbacks=callbacks,
              verbose=1)
model.save(args["model"])
from lenet import LeNet
from miniVGG import MiniVGGNet
from keras.utils import plot_model


def _render(network, filename):
    """Save an architecture diagram (with layer shapes) of *network* to *filename*."""
    plot_model(network, to_file=filename, show_shapes=True)


# LeNet-5 variants: MNIST (28x28x1), CIFAR-10 and CIFAR-100 (32x32x3).
model1 = LeNet.build(28, 28, 1, 10)
_render(model1, "lenet-5-mnist.png")
model12 = LeNet.build(32, 32, 3, 10)
_render(model12, "lenet-5-cifar10.png")
model13 = LeNet.build(32, 32, 3, 100)
_render(model13, "lenet-5-cifar100.png")

# MiniVGGNet variants for the same three datasets.
model2 = MiniVGGNet.build(28, 28, 1, 10)
_render(model2, "miniVGGNet-mnist.png")
model21 = MiniVGGNet.build(32, 32, 3, 10)
_render(model21, "miniVGGNet-cifar10.png")
model22 = MiniVGGNet.build(32, 32, 3, 100)
_render(model22, "miniVGGNet-cifar100.png")
# Load the images, apply the preprocessors, and scale into [0, 1].
sdl = SimpleDatasetLoader(preprocessors=[aap, iap])
(data, labels) = sdl.load(imagePaths, verbose=500)
data = data.astype("float") / 255.0

(trainX, testX, trainY, testY) = train_test_split(data, labels,
                                                  test_size=0.25,
                                                  random_state=42)

# BUG FIX: the test labels were encoded with a *separate*
# `LabelBinarizer().fit_transform(testY)`. If the test split contains a
# different set or ordering of classes, its one-hot columns would not line
# up with the training encoding. Fit once on train, then transform test.
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

# On-the-fly data augmentation used during training.
aug = ImageDataGenerator(rotation_range=40,
                         width_shift_range=0.1,
                         height_shift_range=0.1,
                         shear_range=0.2,
                         zoom_range=0.2,
                         horizontal_flip=True,
                         fill_mode="nearest")

print("[INFO] compiling model...")
# SGD with Nesterov momentum; the decay term assumes a 200-epoch run.
opt = SGD(lr=0.05, decay=0.05 / 200, momentum=0.9, nesterov=True)
model = MiniVGGNet.build(width=64, height=64, depth=3,
                         classes=len(classNames))
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

# Keep only the weights with the lowest validation loss.
checkpoint = ModelCheckpoint(args["weights"],
                             monitor="val_loss",
                             mode="min",
                             save_best_only=True,
                             verbose=1)
callbacks = [checkpoint]

print("[INFO] training network...")
# BUG FIX: `callbacks` was built above but never passed to fit_generator,
# so the checkpoint could never fire. Pass it through so the best weights
# are actually saved to args["weights"].
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=32),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // 32,
                        epochs=200,
                        callbacks=callbacks,
                        verbose=1)

print("[INFO] evaluating network...")
preds = model.predict(testX, batch_size=32)
print(classification_report(testY.argmax(axis=1),
                            preds.argmax(axis=1),
                            target_names=classNames))