# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=0, width_shift_range=0.1,
    height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
    horizontal_flip=True, fill_mode="nearest")

# initialize the model using a sigmoid activation as the final layer
# in the network so we can perform multi-label classification
print("[INFO] compiling model...")
model = SmallerVGGNet.build(width=IMAGE_DIMS[1], height=IMAGE_DIMS[0],
    depth=IMAGE_DIMS[2], classes=len(mlb.classes_),
    finalAct="sigmoid")
model.summary()

# initialize the Adam optimizer with learning rate decay
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)

# compile the model using binary cross-entropy rather than
# categorical cross-entropy -- this may seem counterintuitive for
# multi-label classification, but keep in mind that the goal here
# is to treat each output label as an independent Bernoulli
# distribution
model.compile(loss="binary_crossentropy", optimizer=opt,
    metrics=["accuracy"])
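# A minimal inference sketch for the multi-label model above: because
# the final layer is a sigmoid, each output is an independent
# probability, so predictions are thresholded per label instead of
# taking an argmax. Assumes `model`, `mlb` (the fitted
# MultiLabelBinarizer), and IMAGE_DIMS from the snippet above; the
# image path and the 0.5 threshold are illustrative choices.
import cv2
import numpy as np
from keras.preprocessing.image import img_to_array

image = cv2.imread("example.jpg")
image = cv2.resize(image, (IMAGE_DIMS[1], IMAGE_DIMS[0]))
image = img_to_array(image.astype("float") / 255.0)
image = np.expand_dims(image, axis=0)

# keep every label whose sigmoid probability clears the threshold
proba = model.predict(image)[0]
predicted = [mlb.classes_[i] for i, p in enumerate(proba) if p > 0.5]
print(predicted)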
# partition the data into training and testing splits using 80% of
# the data for training and the remaining 20% for testing
(trainX, testX, trainY, testY) = train_test_split(data, labels,
    test_size=0.2, random_state=42)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=25, width_shift_range=0.1,
    height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
    horizontal_flip=True, fill_mode="nearest")

# initialize the model and the Adam optimizer
print("[INFO] compiling model...")
model = SmallerVGGNet.build(width=IMAGE_DIMS[1], height=IMAGE_DIMS[0],
    depth=IMAGE_DIMS[2], classes=len(lb.classes_))
opt = Adam(lr=INIT_LR, decay=INIT_LR / EPOCHS)
model.compile(loss="categorical_crossentropy", optimizer=opt,
    metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=BS),
    validation_data=(testX, testY),
    steps_per_epoch=len(trainX) // BS,
    epochs=EPOCHS, verbose=1)

# save the model to disk
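# A small sketch of plotting the History object returned by
# fit_generator above; assumes `H` and EPOCHS from the snippet above,
# and that the history keys are "acc"/"val_acc" as in the older Keras
# releases that still expose fit_generator (newer releases use
# "accuracy"/"val_accuracy"). The output filename is illustrative.
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import numpy as np

N = np.arange(0, EPOCHS)
plt.figure()
plt.plot(N, H.history["loss"], label="train_loss")
plt.plot(N, H.history["val_loss"], label="val_loss")
plt.plot(N, H.history["acc"], label="train_acc")
plt.plot(N, H.history["val_acc"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="upper right")
plt.savefig("plot.png")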
# partition the data into training and testing splits, then one-hot
# encode the two class labels
(trainX, testX, trainY, testY) = train_test_split(data, labels,
    test_size=0.2, random_state=42)
trainY = to_categorical(trainY, num_classes=2)
testY = to_categorical(testY, num_classes=2)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=25, width_shift_range=0.1,
    height_shift_range=0.1, shear_range=0.2, zoom_range=0.2,
    horizontal_flip=True, fill_mode="nearest")

# build the model
model = SmallerVGGNet.build(width=img_dims[0], height=img_dims[1],
    depth=img_dims[2], classes=2)

# compile the model
opt = Adam(lr=lr, decay=lr / epochs)
model.compile(loss="binary_crossentropy", optimizer=opt,
    metrics=["accuracy"])

# train the model
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=batch_size),
    validation_data=(testX, testY),
    steps_per_epoch=len(trainX) // batch_size,
    epochs=epochs, verbose=1)

# save the model to disk
model.save(args.model)
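# An optional evaluation sketch for the two-class model above: since
# the labels were one-hot encoded with to_categorical, argmax recovers
# the class index for both predictions and ground truth. Assumes
# `model`, `testX`, `testY`, and `batch_size` from the snippet above;
# the target_names are placeholders for the actual class names.
from sklearn.metrics import classification_report

predY = model.predict(testX, batch_size=batch_size)
print(classification_report(testY.argmax(axis=1), predY.argmax(axis=1),
    target_names=["class_0", "class_1"]))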
# partition the data into training and testing splits
(x_train, x_test, y_train, y_test) = train_test_split(data, labels,
    test_size=0.2, random_state=69)

# construct the image generator for data augmentation
augment = ImageDataGenerator(zoom_range=0.2, horizontal_flip=True,
    rotation_range=30, width_shift_range=0.1, height_shift_range=0.1,
    shear_range=0.2, fill_mode="nearest")

# initialize the model and the Adam optimizer
print("[INFO] Compiling model...")
model = SmallerVGGNet.build(width=IMAGE_SHAPE[1], height=IMAGE_SHAPE[0],
    depth=IMAGE_SHAPE[2], classes=len(lb.classes_))
optimizer = Adam(lr=LEARNING_RATE, decay=LEARNING_RATE / EPOCHS)
model.compile(loss="categorical_crossentropy", optimizer=optimizer,
    metrics=["accuracy"])

# train the network
print("[INFO] Training network...")
history = model.fit_generator(
    augment.flow(x_train, y_train, batch_size=BATCH_SIZE),
    validation_data=(x_test, y_test),
    steps_per_epoch=len(x_train) // BATCH_SIZE,
    epochs=EPOCHS, verbose=1)
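# The snippet above stops after training; a common follow-up is to
# persist both the network and the fitted label binarizer so an
# inference script can map argmax indices back to class names. A
# sketch assuming `model` and `lb` from the snippet above; both file
# paths are hypothetical.
import pickle

model.save("smallervggnet.model")
with open("lb.pickle", "wb") as f:
    f.write(pickle.dumps(lb))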