Example #1
# pp, mp, iap, aug, and the imports are assumed to be defined as in the later examples
trainGen = HDF5DatasetGenerator(config.TRAIN_HDF5,
                                128,
                                aug=aug,
                                preprocessors=[pp, mp, iap],
                                classes=2)

valGen = HDF5DatasetGenerator(config.VAL_HDF5,
                              128,
                              preprocessors=[sp, mp, iap],
                              classes=2)

print("[INFO] compiling model...")
opt = Adam(lr=1e-3)

model = AlexNet.build(width=227, height=227, depth=3, classes=2)

model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

path = os.path.sep.join([config.OUTPUT_PATH, "{}.png".format(os.getpid())])
callbacks = [TrainingMonitor(path)]

model.fit_generator(trainGen.generator(),
                    steps_per_epoch=trainGen.numImages // 128,
                    validation_data=valGen.generator(),
                    validation_steps=valGen.numImages // 128,
                    epochs=75,
                    max_queue_size=128 * 2,
                    callbacks=callbacks,
                    verbose=1)
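
All four snippets lean on HDF5DatasetGenerator from pyimagesearch.io, which streams batches out of an HDF5 file instead of loading the whole dataset into memory; its implementation is not shown in any of the examples. The following is only a rough sketch of such a generator, assuming the HDF5 file holds an "images" and a "labels" dataset and using the attribute names the calls above expect (numImages, generator(), close()):

# Minimal sketch of an HDF5-backed batch generator; the real
# pyimagesearch.io.HDF5DatasetGenerator may differ in details.
import h5py
import numpy as np
from keras.utils import to_categorical

class SimpleHDF5Generator:
    def __init__(self, db_path, batch_size, preprocessors=None, aug=None, classes=2):
        self.db = h5py.File(db_path, "r")
        self.batch_size = batch_size
        self.preprocessors = preprocessors or []
        self.aug = aug
        self.classes = classes
        self.numImages = self.db["labels"].shape[0]

    def generator(self, passes=np.inf):
        epochs = 0
        while epochs < passes:
            for i in range(0, self.numImages, self.batch_size):
                images = self.db["images"][i:i + self.batch_size]
                labels = to_categorical(self.db["labels"][i:i + self.batch_size],
                                        self.classes)
                # run each image through the preprocessor chain
                images = np.array([self._preprocess(img) for img in images])
                # optionally apply Keras data augmentation to the batch
                if self.aug is not None:
                    images, labels = next(self.aug.flow(images, labels,
                                                        batch_size=self.batch_size))
                yield images, labels
            epochs += 1

    def _preprocess(self, image):
        for p in self.preprocessors:
            image = p.preprocess(image)
        return image

    def close(self):
        self.db.close()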
Example #2
from pyimagesearch.io import HDF5DatasetGenerator
from pyimagesearch.preprocessing import SimplePreprocessor, MeanPreprocessor, PatchPreprocessor
from config import plant_seedlings_config as config
from pyimagesearch.nn.conv import AlexNet
from keras.preprocessing.image import ImageDataGenerator
import json
import pickle

mean = json.loads(open(config.DATASET_MEAN).read())
le = pickle.loads(open(config.LABEL_MAPPINGS, 'rb').read())

sp = SimplePreprocessor(224, 224)
mp = MeanPreprocessor(mean['R'], mean['G'], mean['B'])
pp = PatchPreprocessor(224, 224)

aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.15,
                         zoom_range=0.1, horizontal_flip=True)

train_gen = HDF5DatasetGenerator(config.TRAIN_HDF5, preprocessors=[pp, mp],
                                 aug=aug, batch_size=64,
                                 num_classes=len(le.classes_))
# the validation generator gets no augmentation
val_gen = HDF5DatasetGenerator(config.VAL_HDF5, preprocessors=[sp, mp],
                               batch_size=64, num_classes=len(le.classes_))

model = AlexNet.build(224, 224, 3, len(le.classes_))
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

model.fit(train_gen.generator(), steps_per_epoch=train_gen.num_images // 64,
          validation_data=val_gen.generator(),
          validation_steps=val_gen.num_images // 64,
          epochs=100, verbose=2)
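
Example #2 assumes two artifacts that are built before training: config.DATASET_MEAN, a JSON file holding the per-channel "R", "G", "B" means of the training set, and config.LABEL_MAPPINGS, a pickled scikit-learn LabelEncoder whose classes_ attribute gives the number of outputs. A hedged sketch of how those files could be written, reusing the config module imported above (the channel-mean lists and label names below are placeholders for values gathered while the training HDF5 file is built):

# Sketch: serialize per-channel means and a fitted LabelEncoder.
import json
import pickle
import numpy as np
from sklearn.preprocessing import LabelEncoder

labels = ["black_grass", "charlock", "cleavers"]       # placeholder class names
r_means, g_means, b_means = [123.1], [116.9], [103.5]  # placeholder per-image channel means

le = LabelEncoder()
le.fit(labels)

means = {"R": float(np.mean(r_means)),
         "G": float(np.mean(g_means)),
         "B": float(np.mean(b_means))}
with open(config.DATASET_MEAN, "w") as f:
    f.write(json.dumps(means))

with open(config.LABEL_MAPPINGS, "wb") as f:
    f.write(pickle.dumps(le))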
Example #3
# initialize image preprocessors
sp = SimplePreprocessor(227, 227)
pp = PatchPreprocessor(227, 227)
mp = MeanPreprocessor(means['R'], means['G'], means['B'])
iap = ImageToArrayPreprocessor()

# initialize training and validation dataset generators
print('[INFO] loading dataset...')
train_gen = HDF5DatasetGenerator(config.TRAIN_HDF5, batch_size=64 * config.G,
                                 preprocessors=[pp, mp, iap], aug=aug, classes=2)
val_gen = HDF5DatasetGenerator(config.VAL_HDF5, batch_size=64 * config.G,
                               preprocessors=[sp, mp, iap], classes=2)

# initialize the optimizer
if config.G <= 1:
    print('[INFO] compiling model with 1 GPU...')
    model = AlexNet.build(width=227, height=227, depth=3, classes=2, reg=0.0002)
else:
    print(f'[INFO] compiling model with {config.G} GPUs...')
    # build the template model on the CPU, then replicate it across the GPUs
    with tf.device('/cpu:0'):
        model = AlexNet.build(width=227, height=227, depth=3, classes=2, reg=0.0002)
    model = multi_gpu_model(model, gpus=config.G)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# construct set of callbacks
path = os.path.sep.join([config.OUTPUT_PATH, f'{os.getpid()}.png'])
callbacks = [TrainingMonitor(path)]

# train the network
print('[INFO] training model...')
model.fit_generator(train_gen.generator(),
                    steps_per_epoch=train_gen.num_images // (64 * config.G),
                    validation_data=val_gen.generator(),
                    validation_steps=val_gen.num_images // (64 * config.G),
                    epochs=75,
                    callbacks=callbacks,
                    verbose=2)
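
In example #3, the single template model is built on the CPU and then wrapped with multi_gpu_model, which keeps one replica per GPU and splits each batch of 64 * config.G samples across them. multi_gpu_model has since been removed from Keras/TensorFlow; as a hedged sketch (not what the example above uses), the same idea with tf.distribute.MirroredStrategy looks roughly like this:

# Sketch of a MirroredStrategy replacement for multi_gpu_model;
# AlexNet.build and the generators are assumed to be the same as above.
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy()
print(f'[INFO] compiling model with {strategy.num_replicas_in_sync} GPU(s)...')

with strategy.scope():
    model = AlexNet.build(width=227, height=227, depth=3, classes=2, reg=0.0002)
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=['accuracy'])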
Example #4
def main():
    """Train AlexNet on Dogs vs Cats
    """
    # construct the training image generator for data augmentation
    augmentation = ImageDataGenerator(
        rotation_range=20,
        zoom_range=0.15,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.15,
        horizontal_flip=True,
        fill_mode="nearest",
    )

    # load the RGB means for the training set
    means = json.loads(open(config.DATASET_MEAN).read())
    # initialize the image preprocessors
    simple_preprocessor = SimplePreprocessor(227, 227)
    patch_preprocessor = PatchPreprocessor(227, 227)
    mean_preprocessor = MeanPreprocessor(means["R"], means["G"], means["B"])
    image_to_array_preprocessor = ImageToArrayPreprocessor()

    # initialize the training and validation dataset generators
    train_gen = HDF5DatasetGenerator(
        config.TRAIN_HDF5,
        128,
        augmentation=augmentation,
        preprocessors=[patch_preprocessor, mean_preprocessor, image_to_array_preprocessor],
        classes=2,
    )

    val_gen = HDF5DatasetGenerator(
        config.VAL_HDF5,
        128,
        preprocessors=[simple_preprocessor, mean_preprocessor, image_to_array_preprocessor],
        classes=2,
    )
    # initialize the optimizer
    print("[INFO] compiling model...")
    opt = Adam(lr=1e-3)
    model = AlexNet.build(width=227, height=227, depth=3, classes=2, regularization=0.0002)
    model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

    # construct the set of callbacks
    path = os.path.sep.join([config.OUTPUT_PATH, "{}.png".format(os.getpid())])
    callbacks = [TrainingMonitor(path)]

    # train the network
    model.fit_generator(
        train_gen.generator(),
        steps_per_epoch=train_gen.num_images // 128,
        validation_data=val_gen.generator(),
        validation_steps=val_gen.num_images // 128,
        epochs=75,
        max_queue_size=10,
        callbacks=callbacks,
        verbose=1,
    )

    # save the model to file
    print("[INFO] serializing model...")
    model.save(config.MODEL_PATH, overwrite=True)

    # close the HDF5 datasets
    train_gen.close()
    val_gen.close()


if __name__ == "__main__":
    main()
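
Example #4 ends by serializing the trained network to config.MODEL_PATH. A hedged sketch of loading that file back and classifying a single image with the same SimplePreprocessor/MeanPreprocessor/ImageToArrayPreprocessor chain used by the validation generator (config, the preprocessor classes, and the image path are assumed from the code above or are placeholders):

# Sketch: reload the serialized model and classify one image with the
# same resize + mean-subtraction pipeline used at validation time.
import json
import cv2
import numpy as np
from keras.models import load_model

means = json.loads(open(config.DATASET_MEAN).read())
sp = SimplePreprocessor(227, 227)
mp = MeanPreprocessor(means["R"], means["G"], means["B"])
iap = ImageToArrayPreprocessor()

model = load_model(config.MODEL_PATH)

image = cv2.imread("example.jpg")   # placeholder path
for p in (sp, mp, iap):
    image = p.preprocess(image)
image = np.expand_dims(image, axis=0)

# index-to-label order depends on how the labels were encoded
# when the HDF5 files were built
pred = model.predict(image)[0]
print("predicted class index:", pred.argmax())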