Example #1
# if no specific model checkpoint was supplied, build and compile a fresh CRNN
if args["model"] is None:
    model = CRNN.build(width=config.WIDTH,
                       height=config.HEIGHT,
                       depth=1,
                       classes=config.NUM_CLASSES)
    # print(model.summary())

    # the CTC loss is computed inside the network itself, so the compile-time
    # loss simply passes the model's "ctc" output through unchanged
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=adam)
# otherwise, rebuild the network and load the checkpointed weights from disk
else:
    print("[INFO] loading {}...".format(args["model"]))
    model = CRNN.build(width=config.WIDTH,
                       height=config.HEIGHT,
                       depth=1,
                       classes=config.NUM_CLASSES)
    model.load_weights(args["model"])

    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=adam)

callbacks = [
    EpochCheckpoint(args["checkpoints"], every=5, startAt=args["start_epoch"])
]

model.fit(trainGen.generator(),
          steps_per_epoch=trainGen.numImages // config.BATCH_SIZE,
          validation_data=valGen.generator(),
          validation_steps=valGen.numImages // config.BATCH_SIZE,
          epochs=30,
          max_queue_size=config.BATCH_SIZE * 2,
          callbacks=callbacks,
          verbose=1)
trainGen.close()
valGen.close()
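
A note on the compile call above: the dummy loss loss={'ctc': lambda y_true, y_pred: y_pred} only works because the CTC loss is computed inside the network itself and exposed as a model output named "ctc". The sketch below shows how that wiring is typically done with keras.backend.ctc_batch_cost; the toy feature input, layer sizes, and tensor names are illustrative assumptions, not the actual internals of CRNN.build.

from tensorflow.keras import backend as K
from tensorflow.keras.layers import Dense, Input, Lambda
from tensorflow.keras.models import Model

def ctc_lambda(args):
    # K.ctc_batch_cost expects (labels, y_pred, input_length, label_length)
    labels, y_pred, input_length, label_length = args
    return K.ctc_batch_cost(labels, y_pred, input_length, label_length)

# toy recognition head: 32 timesteps of 64-dim features -> per-timestep class scores
features = Input(name="features", shape=(32, 64))
y_pred = Dense(37, activation="softmax", name="softmax")(features)

labels = Input(name="labels", shape=(None,), dtype="float32")
input_length = Input(name="input_length", shape=(1,), dtype="int64")
label_length = Input(name="label_length", shape=(1,), dtype="int64")

# the Lambda layer's output *is* the CTC loss value, which is why compile()
# only needs a pass-through loss for the output named "ctc"
ctc_loss = Lambda(ctc_lambda, output_shape=(1,), name="ctc")(
    [labels, y_pred, input_length, label_length])

model = Model(inputs=[features, labels, input_length, label_length], outputs=ctc_loss)
model.compile(loss={"ctc": lambda y_true, y_pred: y_pred}, optimizer="adam")
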
Example #2
def training(aug, means_path, train_hdf5_path, val_hdf5_path, fig_path, json_path,
             label_encoder_path, best_weight_path, checkpoint_path, cross_val=None):
    # load RGB means
    means = json.loads(open(means_path).read())

    # initialize the image preprocessors
    sp = SimplePreprocessor(227, 227)
    mp = MeanPreprocessor(means['R'], means['G'], means['B'])
    pp = PatchPreprocessor(227, 227)
    iap = ImageToArrayPreprocessor()

    # initialize the training and validation dataset generators
    train_gen = HDF5DatasetGenerator(train_hdf5_path, config.BATCH_SIZE,
                                     preprocessors=[pp, mp, iap], aug=aug,
                                     classes=config.NUM_CLASSES)
    val_gen = HDF5DatasetGenerator(val_hdf5_path, config.BATCH_SIZE,
                                   preprocessors=[sp, mp, iap], aug=aug,
                                   classes=config.NUM_CLASSES)

    metrics = ['accuracy']
    if config.DATASET_TYPE == 'age':
        le = pickle.loads(open(label_encoder_path, 'rb').read())
        agh = AgeGenderHelper(config, deploy)
        one_off_mappings = agh.build_oneoff_mappings(le)

        one_off = OneOffAccuracy(one_off_mappings)
        metrics.append(one_off.one_off_accuracy)

    # construct the set of callbacks
    callbacks = [
        TrainingMonitor(fig_path, json_path=json_path, start_at=args['start_epoch']),
        EpochCheckpoint(checkpoint_path, every=5, start_at=args['start_epoch']),
        ModelCheckpointsAdvanced(best_weight_path, json_path=json_path,
                                 start_at=args['start_epoch']),
        # LearningRateScheduler(decay),
    ]

    if cross_val is None:
        print('[INFO] compiling model...')
    else:
        print(f'[INFO] compiling model for cross validation {cross_val}...')
    
    if args['start_epoch'] == 0:
        if not os.path.exists(checkpoint_path):
            os.makedirs(checkpoint_path)
        model = AgeGenderNet.build(227, 227, 3, config.NUM_CLASSES, reg=5e-4)
        model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=metrics)
    else:
        model_path = os.path.sep.join([checkpoint_path, f"epoch_{args['start_epoch']}.hdf5"])
        print(f"[INFO] loading {model_path}...")
        if config.DATASET_TYPE == 'age':
            model = load_model(model_path, custom_objects={'one_off_accuracy': one_off.one_off_accuracy})
        elif config.DATASET_TYPE == 'gender':
            model = load_model(model_path)

        # update learning rate
        print(f'[INFO] old learning rate: {K.get_value(model.optimizer.lr)}')
        K.set_value(model.optimizer.lr, INIT_LR)
        print(f'[INFO] new learning rate: {K.get_value(model.optimizer.lr)}')

    # train the network
    if cross_val is None:
        print('[INFO] training the network...')
    else:
        print(f'[INFO] training the network for cross validation {cross_val}...')
    model.fit_generator(train_gen.generator(),
                        steps_per_epoch=train_gen.num_images // config.BATCH_SIZE,
                        validation_data=val_gen.generator(),
                        validation_steps=val_gen.num_images // config.BATCH_SIZE,
                        epochs=MAX_EPOCH - args['start_epoch'],
                        verbose=2,
                        callbacks=callbacks)

    # close the HDF5 datasets
    train_gen.close()
    val_gen.close()
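
For context, the MeanPreprocessor used here simply subtracts the per-channel training-set means (loaded from the JSON file) from every image before it reaches the network. A minimal sketch of such a preprocessor, assuming OpenCV-style BGR images, might look like the following; the class body is an illustrative reconstruction, not the project's actual implementation.

import cv2

class MeanPreprocessor:
    def __init__(self, r_mean, g_mean, b_mean):
        # per-channel means computed over the training set
        self.r_mean = r_mean
        self.g_mean = g_mean
        self.b_mean = b_mean

    def preprocess(self, image):
        # OpenCV loads images in BGR order, so split accordingly
        (b, g, r) = cv2.split(image.astype("float32"))
        r -= self.r_mean
        g -= self.g_mean
        b -= self.b_mean
        # merge the channels back together and return the image
        return cv2.merge([b, g, r])
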
Example #3
# otherwise, load the checkpoint from disk
else:
    print("[INFO] loading {}...".format(args["model"]))
    model = load_model(args["model"])

    # update the learning rate
    print("[INFO] old learning rate: {}".format(K.get_value(
        model.optimizer.lr)))
    K.set_value(model.optimizer.lr, 1e-5)
    print("[INFO] new learning rate: {}".format(K.get_value(
        model.optimizer.lr)))

# construct the set of callbacks
callbacks = [
    EpochCheckpoint(args["checkpoints"], every=5),
    TrainingMonitor(config.FIG_PATH,
                    jsonPath=config.JSON_PATH,
                    startAt=args["start_epoch"])
]

# train the network
model.fit_generator(trainGen.generator(),
                    steps_per_epoch=trainGen.numImages // 64,
                    validation_data=valGen.generator(),
                    validation_steps=valGen.numImages // 64,
                    epochs=10,
                    max_queue_size=64 * 2,
                    callbacks=callbacks,
                    verbose=1)
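
EpochCheckpoint is not a built-in Keras callback: it serializes the entire model into the checkpoint directory every few epochs, offsetting the filename by the epoch the run was restarted at so resumed runs keep a consistent numbering. A rough sketch of how such a callback is commonly implemented, assuming the constructor arguments used above, is shown below.

import os
from tensorflow.keras.callbacks import Callback

class EpochCheckpoint(Callback):
    def __init__(self, output_path, every=5, startAt=0):
        super().__init__()
        self.output_path = output_path   # directory that receives the checkpoints
        self.every = every               # serialize every N epochs
        self.int_epoch = startAt         # offset used when training is resumed

    def on_epoch_end(self, epoch, logs=None):
        # save the full model (architecture + weights + optimizer state)
        if (self.int_epoch + 1) % self.every == 0:
            path = os.path.sep.join(
                [self.output_path, "epoch_{}.hdf5".format(self.int_epoch + 1)])
            self.model.save(path, overwrite=True)
        self.int_epoch += 1
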
Example #4
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
# otherwise load the checkpoint from disk
else:
    print(f"[INFO] loading {args['model']}")
    model = load_model(args['model'])

    # update learning rate
    print(f'[INFO] old learning rate: {K.get_value(model.optimizer.lr)}')
    K.set_value(model.optimizer.lr, 1e-5)
    print(f'[INFO] new learning rate: {K.get_value(model.optimizer.lr)}')

# construct set of callbacks
callbacks = [
    EpochCheckpoint(args['checkpoints'], every=5,
                    start_at=args['start_epoch']),
    TrainingMonitor(config.FIG_PATH,
                    json_path=config.JSON_PATH,
                    start_at=args['start_epoch'])
]

# train the network
model.fit_generator(train_gen.generator(),
                    steps_per_epoch=train_gen.num_images // 64,
                    validation_data=val_gen.generator(),
                    validation_steps=val_gen.num_images // 64,
                    epochs=40,
                    verbose=2,
                    callbacks=callbacks)

# close the HDF5 datasets
train_gen.close()
val_gen.close()
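
TrainingMonitor is likewise a project-specific callback: after every epoch it appends the reported loss/accuracy values to a JSON file and re-renders a training-curve figure, so an interrupted run can be inspected and, on resume, continues its history from start_epoch. A stripped-down sketch of that idea follows; the attribute names and plotting details are assumptions, not the project's exact implementation.

import json
import os
import matplotlib
matplotlib.use("Agg")  # render figures without a display
import matplotlib.pyplot as plt
from tensorflow.keras.callbacks import Callback

class TrainingMonitor(Callback):
    def __init__(self, fig_path, json_path=None, start_at=0):
        super().__init__()
        self.fig_path = fig_path
        self.json_path = json_path
        self.start_at = start_at

    def on_train_begin(self, logs=None):
        self.history = {}
        # reload any existing history so a resumed run extends the old curves
        if self.json_path is not None and os.path.exists(self.json_path):
            self.history = json.loads(open(self.json_path).read())
            if self.start_at > 0:
                for key in self.history:
                    self.history[key] = self.history[key][:self.start_at]

    def on_epoch_end(self, epoch, logs=None):
        # append the metrics Keras reports for this epoch and persist them
        for (key, value) in (logs or {}).items():
            self.history.setdefault(key, []).append(float(value))
        if self.json_path is not None:
            with open(self.json_path, "w") as f:
                f.write(json.dumps(self.history))
        # re-plot all tracked curves so progress can be checked at any time
        if len(self.history.get("loss", [])) > 1:
            plt.figure()
            for (key, values) in self.history.items():
                plt.plot(values, label=key)
            plt.xlabel("Epoch #")
            plt.legend()
            plt.savefig(self.fig_path)
            plt.close()
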
Example #5
def main():
    """Train ResNet on Cifar10 dataset
    """
    # construct the argument parser and parse the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-c",
                        "--checkpoints",
                        required=True,
                        help="path to output checkpoint directory")
    parser.add_argument("-m",
                        "--model",
                        type=str,
                        help="path to *specific* model checkpoint to load")
    parser.add_argument("-s",
                        "--start-epoch",
                        type=int,
                        default=0,
                        help="epoch to restart training at")
    args = vars(parser.parse_args())

    # load the training and testing data, converting the images from integers to floats
    print("[INFO] loading CIFAR-10 data...")
    ((train_x, train_y), (test_x, test_y)) = cifar10.load_data()
    train_x = train_x.astype("float")
    test_x = test_x.astype("float")

    # apply mean subtraction to the data
    mean = np.mean(train_x, axis=0)
    train_x -= mean
    test_x -= mean

    # convert the labels from integers to vectors
    label_binarizer = LabelBinarizer()
    train_y = label_binarizer.fit_transform(train_y)
    test_y = label_binarizer.transform(test_y)

    # construct the image generator for data augmentation
    aug = ImageDataGenerator(width_shift_range=0.1,
                             height_shift_range=0.1,
                             horizontal_flip=True,
                             fill_mode="nearest")

    # if there is no specific model checkpoint supplied, then initialize
    # the network (ResNet-56) and compile the model
    if args["model"] is None:
        print("[INFO] compiling model...")
        opt = SGD(lr=1e-1)
        model = ResNet.build(32,
                             32,
                             3,
                             10,
                             (9, 9, 9),
                             (64, 64, 128, 256),
                             reg=0.0005)
        model.compile(loss="categorical_crossentropy",
                      optimizer=opt,
                      metrics=["accuracy"])
    # otherwise, load the checkpoint from disk
    else:
        print("[INFO] loading {}...".format(args["model"]))
        model = load_model(args["model"])
        # update the learning rate
        print("[INFO] old learning rate: {}".format(
            K.get_value(model.optimizer.lr)))
        K.set_value(model.optimizer.lr, 1e-5)
        print("[INFO] new learning rate: {}".format(
            K.get_value(model.optimizer.lr)))

    # construct the set of callbacks
    callbacks = [
        EpochCheckpoint(args["checkpoints"],
                        every=5,
                        start_at=args["start_epoch"]),
        TrainingMonitor("output/resnet56_cifar10.png",
                        json_path="output/resnet56_cifar10.json",
                        start_at=args["start_epoch"])
    ]

    # train the network
    print("[INFO] training network...")
    model.fit_generator(aug.flow(train_x, train_y, batch_size=128),
                        validation_data=(test_x, test_y),
                        steps_per_epoch=len(train_x) // 128,
                        epochs=100,
                        callbacks=callbacks,
                        verbose=1)
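
Since the script is driven by the --checkpoints, --model and --start-epoch arguments, the usual workflow is to launch a fresh run first and later restart from a serialized checkpoint with the lowered learning rate set above. Assuming the file is saved as resnet_cifar10.py and checkpoints go to output/checkpoints (both names are illustrative assumptions), the two invocations would look roughly like this:

# start training from scratch, checkpointing every 5 epochs
python resnet_cifar10.py --checkpoints output/checkpoints

# resume from the model serialized after epoch 50, keeping the epoch numbering
python resnet_cifar10.py --checkpoints output/checkpoints \
    --model output/checkpoints/epoch_50.hdf5 --start-epoch 50
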
Example #6
def main():
    """Train ResNet
    """
    # construct the argument parser and parse the arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-c",
                        "--checkpoints",
                        required=True,
                        help="path to output checkpoint directory")
    parser.add_argument("-m",
                        "--model",
                        type=str,
                        help="path to *specific* model checkpoint to load")
    parser.add_argument("-s",
                        "--start-epoch",
                        type=int,
                        default=0,
                        help="epoch to restart training at")
    args = vars(parser.parse_args())

    # construct the training image generator for data augmentation
    aug = ImageDataGenerator(
        rotation_range=18,
        zoom_range=0.15,
        width_shift_range=0.2,
        height_shift_range=0.2,
        shear_range=0.15,
        horizontal_flip=True,
        fill_mode="nearest",
    )

    # load the RGB means for the training set
    means = json.loads(open(config.DATASET_MEAN).read())
    # initialize the image preprocessors
    simple_preprocessor = SimplePreprocessor(64, 64)
    mean_preprocessor = MeanPreprocessor(means["R"], means["G"], means["B"])
    image_to_array_preprocessor = ImageToArrayPreprocessor()

    # initialize the training and validation dataset generators
    train_gen = HDF5DatasetGenerator(
        config.TRAIN_HDF5,
        64,
        augmentation=aug,
        preprocessors=[
            simple_preprocessor, mean_preprocessor, image_to_array_preprocessor
        ],
        classes=config.NUM_CLASSES,
    )

    val_gen = HDF5DatasetGenerator(
        config.VAL_HDF5,
        64,
        preprocessors=[
            simple_preprocessor, mean_preprocessor, image_to_array_preprocessor
        ],
        classes=config.NUM_CLASSES,
    )

    # if there is no specific model checkpoint supplied, then initialize
    # the network and compile the model
    if args["model"] is None:
        print("[INFO] compiling model...")
        model = ResNet.build(64,
                             64,
                             3,
                             config.NUM_CLASSES,
                             (3, 4, 6),
                             (64, 128, 256, 512),
                             reg=0.0005,
                             dataset="tiny_imagenet")
        opt = SGD(lr=1e-1, momentum=0.9)
        model.compile(loss="categorical_crossentropy",
                      optimizer=opt,
                      metrics=["accuracy"])

    # otherwise, load the checkpoint from disk
    else:
        print("[INFO] loading {}...".format(args["model"]))
        model = load_model(args["model"])
        # update the learning rate
        print("[INFO] old learning rate: {}".format(
            K.get_value(model.optimizer.lr)))
        K.set_value(model.optimizer.lr, 1e-5)
        print("[INFO] new learning rate: {}".format(
            K.get_value(model.optimizer.lr)))

    # construct the set of callbacks
    callbacks = [
        EpochCheckpoint(args["checkpoints"],
                        every=5,
                        start_at=args["start_epoch"]),
        TrainingMonitor(config.FIG_PATH,
                        json_path=config.JSON_PATH,
                        start_at=args["start_epoch"]),
    ]

    # train the network
    model.fit_generator(
        train_gen.generator(),
        steps_per_epoch=train_gen.num_images // 64,
        validation_data=val_gen.generator(),
        validation_steps=val_gen.num_images // 64,
        epochs=50,
        max_queue_size=10,
        callbacks=callbacks,
        verbose=1,
    )

    # close the databases
    train_gen.close()
    val_gen.close()
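
All of these snippets lean on HDF5DatasetGenerator, a small wrapper that streams batches out of an HDF5 file, runs each image through the preprocessors (and optional augmentation), and yields (images, labels) pairs indefinitely while exposing num_images and close(). A simplified sketch consistent with how it is called above is given below; the dataset key names "images" and "labels", the keyword names, and the helper method are assumptions rather than the project's exact code.

import h5py
import numpy as np
from tensorflow.keras.utils import to_categorical

class HDF5DatasetGenerator:
    def __init__(self, db_path, batch_size, preprocessors=None, aug=None, classes=2):
        self.db = h5py.File(db_path, "r")
        self.batch_size = batch_size
        self.preprocessors = preprocessors
        self.aug = aug
        self.classes = classes
        self.num_images = self.db["labels"].shape[0]

    def generator(self, passes=np.inf):
        epochs = 0
        # loop indefinitely; Keras stops after steps_per_epoch * epochs batches
        while epochs < passes:
            for i in np.arange(0, self.num_images, self.batch_size):
                images = self.db["images"][i:i + self.batch_size]
                labels = to_categorical(self.db["labels"][i:i + self.batch_size],
                                        self.classes)
                if self.preprocessors is not None:
                    images = np.array([self._apply(image) for image in images])
                if self.aug is not None:
                    (images, labels) = next(self.aug.flow(images, labels,
                                                          batch_size=self.batch_size))
                yield (images, labels)
            epochs += 1

    def _apply(self, image):
        # run the image through each preprocessor in order
        for preprocessor in self.preprocessors:
            image = preprocessor.preprocess(image)
        return image

    def close(self):
        self.db.close()
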