# NOTE(review): this fragment is missing its opening `if` line (compare the
# complete variant later in this file: `if args["model"] is None:`) — the
# indented lines below and the dangling `else:` do not parse as-is.
print("[Info] compiling model...")
    opt = SGD(lr=1e-1)
    # ResNet for CIFAR-10: 32x32x3 input, 10 classes, 3 stages of 9 residual
    # blocks each, filter counts (64, 64, 128, 256), L2 regularization 5e-4
    model = ResNet.build(32, 32, 3, 10, (9, 9, 9),
               (64, 64, 128, 256), reg=0.0005)
    model.compile(loss="categorical_crossentropy", optimizer=opt,
         metrics=["accuracy"])
else:
    # resume training from a serialized checkpoint on disk
    print("[Info] load {}...".format(args["model"]))
    model = load_model(args["model"])

    # update the learning rate: drop it to 1e-2 for the resumed run
    print("[Info] old learn rate:{}".format(
           K.get_value(model.optimizer.lr)))
    K.set_value(model.optimizer.lr, 1e-2)
    print("[Info] new learn rate:{}".format(
           K.get_value(model.optimizer.lr)))

# checkpoint the model every 5 epochs and log loss/accuracy curves;
# startAt keeps epoch numbering consistent when resuming
callbacks = [
  EpochCheckpoint(args["checkpoints"], every=5,
    startAt=args["start_epoch"]),
  TrainingMonitor("output/resnet56_cifar10.png",
     jsonPath="output/resnet56_cifar10.json",
     startAt=args["start_epoch"])]

print("[Info] training network...")
# train with on-the-fly augmentation (fit_generator is the pre-TF2 API name)
model.fit_generator(
   aug.flow(trainX, trainY, batch_size=128),
   validation_data=(testX, testY),
   steps_per_epoch=len(trainX) // 128, epochs=80,
   callbacks=callbacks, verbose=1)
Beispiel #2
0
# initialize the label names for the CIFAR-10 dataset
labelNames = [
    "airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse",
    "ship", "truck"
]

# initialize the optimizer (SGD with Nesterov momentum) and the model
print("[INFO] compiling model... ")
opt = SGD(lr=0.01, momentum=0.9, nesterov=True)

# BUG FIX: the factory method is `build`, not `built` — the original raised
# AttributeError at runtime (compare the MiniVGGNet.build call used by the
# other fragment in this file).
model = MiniVGGNet.build(width=32, height=32, depth=3, classes=10)

model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

# output artifacts (plot + JSON history) keyed by the process id so
# concurrent runs do not overwrite each other
figPath = os.path.sep.join([args["output"], "{}.jpg".format(os.getpid())])

jsonPath = os.path.sep.join([args["output"], "{}.json".format(os.getpid())])

callbacks = [TrainingMonitor(figPath, jsonPath=jsonPath)]

print("[INFO] training network...")

# BUG FIX: validate on the held-out test split — the original passed
# (trainX, trainY) as validation_data, so the validation metrics simply
# re-measured training performance.
model.fit(trainX,
          trainY,
          validation_data=(testX, testY),
          batch_size=64,
          epochs=100,
          callbacks=callbacks,
          verbose=1)
                                aug=aug,
                                preprocessors=[pp, mp, iap],
                                classes=2)
# validation-data generator streaming batches from the validation HDF5 file;
# preprocessors sp/mp/iap are applied in order (presumably simple-resize /
# mean-subtraction / image-to-array — TODO confirm against the imports)
valGen = HDF5DatasetGenerator(config.VAL_HDF5,
                              bSize,
                              preprocessors=[sp, mp, iap],
                              classes=2)

# initialize the optimizer
opt = Adam(lr=1e-3)
# AlexNet for 227x227x3 input, binary output, L2 regularization 2e-4
model = AlexNet.build(width=227, height=227, depth=3, classes=2, reg=0.0002)
model.compile(loss="binary_crossentropy", optimizer=opt, metrics=["accuracy"])

# construct the set of callbacks: training plot named after the process id
# so concurrent runs do not collide
path = os.path.sep.join([config.OUTPUT_PATH, "{}.png".format(os.getpid())])
callbacks = [TrainingMonitor(path)]

# train the network (fit_generator is the deprecated pre-TF2 spelling;
# Model.fit accepts generators directly in TF >= 2.1)
model.fit_generator(trainGen.generator(),
                    steps_per_epoch=trainGen.numImages // bSize,
                    validation_data=valGen.generator(),
                    validation_steps=valGen.numImages // bSize,
                    epochs=75,
                    max_queue_size=bSize * 2,
                    callbacks=callbacks,
                    verbose=1)

# save the model to file
print("[INFO] serializing model...")
model.save(config.MODEL_PATH, overwrite=True)
                                                  test_size=0.20,
                                                  stratify=labels,
                                                  random_state=42)

#initialize the model
print("[INFO] compiling model...")
# 28x28 single-channel input, two classes (binary task)
model = MiniVGGNetwork.build(width=28, height=28, depth=1, classes=2)
model.compile(loss="binary_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])

# construct the set of  callbacks
# initialize the training monitor (plot + JSON history named after the PID)
figPath = os.path.sep.join([args["figure"], "{}.png".format(os.getpid())])
jsonPath = os.path.sep.join([args["figure"], "{}.json".format(os.getpid())])
training_monitor = TrainingMonitor(figPath=figPath, jsonPath=jsonPath)

# initialize the checkpoint improvements: keep only the weights with the
# lowest validation loss seen so far
checkpoint_improvements = ModelCheckpoint(args["model"],
                                          monitor="val_loss",
                                          mode="min",
                                          save_best_only=True,
                                          verbose=1)

# initialize the callbacks
callbacks = [training_monitor, checkpoint_improvements]

# train the network
print("[INFO] training network...")
# NOTE(review): this fit(...) call is truncated at the fragment boundary —
# the remaining keyword arguments are not visible in this view
H = model.fit(trainX,
              trainY,
Beispiel #5
0
                                     maxlen=length_max,
                                     padding='post')
#X_test_pad = sequence.pad_sequences(X_test_tokens, maxlen=length_max, padding='post')

# Artifacts for the training monitor: live plot and JSON loss history,
# both keyed by the process id.
_plot_dir = '/home/pavel/PycharmProjects/nn/pyimagesearch/plot/'
figPath = os.path.sep.join([_plot_dir, "{}_.png".format(os.getpid())])
jsonPath = os.path.sep.join([_plot_dir, "{}.json".format(os.getpid())])

optimizer = SGD(lr=0.01, momentum=0.9, nesterov=True)

# Plot/record the training curves (no validation data) and decay the
# learning rate each epoch via step_decay.
callbacks = [
    TrainingMonitor(jsonPath=jsonPath, figPath=figPath, val=False),
    LearningRateScheduler(step_decay),
]

model = RnnForWord().build(vocab_size=vocabulary_size,
                           max_review_length=length_max,
                           embedding_vector_length=length_max)

print("[INFO] compiling model...")
model.compile(loss='binary_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])

history = model.fit(X_train_bad, y_train,
                    epochs=50, batch_size=16,
                    callbacks=callbacks)
Beispiel #6
0
else:
    print("[INFO] loading {}...".format(args["model"]))
    model = load_model(args["model"])

    # upate the learning rate
    print("[INFO] old learning rate: {}".format(K.get_value(
        model.optimizer.lr)))
    K.set_value(model.optimizer.lr, 1e-5)
    print("[INFO] new learning rate: {}".format(K.get_value(
        model.optimizer.lr)))

# construct the set of callbacks
callbacks = [
    EpochCheckpoint(args["checkpoints"], every=5, startAt=args["start_epoch"]),
    TrainingMonitor(config.FIG_PATH,
                    jsonPath=config.JSON_PATH,
                    startAt=args["start_epoch"])
]

# train the network
model.fit_generator(trainGen.generator(),
                    steps_per_epoch=trainGen.numImages // 64,
                    validation_data=valGen.generator(),
                    validation_steps=valGen.numImages // 64,
                    epochs=10,
                    max_queue_size=64 * 2,
                    callbacks=callbacks,
                    verbose=1)

# close the database
trainGen.close()
Beispiel #7
0
# HDF5-backed training batches: augmentation enabled, preprocessors applied
# in order, labels one-hot encoded (binarize=True) over the two classes.
train_generator = HDF5DatasetGenerator(
    db_path=configs.TRAIN_HDF5,
    batch_size=configs.BATCH_SIZE,
    preprocessors=[pp, mp, iap],
    aug=aug,
    binarize=True,
    classes=2,
)

# Validation batches: same label handling, but no augmentation.
val_generator = HDF5DatasetGenerator(
    db_path=configs.TEST_HDF5,
    batch_size=configs.BATCH_SIZE,
    preprocessors=[sp, mp, iap],
    aug=None,
    binarize=True,
    classes=2,
)

# Live training plot, keyed by the process id.
path = os.path.sep.join([configs.OUTPUT_PATH, f"{os.getpid()}.png"])
callbacks = [TrainingMonitor(plot_path=path)]

# AlexNet with Adam; binary cross-entropy for the two-class problem.
opt = Adam(lr=1e-3)
model = AlexNet.build(width=227, height=227, classes=configs.NUM_CLASSES)
model.compile(opt, loss="binary_crossentropy", metrics=["accuracy"])

# One epoch sweeps each dataset once.
steps = train_generator.num_images // configs.BATCH_SIZE
val_steps = val_generator.num_images // configs.BATCH_SIZE
model.fit(
    train_generator.generate(),
    epochs=EPOCH,
    steps_per_epoch=steps,
    validation_data=val_generator.generate(),
    validation_steps=val_steps,
    max_queue_size=configs.BATCH_SIZE * 2,
    callbacks=callbacks,
)
Beispiel #8
0
# NOTE(review): the matching `if` branch (building a fresh model when no
# checkpoint path is supplied) precedes this fragment and is not visible here
else:
    # load the checkpoint from disk
    print("[INFO] loading {}...".format(args["model"]))
    model = load_model(args["model"])

    # update the learning rate: drop it to 1e-2 for the resumed run
    print("[INFO] old learning rate: {}".format(K.get_value(
        model.optimizer.lr)))
    K.set_value(model.optimizer.lr, 1e-2)
    print("[INFO] new learning rate: {}".format(K.get_value(
        model.optimizer.lr)))

# build the path to the training plot and training history
plotPath = os.path.sep.join(["output", "resnet_fashion_mnist.png"])
jsonPath = os.path.sep.join(["output", "resnet_fashion_mnist.json"])

# construct the set of callbacks: serialize the model every 5 epochs and
# log curves; startAt keeps epoch numbering consistent when resuming
callbacks = [
    EpochCheckpoint(args["checkpoints"], every=5, startAt=args["start_epoch"]),
    TrainingMonitor(plotPath, jsonPath=jsonPath, startAt=args["start_epoch"])
]

# train the network with on-the-fly augmentation
print("[INFO] training network...")
model.fit(x=aug.flow(trainX, trainY, batch_size=128),
          validation_data=(testX, testY),
          steps_per_epoch=len(trainX) // 128,
          epochs=80,
          callbacks=callbacks,
          verbose=1)
# convert the labels from integers to vectors (one-hot; binarizer fitted on
# the training labels and reused for the test labels)
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

# construct the image generator for data augmentation: small horizontal and
# vertical translations plus mirroring, filling exposed pixels with the
# nearest value
aug = ImageDataGenerator(width_shift_range=0.1,
                         height_shift_range=0.1,
                         horizontal_flip=True,
                         fill_mode="nearest")
# construct the set of callbacks: training plot + JSON history, both keyed
# by the process id, plus polynomial learning-rate decay
figPath = os.path.sep.join([args["output"], "{}.png".format(os.getpid())])
# BUG FIX: jsonPath was built with the ".png" format string (copy/paste from
# figPath), so the monitor's JSON history would have been written to a file
# with a .png extension and could collide with the plot path.
jsonPath = os.path.sep.join([args["output"], "{}.json".format(os.getpid())])
callbacks = [
    TrainingMonitor(figPath, jsonPath=jsonPath),
    LearningRateScheduler(poly_decay)
]

# initialize the optimizer and model (ResNet-56)
print("[INFO] compiling model...")
opt = SGD(lr=INIT_LR, momentum=0.9)
model = ResNet.build(32, 32, 3, 10, (9, 9, 9), (64, 64, 128, 256), reg=0.0005)
model.compile(loss="categorical_crossentropy",
              optimizer=opt,
              metrics=["accuracy"])

# train the network
print("[INFO] training network...")
model.fit_generator(aug.flow(trainX, trainY, batch_size=128),
                    validation_data=(testX, testY),
Beispiel #10
0
# One-hot encode the integer class labels (fit on train, reuse on test).
lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
testY = lb.transform(testY)

# Human-readable CIFAR-10 class names.
labelNames = ["airplane", "automobile", "bird", "cat", "deer",
              "dog", "frog", "horse", "ship", "truck"]

# SGD with Nesterov momentum driving a MiniVGGNet for 32x32 RGB input.
print("[INFO] compiling model...")
opt = SGD(lr=0.01, momentum=0.9, nesterov=True)
model = MiniVGGNet.build(width=32, height=32, depth=3, classes=10)
model.compile(loss="categorical_crossentropy", optimizer=opt,
              metrics=["accuracy"])

# Per-process output artifacts: training plot and JSON history.
pid = os.getpid()
figPath = os.path.sep.join([args["output"], "{}_.png".format(pid)])
jsonPath = os.path.sep.join([args["output"], "{}.json".format(pid)])

# Monitor the curves and decay the learning rate via step_decay.
callbacks = [TrainingMonitor(jsonPath=jsonPath, figPath=figPath),
             LearningRateScheduler(step_decay)]

model.fit(trainX, trainY,
          validation_data=(testX, testY),
          batch_size=64, epochs=40,
          callbacks=callbacks, verbose=1)
Beispiel #11
0
# Run model
model = main_model()

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# output artifacts for the training monitor
plotPath = os.path.sep.join(["output", "CNN.png"])
jsonPath = os.path.sep.join(["output", "CNN.json"])

# CSV log named after the (batch size, epochs) configuration, periodic
# checkpoints every 5 epochs, and live loss/accuracy plotting
callbacks = [
    CSVLogger('log_%d_%d.csv' % (BATCH_SIZE, EPOCHS),
              append=True,
              separator=';'),
    EpochCheckpoint(args["checkpoints"], every=5, startAt=0),
    TrainingMonitor(plotPath, jsonPath=jsonPath, startAt=0),
]

# BUG FIX: the original passed `callbacks=[csv_logger]` — `csv_logger` is
# an undefined name (NameError at call time), and the `callbacks` list
# constructed above was never used at all.
model.fit(model_train,
          y_train,
          batch_size=BATCH_SIZE,
          epochs=EPOCHS,
          validation_data=(model_val, y_val),
          callbacks=callbacks)

# NOTE(review): predict() is called with no input data and its result is
# discarded — Keras requires the samples to score (e.g.
# model.predict(model_val)); confirm the intent or delete this call.
model.predict()
Beispiel #12
0
# If no checkpoint was supplied, build a fresh ResNet-56 for CIFAR-10;
# otherwise resume from the serialized model and lower the learning rate.
if args["model"] is None:
    print("[Info] compiling model...")
    opt = SGD(lr=INIT_LR, momentum=0.9)
    model = ResNet.build(32, 32, 3, 10, (9, 9, 9), (64, 64, 128, 256),
                         reg=0.0005)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["accuracy"])
else:
    print("[Info] load {}...".format(args["model"]))
    model = load_model(args["model"])

    # drop the learning rate to 1e-5 for the resumed run
    old_lr = K.get_value(model.optimizer.lr)
    print("[Info] old learn rate:{}".format(old_lr))
    K.set_value(model.optimizer.lr, 1e-5)
    print("[Info] new learn rate:{}".format(K.get_value(model.optimizer.lr)))

# log loss/accuracy curves and apply polynomial learning-rate decay
callbacks = [
    TrainingMonitor("output/resnet56_cifar10.png",
                    jsonPath="output/resnet56_cifar10.json"),
    LearningRateScheduler(poly_decay),
]

print("[Info] training network...")
model.fit_generator(aug.flow(trainX, trainY, batch_size=128),
                    validation_data=(testX, testY),
                    steps_per_epoch=len(trainX) // 128,
                    epochs=80,
                    callbacks=callbacks,
                    verbose=1)
Beispiel #13
0
    fill_mode="nearest",
    horizontal_flip=True,
    preprocessing_function=train_preprocessors.preprocess)

# validation pipeline: no augmentation, only the deterministic preprocessing
val_aug = ImageDataGenerator(
    preprocessing_function=val_preprocessors.preprocess)

# stream batches straight from the image directories
train_generator = train_aug.flow_from_directory(directory=configs.TRAIN_IMAGE,
                                                batch_size=configs.BATCH_SIZE)
val_generator = val_aug.flow_from_directory(directory=configs.TEST_IMAGE,
                                            batch_size=configs.BATCH_SIZE)

# callback: live training plot named after the process id
path = os.path.sep.join([configs.OUTPUT_PATH, f"{os.getpid()}.png"])
callbacks = [
    TrainingMonitor(plot_path=path),
]

# initialize optimizer and model (227x227 input, NUM_CLASSES outputs)
opt = Adam(lr=1e-3)
model = AlexNet.build(width=227, height=227, classes=configs.NUM_CLASSES)

# compile and train model (binary cross-entropy for a two-class problem)
model.compile(opt, loss="binary_crossentropy", metrics=["accuracy"])
# NOTE(review): this fit(...) call is truncated at the fragment boundary —
# its closing arguments and parenthesis are not visible in this view
model.fit(
    train_generator,
    epochs=EPOCH,
    steps_per_epoch=train_generator.samples // configs.BATCH_SIZE,
    validation_data=val_generator,
    validation_steps=val_generator.samples // configs.BATCH_SIZE,
    max_queue_size=configs.BATCH_SIZE * 2,