Example #1
model = SqueezeNet(nb_classes, input_shape=input_shape)
adam = Adam(lr=0.040)
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)  # unused alternative
model.compile(loss='categorical_crossentropy',
              optimizer=adam,  # pass the instance so lr=0.040 is used, not the 'adam' default
              metrics=['accuracy'])
if os.path.isfile(weights_file):
    print('Loading weights: %s' % weights_file)
    model.load_weights(weights_file, by_name=True)

print('Fitting model')
# model.fit(images, classes, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_split=0.2, initial_epoch=0)
# Keras 1 argument names; in Keras 2 these became steps_per_epoch,
# epochs and validation_steps (counted in batches, not samples).
model.fit_generator(training_data,
                    samples_per_epoch=samples_per_epoch,
                    validation_data=validation_data,
                    nb_val_samples=nb_val_samples,
                    nb_epoch=nb_epoch,
                    verbose=1,
                    initial_epoch=initial_epoch)
print("Finished fitting model")

print('Saving weights')
model.save_weights(weights_file, overwrite=True)
print('Evaluating model')
# score = model.evaluate(images, classes, verbose=1)

# validation_data = ( (load_image(x), to_categorical([y], nb_classes=nb_classes)) for x, y in test )
# validation_data = gen(X_test, Y_test)
score = model.evaluate_generator(validation_data, val_samples=nb_val_samples)
# score = model.evaluate(X_test, Y_test, verbose=1)
print('result: %s' % score)
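# `training_data` / `validation_data` above are assumed to be generators
# yielding (batch_images, batch_labels) tuples indefinitely; a minimal
# sketch of such a helper (hypothetical, hinted at by the commented-out
# `gen(X_test, Y_test)` line) could be:
def gen(X, Y, batch_size=32):
    while True:
        for i in range(0, len(X), batch_size):
            yield X[i:i + batch_size], Y[i:i + batch_size]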
Example #2
# log progress to AML workspace
if remote_execution:

    class LogRunMetrics(Callback):
        # called at the end of every epoch
        def on_epoch_end(self, epoch, logs=None):
            # logging the same metric name each epoch builds up a list
            # of values in the AML run
            run.log('val_loss', logs['val_loss'])
            run.log('loss', logs['loss'])

    callbacks.append(LogRunMetrics())
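    # Note: `run` used above is assumed to be an Azure ML run handle obtained
    # elsewhere, e.g. (assumption, not shown in this snippet):
    #   from azureml.core import Run
    #   run = Run.get_context()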

model.fit_generator(train_generator,
                    steps_per_epoch=50,
                    epochs=30,
                    verbose=1,
                    callbacks=callbacks,
                    validation_data=val_generator,
                    validation_steps=80,
                    workers=4)

if remote_execution:
    run.log('final_val_loss', model.history.history['val_loss'][-1])
    run.log('final_val_accuracy', model.history.history['val_accuracy'][-1])
# Loss/epoch plots

plt.plot(model.history.history['categorical_crossentropy'], label='train')
plt.plot(model.history.history['val_categorical_crossentropy'], label='val')
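# The plot above is left unfinished; a minimal completion (assuming the
# usual `import matplotlib.pyplot as plt`) could be:
plt.xlabel('epoch')
plt.ylabel('categorical crossentropy')
plt.legend()
plt.show()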
Example #3
# Balance class weights by frequency and give preference for RED lights.
# (compute_class_weight here is sklearn.utils.class_weight.compute_class_weight;
# it must run before the weights are used below.)
class_weight = compute_class_weight(
    'balanced', np.unique(train_generator.classes), train_generator.classes)
class_weight[0] *= 2

if os.path.isfile(weights_file):
    print('Loading weights: %s' % weights_file)
    model.load_weights(weights_file, by_name=True)
else:
    # for a new model, set the last-layer bias from the normalised class weights
    w, _ = model.get_layer('conv10').get_weights()
    model.get_layer('conv10').set_weights(
        [w, np.log(class_weight / sum(class_weight))])

print('Fitting model')
model.fit_generator(train_generator,
                    steps_per_epoch=samples_per_epoch // 64,  # batch size 64
                    validation_data=val_generator,
                    validation_steps=nb_val_samples // 64,
                    epochs=nb_epoch,
                    verbose=1,
                    initial_epoch=initial_epoch,
                    class_weight=class_weight)
print("Finished fitting model")

print('Saving weights')
model.save_weights(weights_file, overwrite=True)
print('Evaluating model')
score = model.evaluate_generator(val_generator,
                                 steps=nb_val_samples // 64)  # cover the full validation set
print('result: %s' % score)
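# Aside on the bias initialisation above: for a softmax layer, a bias of
# np.log(p) for a probability vector p makes the untrained model output p,
# so np.log(class_weight / sum(class_weight)) starts the model at the
# normalised class weights. Illustrative check (not from the original):
import numpy as np
p = np.array([0.5, 0.3, 0.2])
bias = np.log(p)
softmax = np.exp(bias) / np.exp(bias).sum()
assert np.allclose(softmax, p)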
Example #4
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
x_train = x_train.astype(np.float32)
x_train /= 255.

img_shape = x_train[0].shape
model = SqueezeNet(img_shape, num_classes)

# This will do preprocessing and realtime data augmentation:
datagen = ImageDataGenerator(
    featurewise_center=False,  # set input mean to 0 over the dataset
    samplewise_center=False,  # set each sample mean to 0
    featurewise_std_normalization=False,  # divide inputs by std of the dataset
    samplewise_std_normalization=False,  # divide each input by its std
    zca_whitening=False,  # apply ZCA whitening
    rotation_range=0,  # randomly rotate images in the range (degrees, 0 to 180)
    width_shift_range=0.1,  # randomly shift images horizontally (fraction of total width)
    height_shift_range=0.1,  # randomly shift images vertically (fraction of total height)
    horizontal_flip=True,  # randomly flip images
    vertical_flip=False)  # do not flip vertically
datagen.fit(x_train)  # only required when featurewise centering/std or ZCA whitening is enabled

#model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=True)
model.fit_generator(datagen.flow(x_train, y_train, batch_size=batch_size),
                    steps_per_epoch=x_train.shape[0] // batch_size,
                    epochs=epochs)

model.save(os.path.join(path, 'model.h5'))
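# Usage sketch (assumption, not part of the original snippet): the saved
# file can be restored later with the standard Keras loader.
from keras.models import load_model
restored = load_model(os.path.join(path, 'model.h5'))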
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--train-dir', default='data/train')
    parser.add_argument('--test-dir', default='data/test')
    parser.add_argument('--logdir', default='logs')
    parser.add_argument('--batch-size', type=int, default=32)
    parser.add_argument('--epochs', type=int, required=True)
    parser.add_argument('--num-classes', type=int, required=True)
    parser.add_argument('--checkpoint-pattern',
                        default='weights-{epoch:d}-{val_acc:.4f}.hdf5')
    parser.add_argument('--learning-rate', type=float, default=1e-4)
    parser.add_argument('--restore')
    args = parser.parse_args()

    # count samples
    train_files = count_files(args.train_dir, '.png')
    print('Found %d train files.' % train_files)
    test_files = count_files(args.test_dir, '.png')
    print('Found %d test files.' % test_files)

    if args.restore:
        model = SqueezeNet(weights=None, classes=args.num_classes)
        model.load_weights(args.restore)
    else:
        model = SqueezeNet(weights='imagenet', classes=args.num_classes)

    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizers.Adam(lr=args.learning_rate),
                  metrics=['accuracy'])

    train_datagen = ImageDataGenerator(
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True,
        preprocessing_function=preprocess_single)

    test_datagen = ImageDataGenerator(preprocessing_function=preprocess_single)

    train_generator = train_datagen.flow_from_directory(
        args.train_dir,
        target_size=(SIZE, SIZE),
        batch_size=args.batch_size,
        class_mode='categorical')

    test_generator = test_datagen.flow_from_directory(
        args.test_dir,
        target_size=(SIZE, SIZE),
        batch_size=args.batch_size,
        class_mode='categorical')

    checkpoint = ModelCheckpoint(args.checkpoint_pattern,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    tensorboard = TensorBoard(log_dir=args.logdir,
                              histogram_freq=0,
                              batch_size=args.batch_size,
                              write_graph=True,
                              write_grads=False,
                              write_images=False,
                              embeddings_freq=0,
                              embeddings_layer_names=None,
                              embeddings_metadata=None)
    callbacks = [checkpoint, tensorboard]

    model.fit_generator(train_generator,
                        steps_per_epoch=(train_files // args.batch_size),
                        epochs=args.epochs,
                        validation_data=test_generator,
                        validation_steps=(test_files // args.batch_size),
                        callbacks=callbacks)
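# `count_files`, `SIZE` and `preprocess_single` are referenced above but not
# defined in this snippet. A minimal `count_files` consistent with its use
# (a sketch, assuming `import os`) might be:
def count_files(directory, extension):
    # recursively count files under `directory` whose names end in `extension`
    return sum(1 for _, _, files in os.walk(directory)
               for name in files if name.endswith(extension))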