image_generator = ImageGenerator(ground_truth_data,
                                 prior_box_manager,
                                 batch_size,
                                 image_shape[0:2],
                                 train_keys,
                                 validation_keys,
                                 image_prefix,
                                 vertical_flip_probability=0,
                                 horizontal_flip_probability=0.5)

model_names = ('../trained_models/model_checkpoints/' +
               'weights.{epoch:02d}-{val_loss:.2f}.hdf5')
model_checkpoint = ModelCheckpoint(model_names,
                                   monitor='val_loss',
                                   verbose=1,
                                   save_best_only=False,
                                   save_weights_only=True)
learning_rate_schedule = ReduceLROnPlateau(monitor='val_loss',
                                           factor=0.1,
                                           patience=10,
                                           verbose=1,
                                           cooldown=20)

model.fit_generator(image_generator.flow(mode='train'),
                    len(train_keys),
                    num_epochs,
                    callbacks=[model_checkpoint, learning_rate_schedule],
                    validation_data=image_generator.flow(mode='val'),
                    nb_val_samples=len(validation_keys))
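# Note: the positional arguments above follow the Keras 1 fit_generator
# signature (samples_per_epoch, nb_epoch), and nb_val_samples counts samples.
# A minimal sketch of the equivalent call under the Keras 2 API, which counts
# batches instead of samples:
model.fit_generator(image_generator.flow(mode='train'),
                    steps_per_epoch=int(len(train_keys) / batch_size),
                    epochs=num_epochs,
                    callbacks=[model_checkpoint, learning_rate_schedule],
                    validation_data=image_generator.flow(mode='val'),
                    validation_steps=int(len(validation_keys) / batch_size))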
Example #2
box_creator.draw_boxes(image_path + image_key, box_coordinates)

data_path = '../datasets/VOCdevkit/VOC2007/'
ground_truths = XMLParser(data_path + 'Annotations/').get_data()
prior_box_manager = PriorBoxAssigner(prior_boxes, ground_truths)
assigned_boxes = prior_box_manager.assign_boxes()
prior_box_manager.draw_assigned_boxes(image_path, image_shape[0:2], image_key)
batch_size = 7
train_keys, validation_keys = split_data(assigned_boxes, training_ratio=.8)

assigned_image_generator = ImageGenerator(assigned_boxes, batch_size,
                                          image_shape[0:2], train_keys,
                                          validation_keys,
                                          data_path + 'JPEGImages/')

transformed_image = next(assigned_image_generator.flow(mode='demo'))[0]
transformed_image = np.squeeze(transformed_image[0]).astype('uint8')
original_image = read_image(data_path + 'JPEGImages/' + validation_keys[0])
original_image = resize_image(original_image, image_shape[0:2])
plt.figure(1)
plt.subplot(121)
plt.title('Original image')
plt.imshow(original_image)
plt.subplot(122)
plt.title('Transformed image')
plt.imshow(transformed_image)
plt.show()

box_transformer = BoxTransformer(assigned_boxes, ground_truths)
encoded_boxes = box_transformer.encode_boxes()
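# Note: the exact transform is whatever BoxTransformer implements, but the
# standard SSD encoding it mirrors converts a matched ground-truth box into
# offsets relative to its prior box; decoding at inference inverts these.
# A minimal sketch (plain offsets, ignoring variance scaling), assuming boxes
# are [x_min, y_min, x_max, y_max] numpy arrays:
import numpy as np

def encode_box(ground_truth, prior):
    g_center = (ground_truth[:2] + ground_truth[2:]) / 2.0
    g_size = ground_truth[2:] - ground_truth[:2]
    p_center = (prior[:2] + prior[2:]) / 2.0
    p_size = prior[2:] - prior[:2]
    return np.concatenate([(g_center - p_center) / p_size,
                           np.log(g_size / p_size)])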
Example #3
# In[10]:

base_lr = 3e-4
optim = keras.optimizers.Adam(lr=base_lr)
# optim = keras.optimizers.RMSprop(lr=base_lr)
# optim = keras.optimizers.SGD(lr=base_lr, momentum=0.9, decay=decay, nesterov=True)
model.compile(optimizer=optim,
              loss=MultiboxLoss(NUM_CLASSES, neg_pos_ratio=2.0).compute_loss)
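# Note: neg_pos_ratio=2.0 caps hard negative mining inside MultiboxLoss: for
# every prior matched to an object, at most two unmatched priors (those with
# the highest confidence loss) contribute to the classification term. A
# minimal numpy sketch of that selection, with made-up per-prior losses:
import numpy as np

conf_loss = np.array([0.2, 1.5, 0.1, 0.9, 0.05, 2.0])  # per-prior conf loss
pos_mask = np.array([True, False, False, True, False, False])

num_neg = int(2.0 * pos_mask.sum())  # keep at most 2 negatives per positive
hard_negatives = np.sort(conf_loss[~pos_mask])[::-1][:num_neg]
total_conf_loss = conf_loss[pos_mask].sum() + hard_negatives.sum()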

# In[11]:
#
#nb_epoch = 30
#history = model.fit_generator(gen.generate(True), gen.train_batches,
#                              nb_epoch, verbose=1,
#                              callbacks=callbacks,
#                              validation_data=gen.generate(False),
#                              nb_val_samples=gen.val_batches,
#                              nb_worker=1)
#

nb_epoch = 30
history = model.fit_generator(gen.flow(mode='train'),
                              len(train_keys),
                              nb_epoch,
                              verbose=1,
                              callbacks=callbacks,
                              validation_data=gen.flow('val'),
                              nb_val_samples=len(val_keys),
                              nb_worker=1)
Example #4
# parameters
root_prefix = '../datasets/VOCdevkit/VOC2007/'
image_prefix = root_prefix + 'JPEGImages/'
ground_data_prefix = root_prefix + 'Annotations/'
model = my_SSD(num_classes=21)
image_shape = model.input_shape[1:]
background_id = 0

ground_truth_manager = XMLParser(ground_data_prefix, background_id)
ground_truth_data = ground_truth_manager.get_data()
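# Note: assumed layout of ground_truth_data, a dict keyed by image filename;
# each row holds normalized corner coordinates followed by a one-hot class
# vector (21 entries here, with background_id = 0 reserved for background):
# ground_truth_data['000007.jpg'] ->
#     array([[0.14, 0.22, 0.92, 0.87, 0., 0., 1., ...]])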
box_creator = PriorBoxCreator(model)
prior_boxes = box_creator.create_boxes()
prior_box_manager = PriorBoxManager(prior_boxes, background_id)

prior_boxes = flatten_prior_boxes(prior_boxes)

train_keys, validation_keys = split_data(ground_truth_data, training_ratio=.8)
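# Note: split_data is a project helper, not a Keras function; a minimal
# sketch of the assumed behavior (Example #5 below performs the same split
# by hand):
def split_data_sketch(ground_truth_data, training_ratio=.8):
    keys = sorted(ground_truth_data.keys())
    num_train = int(round(training_ratio * len(keys)))
    return keys[:num_train], keys[num_train:]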

batch_size = 10
image_generator = ImageGenerator(ground_truth_data,
                                 prior_box_manager,
                                 batch_size,
                                 image_shape[0:2],
                                 train_keys,
                                 validation_keys,
                                 image_prefix,
                                 vertical_flip_probability=0,
                                 horizontal_flip_probability=0.5)

data = next(image_generator.flow(mode='train'))
Example #5
box_configs = [
    # ... (earlier entries, and this dict's layer_width/layer_height/num_prior
    # keys, are truncated in the source snippet)
    {
        'min_size': 222.0,
        'max_size': 276.0,
        'aspect_ratios': [1.0, 1.0, 2.0, 1 / 2.0, 3.0, 1 / 3.0]
    },
    {
        'layer_width': 1,
        'layer_height': 1,
        'num_prior': 6,
        'min_size': 276.0,
        'max_size': 330.0,
        'aspect_ratios': [1.0, 1.0, 2.0, 1 / 2.0, 3.0, 1 / 3.0]
    },
]

priors = create_prior_box(image_shape[0:2], box_configs, variances)
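# Note: under the standard SSD recipe these configs imply, each aspect ratio
# contributes one box of w = size * sqrt(ar), h = size / sqrt(ar); the
# duplicated 1.0 is the conventional extra square box at the intermediate
# scale sqrt(min_size * max_size). A sketch for the last config above:
import numpy as np

min_size, max_size = 276.0, 330.0
aspect_ratios = [1.0, 1.0, 2.0, 1 / 2.0, 3.0, 1 / 3.0]
sizes = [min_size, np.sqrt(min_size * max_size)] + [min_size] * 4
widths = [size * np.sqrt(ar) for size, ar in zip(sizes, aspect_ratios)]
heights = [size / np.sqrt(ar) for size, ar in zip(sizes, aspect_ratios)]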
bounding_box_utils = BoundingBoxUtility(num_classes, priors)
ground_truth_data = XML_preprocessor(data_path + 'Annotations/').data

keys = sorted(ground_truth_data.keys())
num_train = int(round(training_data_ratio * len(keys)))
train_keys = keys[:num_train]
validation_keys = keys[num_train:]
num_val = len(validation_keys)

image_generator = ImageGenerator(ground_truth_data, bounding_box_utils,
                                 batch_size, data_path + 'JPEGImages/',
                                 train_keys, validation_keys, image_shape[:2])

gen = image_generator.flow(True)
next(gen)
Example #6
base_learning_rate = 3e-4


def schedule(epoch, decay=0.9):
    return base_learning_rate * decay ** epoch


learning_rate_scheduler = LearningRateScheduler(schedule)
callbacks = [model_checkpoint, learning_rate_scheduler]
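# Note: the schedule above decays the rate geometrically; for the first
# epochs it yields schedule(0) == 3e-4, schedule(1) == 2.7e-4,
# schedule(2) == 2.43e-4:
first_rates = [schedule(epoch) for epoch in range(3)]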

# In[16]:

multibox_loss = MultiboxLoss(num_classes, neg_pos_ratio=2.0).compute_loss

# In[17]:

optimizer = Adam(lr=base_learning_rate)
model.compile(optimizer=optimizer, loss=multibox_loss)

# In[18]:

nb_epoch = 30
history = model.fit_generator(image_generator.flow(True),
                              num_train,
                              nb_epoch,
                              verbose=1,
                              callbacks=callbacks,
                              validation_data=image_generator.flow(False),
                              nb_val_samples=num_val,
                              nb_worker=1)
Example #7
images_path = '../datasets/german_open_dataset/images/'
labels = get_labels(dataset_name)
num_classes = len(list(labels.keys()))
use_bounding_boxes = True

data_loader = XMLParser(ground_truth_path, dataset_name,
                        use_bounding_boxes=use_bounding_boxes)
ground_truth_data = data_loader.get_data()

train_keys, val_keys = split_data(ground_truth_data,
                                  training_ratio=.5,
                                  do_shuffle=True)

image_generator = ImageGenerator(ground_truth_data, batch_size, input_shape[:2],
                                 train_keys, val_keys, None,
                                 path_prefix=images_path,
                                 vertical_flip_probability=0,
                                 do_random_crop=True,
                                 use_bounding_boxes=use_bounding_boxes)

batch_data = next(image_generator.flow('demo'))

batch_images = batch_data[0]['image_array_input']
batch_classes = batch_data[1]['predictions']
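# Note: Keras matches dict-style generator output to layers by name, so these
# keys must equal the model's input layer name ('image_array_input') and
# output layer name ('predictions').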

for image_arg in range(batch_size):
    image = batch_images[image_arg]
    label_arg = np.argmax(batch_classes[image_arg])
    label = labels[label_arg]
    display_image(image, label)
Example #8
decoded_positive_boxes = assigned_decoded_boxes[positive_mask]
box_visualizer.draw_normalized_box(decoded_positive_boxes, selected_key)

# drawing generator output
train_keys, validation_keys = split_data(ground_truth_data, training_ratio=.8)
image_generator = ImageGenerator(ground_truth_data,
                                 prior_box_manager,
                                 1,
                                 image_shape,
                                 train_keys,
                                 validation_keys,
                                 image_prefix,
                                 vertical_flip_probability=0,
                                 horizontal_flip_probability=0.5)

generated_data = next(image_generator.flow(mode='demo'))
generated_input = generated_data[0]['input_1']
generated_output = generated_data[1]['predictions']
transformed_image = np.squeeze(generated_input[0]).astype('uint8')
validation_image_name = image_prefix + validation_keys[0]
original_image = read_image(validation_image_name)
original_image = resize_image(original_image, image_shape)
plot_images(original_image, transformed_image)

# finally draw the assigned boxes given by the generator
generated_encoded_boxes = np.squeeze(generated_output)
generated_boxes = prior_box_manager.decode_boxes(generated_encoded_boxes)
positive_mask = generated_boxes[:, 4] != 1
generated_positive_boxes = generated_boxes[positive_mask]
box_visualizer.draw_normalized_box(generated_positive_boxes,
                                   validation_keys[0])
Example #9
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()

# model callbacks
csv_logger = CSVLogger(log_file_path, append=False)
model_names = trained_models_path + '.{epoch:02d}-{val_acc:.2f}.hdf5'
model_checkpoint = ModelCheckpoint(model_names,
                                   monitor='val_loss',
                                   verbose=1,
                                   save_best_only=True,
                                   save_weights_only=False)

# model training
model.fit_generator(image_generator.flow(mode='train'),
                    steps_per_epoch=int(len(train_keys) / batch_size),
                    epochs=num_epochs, verbose=1,
                    callbacks=[csv_logger, model_checkpoint],
                    validation_data=image_generator.flow('val'),
                    validation_steps=int(len(val_keys) / batch_size))

# Adding images for testing

import cv2
from keras.models import load_model
import numpy as np
from statistics import mode
from utils import preprocess_input
from utils import get_labels