Exemple #1
0
# Training-time augmentation: random rotations (up to 30 degrees),
# small width/height shifts, mild zoom and horizontal flips.
generator = ImageDataGenerator(rotation_range=30,
                               width_shift_range=0.1,
                               height_shift_range=0.1,
                               zoom_range=.1,
                               horizontal_flip=True)

# instantiating sequencers: one batched pipeline per split; only the
# training split receives the augmentation generator.
sequencers = {}
for split in splits:
    manager = data_managers[split]
    size, num_classes = manager.image_size[0], manager.num_classes
    pipeline_args = [size, num_classes]
    if split == 'train':
        pipeline_args.append(generator)
    pipeline = ProcessGrayImage(*pipeline_args)
    sequencers[split] = ProcessingSequence(
        pipeline, args.batch_size, datasets[split])

# instantiating model: resolve the class from its CLI name and compile
# for categorical classification.
name_to_model = {'MINI-XCEPTION': MiniXception}
Model = name_to_model[args.model]
# Input/output shapes are read off the wrapper at the end of the pipeline.
wrapper = pipeline.processors[-1]
input_shape = wrapper.inputs_info[0]['image']
num_classes = wrapper.labels_info[1]['label'][0]
model = Model(input_shape, num_classes)
model.compile(optimizer=keras.optimizers.Adam(),
              loss=keras.losses.categorical_crossentropy,
              metrics=['accuracy'])

# creating training dataset
experiment_label = '_'.join([args.dataset, model.name, args.run_label])
experiment_path = os.path.join(args.save_path, experiment_label)
if not os.path.exists(experiment_path):
Exemple #2
0
            keypoints = self.denormalize_keypoints(keypoints, cropped_image)
            keypoints = self.change_coordinates(keypoints, box2D)
            keypoints2D.append(keypoints)
            contour = self.draw_probabilities(cropped_image, probabilities)
            contours.append(contour)
            image = self.draw_keypoints(image, keypoints)
        image = self.draw_boxes2D(image, boxes2D)
        return self.wrap(image, boxes2D, keypoints2D, contours)


if __name__ == '__main__':
    from paz.abstract import ProcessingSequence
    from paz.backend.image import show_image

    from facial_keypoints import FacialKeypoints

    # Visual sanity check: push single samples through the augmentation
    # pipeline and display the keypoints drawn on each augmented image.
    data_manager = FacialKeypoints('dataset/', 'train')
    dataset = data_manager.load_data()
    augment_keypoints = AugmentKeypoints('train', with_partition=False)
    for arg in range(1, 100):
        sample = dataset[arg]
        predictions = augment_keypoints(sample)
        original_image = predictions['inputs']['image'][:, :, 0]
        original_image = original_image * 255.0
        kp = predictions['labels']['keypoints']
        # Keypoints come out normalized; map back to the 96x96 frame.
        kp = denormalize_keypoints(kp, 96, 96)
        original_image = draw_circles(original_image, kp.astype('int'))
        show_image(original_image.astype('uint8'))
    # Smoke-test the batched path too; index the Sequence directly
    # instead of calling __getitem__ explicitly.
    sequence = ProcessingSequence(augment_keypoints, 32, dataset, True)
    batch = sequence[0]
Exemple #3
0
# Multibox detection loss; the metrics dict additionally reports its
# localization and positive/negative classification terms for the
# 'boxes' output head.
loss = MultiBoxLoss()
metrics = {'boxes': [loss.localization,
                     loss.positive_classification,
                     loss.negative_classification]}
model.compile(optimizer, loss.compute_loss, metrics)

# setting data augmentation pipeline: one AugmentDetection per split
augmentators = [AugmentDetection(model.prior_boxes, split)
                for split in [TRAIN, VAL]]

# setting sequencers: batch each dataset through its matching augmentator
sequencers = [ProcessingSequence(augmentator, args.batch_size, data)
              for data, augmentator in zip(datasets, augmentators)]

# setting callbacks
model_path = os.path.join(args.save_path, model.name)
# exist_ok avoids the check-then-create race of os.path.exists + makedirs.
os.makedirs(model_path, exist_ok=True)
log = CSVLogger(os.path.join(model_path, model.name + '-optimization.log'))
# Checkpoint filename embeds the epoch number and validation loss.
save_path = os.path.join(model_path, 'weights.{epoch:02d}-{val_loss:.2f}.hdf5')
checkpoint = ModelCheckpoint(save_path, verbose=1, save_weights_only=True)
# NOTE(review): three positional args suggest a project-local scheduler
# (rate, gamma decay, scheduled epochs), not keras' callback -- confirm.
schedule = LearningRateScheduler(
    args.learning_rate, args.gamma_decay, args.scheduled_epochs)
evaluate = EvaluateMAP(
    evaluation_data_managers[0],
    DetectSingleShot(model, data_managers[0].class_names, 0.01, 0.45),
    args.evaluation_period,
Exemple #4
0
# loading splits: keep both the manager and its loaded samples per split
data_managers, datasets = {}, {}
for split in splits:
    manager = name_to_manager[args.dataset](
        args.image_path, args.label_path, split)
    data_managers[split] = manager
    datasets[split] = manager.load_data()

# instantiating sequencers: one segmentation-id preprocessor per split
sequencers = {}
for split in splits:
    num_classes = data_managers[split].num_classes
    image_shape = (args.image_size, args.image_size)
    processor = PreprocessSegmentationIds(image_shape, num_classes)
    sequencers[split] = ProcessingSequence(
        processor, args.batch_size, datasets[split])

# instantiating model: resolve the UNET variant from its CLI name
name_to_model = {'UNET_VGG16': UNET_VGG16,
                 'UNET_VGG19': UNET_VGG19,
                 'UNET_RESNET50': UNET_RESNET50}
Model = name_to_model[args.model]
num_classes = data_managers['train'].num_classes
# Input shape is taken from the last processor's declared inputs.
input_shape = processor.processors[-1].inputs_info[0]['input_1']
print(num_classes, input_shape)
model = Model(num_classes, input_shape,
              freeze_backbone=args.freeze_backbone,
              activation=args.activation)
def deprocess_image(image):
    """Undo detection preprocessing for display: re-add the BGR
    ImageNet mean and convert the image from BGR to RGB."""
    restored = (image + pr.BGR_IMAGENET_MEAN).astype('uint8')
    return P.image.convert_color_space(restored, pr.BGR2RGB)


# Draw ten independently augmented versions of the same sample.
augmentator = AugmentDetection(prior_boxes, num_classes=len(class_names))
print('Image and boxes augmentations')
for _ in range(10):
    # Copy the boxes so each pass augments the pristine annotations.
    sample = {'image': image_fullpath, 'boxes': box_data.copy()}
    data = augmentator(sample)
    image = deprocess_image(data['inputs']['image'])
    boxes = data['labels']['boxes']
    draw_boxes(image, boxes)

# Note that we change the input and output format from lists to dictionaries.
# The input changed by adding the ``pr.UnpackDictionary`` processor, and the
# output changed by the ``pr.SequenceWrapper`` processor.
# The ``pr.SequenceWrapper`` method allows us to easily connect the complete
# pipeline to a Sequence Generator.
# Run the same augmentation through a batched Sequence generator.
data = [{'image': image_fullpath, 'boxes': box_data}]
print('Image and boxes augmentations with generator')
batch_size = 1
sequence = ProcessingSequence(augmentator, batch_size, data)
for _ in range(10):
    # Index the Sequence directly instead of calling __getitem__.
    batch = sequence[0]
    batch_images, batch_boxes = batch[0]['image'], batch[1]['boxes']
    image, boxes = batch_images[0], batch_boxes[0]
    image = deprocess_image(image)
    draw_boxes(image, boxes)
Exemple #6
0
if __name__ == "__main__":
    import os
    from paz.datasets import FER, FERPlus

    # data generator and augmentations
    generator = ImageDataGenerator(rotation_range=30,
                                   width_shift_range=0.1,
                                   height_shift_range=0.1,
                                   zoom_range=.1,
                                   horizontal_flip=True)

    pipeline = ProcessGrayImage(48, 8, generator)
    dataset = 'FERPlus'

    data_path = os.path.join(os.path.expanduser('~'), '.keras/paz/datasets/')
    name_to_manager = {'FER': FER, 'FERPlus': FERPlus}
    data_path = os.path.join(data_path, dataset)
    # Only FERPlus takes an explicit dataset path here.
    kwargs = {'path': data_path} if dataset in ['FERPlus'] else {}
    data_manager = name_to_manager[dataset](split='train', **kwargs)
    data = data_manager.load_data()

    # Display the first batch of augmented grayscale images.
    # (Removed the unused `data_managers, datasets = {}, {}` locals and
    # use idiomatic indexing instead of __getitem__.)
    sequence = ProcessingSequence(pipeline, 32, data)
    batch = sequence[0]
    show = pr.ShowImage()
    for arg in range(32):
        image = batch[0]['image'][arg][..., 0]
        image = (255 * image).astype('uint8')
        show(image)
Exemple #7
0
# Synthetic shapes dataset and its matching segmentation preprocessor.
data_manager = Shapes(num_samples, image_shape,
                      iou_thresh=iou_thresh,
                      max_num_shapes=max_num_shapes)
num_classes = data_manager.num_classes
data = data_manager.load_data()
processor = PreprocessSegmentation(image_shape, num_classes)

# setting additional callbacks: CSV log, early stopping, best-model
# checkpointing and LR reduction, all monitoring the training loss.
callbacks = []
log = CSVLogger(os.path.join(experiment_path, 'optimization.log'))
stop = EarlyStopping('loss', patience=stop_patience)
save = ModelCheckpoint(os.path.join(experiment_path, 'model.hdf5'),
                       'loss', save_best_only=True)
plateau = ReduceLROnPlateau('loss', patience=reduce_patience)
callbacks.extend([log, stop, save, plateau])

# Build, compile and train the UNET on the batched sequence.
model = UNET_VGG16(num_classes, input_shape, 'imagenet', freeze, activation)
sequence = ProcessingSequence(processor, batch_size, data)
optimizer = Adam()
model.compile(optimizer, loss, metrics)
model.summary()
# `batch_size` must not be passed to fit() when the data is a keras
# Sequence (tf.keras raises ValueError); the sequence defines batching.
model.fit(sequence, epochs=epochs, callbacks=callbacks)

# RGB colors used to render the predicted segmentation masks.
colors = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
postprocess = PostprocessSegmentation(model, colors)
for sample in data:
    postprocess(sample['image'])
Exemple #8
0
    'train': data[:num_train_samples],
    'validation': data[num_train_samples:]
}

# instantiate keypoint augmentations, one pipeline per phase
phases = ('train', 'validation')
delta_scales = [args.delta_scales] * 2
processor = {phase: AugmentKeypoints(phase, args.rotation_range,
                                     delta_scales, True, args.num_keypoints)
             for phase in phases}

# creating sequencers over the matching dataset split
sequence = {}
for phase in phases:
    pipeline, data = processor[phase], datasets[phase]
    sequence[phase] = ProcessingSequence(pipeline, args.batch_size, data, True)

# instantiate model
# Batch shape is (batch, height, width, 1): single-channel square inputs.
batch_shape = (args.batch_size, args.image_size, args.image_size, 1)
model = GaussianMixtureModel(batch_shape, args.num_keypoints, args.filters)
model.summary()


# creating loss function for gaussian mixture model
def negative_log_likelihood(y_true, predicted_distributions):
    """Return the negative log-probability of ``y_true`` under the
    predicted distribution (any object exposing ``log_prob``)."""
    return -predicted_distributions.log_prob(y_true)


# setting optimizer and compiling model
# AMSGrad variant of Adam at the CLI-provided learning rate.
optimizer = Adam(args.learning_rate, amsgrad=True)
Exemple #9
0
class FlipBoxesLeftRight(Processor):
    """Mirror an image left-right and update its boxes to match.

    ``boxes`` is modified in place: columns 0 and 2 (assumed to be
    x_min and x_max -- TODO confirm against the pipeline's box layout)
    are reflected about the image width and swapped so ordering holds.
    """
    # The explicit __init__ that only delegated to super() was removed;
    # the inherited constructor is equivalent.

    def call(self, image, boxes):
        width = image.shape[1]
        boxes[:, [0, 2]] = width - boxes[:, [2, 0]]
        return image[:, ::-1], boxes


# Toy two-array sample pushed through unpack -> flip -> sequence wrap.
data = [{
    'value_A': np.array([[1.0, 2.0, 3.0, 4.0]]),
    'value_B': np.array([[1.0, 1.1, 1.2], [2.0, 2.1, 2.2]])
}]
processor = SequentialProcessor()
processor.add(pr.UnpackDictionary(['value_A', 'value_B']))
processor.add(FlipBoxesLeftRight())
# Wrap outputs so batches come out as ({'value_A': ...}, {'value_B': ...}).
processor.add(pr.SequenceWrapper(
    {0: {'value_A': [1, 4]}},
    {1: {'value_B': [2, 3]}}))
sequence = ProcessingSequence(processor, 1, data)

for _ in range(10):
    # Index the Sequence directly instead of calling __getitem__; only
    # 'value_B' is inspected, so the unused 'value_A' unpack is dropped.
    batch = sequence[0]
    value_B = batch[1]['value_B'][0]
    print(value_B)