def random_transform_group_entry(self, image, annotations, transform=None):
    """ Randomly transforms image and annotation.

    Args
        image       : Image (numpy array) to transform.
        annotations : Dict holding 'masks' (list of 2-D mask arrays) and
                      'bboxes' (numpy array of shape (N, 4)).
        transform   : Optional transform matrix to apply; when None, one is
                      drawn from ``self.transform_generator`` (if configured).

    Returns
        Tuple ``(image, annotations)`` with the transform applied, or the
        inputs unchanged when no transform is available.
    """
    # randomly transform both image and annotations
    # BUGFIX: `transform` is a numpy matrix — truth-testing it (`if transform`)
    # raises "truth value of an array ... is ambiguous"; compare against None
    # instead (matches the bboxes-only variant of this method).
    if transform is not None or self.transform_generator:
        if transform is None:
            transform = adjust_transform_for_image(
                next(self.transform_generator),
                image,
                self.transform_parameters.relative_translation)

        # apply transformation to image
        image = apply_transform(transform, image, self.transform_parameters)

        # randomly transform the masks and expand so to have a fake channel dimension
        for i, mask in enumerate(annotations['masks']):
            annotations['masks'][i] = apply_transform(transform, mask, self.transform_parameters)
            annotations['masks'][i] = np.expand_dims(annotations['masks'][i], axis=2)

        # Transform the bounding boxes in the annotations.
        # Copy first so the caller's original array is not mutated in place.
        annotations['bboxes'] = annotations['bboxes'].copy()
        for index in range(annotations['bboxes'].shape[0]):
            annotations['bboxes'][index, :] = transform_aabb(transform, annotations['bboxes'][index, :])

    return image, annotations
def random_transform_group_entry(self, image, annotations, masks):
    """ Randomly transforms an image together with its boxes and masks.

    Args
        image       : Image (numpy array) to transform.
        annotations : Numpy array of annotations; columns 0..3 are the box.
        masks       : List of 2-D mask arrays, warped in place.

    Returns
        Tuple ``(image, annotations, masks)``.
    """
    # Nothing to do unless a transform generator was configured.
    if self.transform_generator:
        transform = adjust_transform_for_image(
            next(self.transform_generator),
            image,
            self.transform_parameters.relative_translation)

        image = apply_transform(transform, image, self.transform_parameters)

        # Warp each mask, then append a singleton channel axis.
        for idx, mask in enumerate(masks):
            warped = apply_transform(transform, mask, self.transform_parameters)
            masks[idx] = np.expand_dims(warped, axis=2)

        # Work on a copy so the caller's annotation array stays untouched,
        # then warp every bounding box.
        annotations = annotations.copy()
        for row in range(annotations.shape[0]):
            annotations[row, :4] = transform_aabb(transform, annotations[row, :4])

    return image, annotations, masks
def random_transform_group_entry(self, image, annotations, transform=None):
    """ Randomly transforms image and annotation.

    Args
        image       : Image (numpy array) to transform.
        annotations : Dict holding 'bboxes' (numpy array of shape (N, 4)).
        transform   : Optional transform matrix; drawn from
                      ``self.transform_generator`` when omitted.

    Returns
        Tuple ``(image, annotations)``.
    """
    # With neither an explicit transform nor a generator, pass data through.
    if transform is None and not self.transform_generator:
        return image, annotations

    # Draw a transform when the caller did not supply one.
    if transform is None:
        transform = adjust_transform_for_image(
            next(self.transform_generator),
            image,
            self.transform_parameters.relative_translation)

    # Warp the image itself.
    image = apply_transform(transform, image, self.transform_parameters)

    # Warp every bounding box; copy first so the caller's array is untouched.
    boxes = annotations['bboxes'].copy()
    for row in range(boxes.shape[0]):
        boxes[row, :] = transform_aabb(transform, boxes[row, :])
    annotations['bboxes'] = boxes

    return image, annotations
from keras_retinanet.utils.transform import random_transform
from keras_retinanet.utils.image import apply_transform, TransformParameters, adjust_transform_for_image, read_image_bgr
from matplotlib.image import imsave


def main():
    """Load a test image, apply a guaranteed horizontal flip, save the result."""
    img = read_image_bgr('./TestFile/foo.jpg')

    # flip_x_chance=1 forces a horizontal flip; flip_y_chance=0 disables vertical.
    transform = random_transform(flip_x_chance=1, flip_y_chance=0)
    print(transform)

    params = TransformParameters()
    transform = adjust_transform_for_image(transform, img, params.relative_translation)
    flipped = apply_transform(transform, img, params)

    imsave('./TestFile/foo_transformed.jpg', flipped)
    exit(0)


if __name__ == "__main__":
    main()