def __init__(self, class_names, prior_boxes, variances=[.1, .2]):
    super(ShowBoxes, self).__init__()
    # Decode the encoded boxes back to Boxes2D and drop the background class.
    self.deprocess_boxes = SequentialProcessor([
        pr.DecodeBoxes(prior_boxes, variances),
        pr.ToBoxes2D(class_names, True),
        pr.FilterClassBoxes2D(class_names[1:])])
    self.denormalize_boxes2D = pr.DenormalizeBoxes2D()
    self.draw_boxes2D = pr.DrawBoxes2D(class_names)
    self.show_image = pr.ShowImage()
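
def call(self, image, boxes):
    # Sketch (assumption, not shown above): chain the processors built in
    # __init__ to decode and filter the encoded boxes, map them back to pixel
    # coordinates, draw them on the image and display the result. The method
    # name, argument order and return value are assumptions for illustration.
    image = image.astype('uint8')
    boxes2D = self.deprocess_boxes(boxes)
    boxes2D = self.denormalize_boxes2D(image, boxes2D)
    image = self.draw_boxes2D(image, boxes2D)
    self.show_image(image)
    return image, boxes2D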
if split == pr.TRAIN:
    # Image and box augmentations are only applied during training.
    self.add(pr.ControlMap(self.augment_image, [0], [0]))
    self.add(pr.ControlMap(self.augment_boxes, [0, 1], [0, 1]))
self.add(pr.ControlMap(self.preprocess_image, [0], [0]))
self.add(pr.ControlMap(self.preprocess_boxes, [1], [1]))
# Wrap the pipeline outputs into named tensors with fixed shapes.
self.add(pr.SequenceWrapper(
    {0: {'image': [size, size, 3]}},
    {1: {'boxes': [len(prior_boxes), 4 + num_classes]}}))

prior_boxes = create_prior_boxes()

# Adapt the existing draw_boxes pipeline: its boxes are now one-hot encoded,
# so decode them against the prior boxes and filter out the background class
# before drawing.
draw_boxes.processors[0].processor.one_hot_encoded = True
draw_boxes.insert(0, pr.ControlMap(pr.DecodeBoxes(prior_boxes), [1], [1]))
draw_boxes.insert(
    2, pr.ControlMap(pr.FilterClassBoxes2D(class_names[1:]), [1], [1]))


def deprocess_image(image):
    # Undo the mean subtraction and convert back to RGB for display.
    image = (image + pr.BGR_IMAGENET_MEAN).astype('uint8')
    return P.image.convert_color_space(image, pr.BGR2RGB)


augmentator = AugmentDetection(prior_boxes, num_classes=len(class_names))
print('Image and boxes augmentations')
for _ in range(10):
    sample = {'image': image_fullpath, 'boxes': box_data.copy()}
    data = augmentator(sample)
    image, boxes = data['inputs']['image'], data['labels']['boxes']
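    # Sketch (assumption, not shown above): finish each iteration by undoing
    # the image preprocessing and running the adapted draw_boxes pipeline on
    # the augmented sample; the exact call signature is an assumption based on
    # the ControlMap inserts above.
    image = deprocess_image(image)
    draw_boxes(image, boxes)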