annotation[0, 1] = minY
            annotation[0, 2] = maxX
            annotation[0, 3] = maxY
            annotation[0, 4] = annot['rbbox']['label']
            annotations = np.append(annotations, annotation, axis=0)

        return annotations

    def coco_label_to_label(self, coco_label):
        """Map a raw COCO category id to the contiguous zero-based label."""
        lookup = self.coco_labels_inverse
        return lookup[coco_label]

    def label_to_coco_label(self, label):
        """Map a contiguous zero-based label back to its raw COCO category id."""
        mapping = self.coco_labels
        return mapping[label]

    def image_aspect_ratio(self, image_index):
        """Return the aspect ratio (width / height) of the indexed image.

        Every image in this dataset is 1024x1024, so the ratio is the
        constant 1.0 and the COCO metadata lookup is deliberately skipped.
        The ``image_index`` parameter is kept for interface compatibility
        with callers that batch by aspect ratio.
        """
        return 1.0

    def num_classes(self):
        """Return the total number of classes known to this dataset."""
        class_map = self.classes
        return len(class_map)


if __name__ == '__main__':
    from augmentation import get_augumentation

    # Smoke test: load the pickled dataset with train-time augmentation
    # and print one decoded sample.
    train_transform = get_augumentation(phase='train', width=1024, height=1024)
    dataset = CocoDataset("/content/gdrive/My Drive/findShip/test.pickle",
                          transform=train_transform)
    sample = dataset[0]
    print('sample: ', sample)
# ---- Example #2 ----
            annotation[0, :4] = a['bbox']
            annotation[0, 4] = self.coco_label_to_label(a['category_id'])
            annotations = np.append(annotations, annotation, axis=0)

        # transform from [x, y, w, h] to [x1, y1, x2, y2]
        annotations[:, 2] = annotations[:, 0] + annotations[:, 2]
        annotations[:, 3] = annotations[:, 1] + annotations[:, 3]

        return annotations

    def coco_label_to_label(self, coco_label):
        """Translate a COCO category id into the model's contiguous label."""
        inverse_map = self.coco_labels_inverse
        return inverse_map[coco_label]

    def label_to_coco_label(self, label):
        """Translate a contiguous model label back into its COCO category id."""
        forward_map = self.coco_labels
        return forward_map[label]

    def image_aspect_ratio(self, image_index):
        """Return width / height for the image at *image_index*.

        Looks the image metadata up through the COCO API rather than
        loading pixel data.
        """
        meta = self.coco.loadImgs(self.image_ids[image_index])[0]
        width, height = float(meta['width']), float(meta['height'])
        return width / height

    def num_classes(self):
        """Return the number of object classes in this dataset.

        NOTE(review): the count is hardcoded rather than derived from the
        loaded annotations — presumably this dataset always has 9
        categories; confirm against the annotation file.
        """
        NUM_CLASSES = 9
        return NUM_CLASSES


if __name__ == '__main__':
    from augmentation import get_augumentation

    # Smoke test: build the trainval35k COCO dataset with train-time
    # augmentation and print one decoded sample.
    train_transform = get_augumentation(phase='train')
    dataset = CocoDataset(root_dir='/root/data/coco', set_name='trainval35k',
                          transform=train_transform)
    sample = dataset[0]
    print('sample: ', sample)
# ---- Example #3 ----

def visualize(annotations, category_id_to_name):
    """Draw every bounding box in *annotations* onto a copy of its image
    and display the result with matplotlib.

    ``annotations`` is expected to carry 'image', 'bboxes' and
    'category_id' entries (the albumentations-style layout used below).
    """
    canvas = annotations['image'].copy()
    for idx, bbox in enumerate(annotations['bboxes']):
        category = annotations['category_id'][idx]
        canvas = visualize_bbox(canvas, bbox, category, category_id_to_name)
    plt.figure(figsize=(12, 12))
    plt.imshow(canvas)


# Demo configuration: build a VOC detection dataset with train-time
# augmentation sized to the chosen EfficientDet variant's input resolution.
dataset_root = '/home/toandm2/data/VOCdevkit'
network = 'efficientdet-d0'
dataset = VOCDetection(root=dataset_root,
                       transform=get_augumentation(
                           phase='train',
                           width=EFFICIENTDET[network]['input_size'],
                           height=EFFICIENTDET[network]['input_size']))


def visual_data(data):
    """Visualize one dataset sample.

    Prints the number of boxes, then draws them with synthetic sequential
    category ids (0..n-1), since the sample layout used here carries no
    per-box category labels.

    Fix: dropped the unused locals ``img`` and ``bboxes`` from the
    original, which were assigned but never read.
    """
    n_boxes = len(data['bboxes'])
    print(n_boxes)
    annotations = {
        'image': data['image'],
        'bboxes': data['bboxes'],
        'category_id': range(n_boxes)
    }
    # Identity mapping: each synthetic id is its own display name.
    category_id_to_name = {v: v for v in range(n_boxes)}

    visualize(annotations, category_id_to_name)