Code Example #1
File: train_ssd.py Project: LXL1314/learn-gluoncv
def get_dataset(dataset, args):
    # load training and validation images
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(root=args.dataset_root + "/voc",
                                           splits=[(2007, 'trainval'),
                                                   (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(root=args.dataset_root + "/voc",
                                         splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
        # if class_names is provided, will print out AP for each class
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(root=args.dataset_root + "/coco",
                                            splits='instances_train2017')
        val_dataset = gdata.COCODetection(root=args.dataset_root + "/coco",
                                          splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True,
                                         data_shape=(args.data_shape,
                                                     args.data_shape))
        # will print out AP for each class
        if args.val_interval == 1:
            # args.val_interval: number of epochs between validation runs
            # if validation is slow, increase this value to speed up training
            args.val_interval = 10
    else:
        raise NotImplementedError(
            "dataset: {} not implemented".format(dataset))
    return train_dataset, val_dataset, val_metric
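
All of these get_dataset variants follow the same contract: they take the dataset name plus the parsed command-line arguments and return a (train_dataset, val_dataset, val_metric) triple. Below is a minimal, hedged sketch of a typical call site; the argument names mirror the attributes the examples read (dataset_root, data_shape, save_prefix, val_interval), but the parser and its defaults are illustrative assumptions, not part of any of the original projects.

import argparse

# Hypothetical argument parser; the attribute names match what get_dataset reads.
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='voc')
parser.add_argument('--dataset-root', type=str, default='~/.mxnet/datasets')
parser.add_argument('--data-shape', type=int, default=300)
parser.add_argument('--save-prefix', type=str, default='ssd_300_')
parser.add_argument('--val-interval', type=int, default=1)
args = parser.parse_args()

# The returned objects feed the rest of the training script: the datasets go
# into DataLoaders, and the metric is updated during validation.
train_dataset, val_dataset, val_metric = get_dataset(args.dataset, args)
print('training images:', len(train_dataset))
print('validation images:', len(val_dataset))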
Code Example #2
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017',
                                            use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True,
                                         data_shape=(args.data_shape,
                                                     args.data_shape))
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    if args.num_samples < 0:
        args.num_samples = len(train_dataset)
    if args.mixup:
        from gluoncv.data import MixupDetection
        train_dataset = MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
Code Example #3
File: train_ssd.py Project: ixhorse/gluon-cv
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(root=args.dataset_root,
                                           splits=[(2007, 'trainval'),
                                                   (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(root=args.dataset_root,
                                         splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017')
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True,
                                         data_shape=(args.data_shape,
                                                     args.data_shape))
        # coco validation is slow, consider increasing the validation interval
        if args.val_interval == 1:
            args.val_interval = 10
    elif dataset.lower() == 'tt100k':
        train_dataset = gdata.TT100KDetection(root=args.dataset_root,
                                              splits='train')
        val_dataset = None
        val_metric = None
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
Code Example #4
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        if 0:
            train_dataset = gdata.VOCDetection(root='E:/dataset/VOCdevkit',
                                               splits=[(2007, 'trainval'),
                                                       (2012, 'trainval')])
            val_dataset = gdata.VOCDetection(root='E:/dataset/VOCdevkit',
                                             splits=[(2007, 'test')])
            val_metric = VOC07MApMetric(iou_thresh=0.5,
                                        class_names=val_dataset.classes)
        else:
            voc_root = 'G:/MSDataset/'  # layout same as VOC07
            train_dataset = gdata.MSDetection(root=voc_root,
                                              splits=[(2007, 'trainval')])
            val_dataset = gdata.MSDetection(root=voc_root,
                                            splits=[(2007, 'test')])
            val_metric = VOC07MApMetric(iou_thresh=0.5,
                                        class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017',
                                            use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True)
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    if args.mixup:
        from gluoncv.data.mixup import MixupDetection
        train_dataset = MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
Code Example #5
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = VOCLike(
            root='C:/Users/DELL/Desktop/traindata/VOCtemplate',
            splits=((2018, 'train'), ))
        val_dataset = VOCLike(
            root='C:/Users/DELL/Desktop/traindata/VOCtemplate',
            splits=((2018, 'val'), ))
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(root=args.dataset_root + "/coco",
                                            splits='instances_train2017')
        val_dataset = gdata.COCODetection(root=args.dataset_root + "/coco",
                                          splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True,
                                         data_shape=(args.data_shape,
                                                     args.data_shape))
        # coco validation is slow, consider increasing the validation interval
        if args.val_interval == 1:
            args.val_interval = 10
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
Code Example #6
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        #print(val_dataset.classes)
        #('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')

        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017', use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
    elif dataset.lower() == 'pedestrian':
        lst_dataset = LstDetection('train_val.lst', root=os.path.expanduser('.'))
        print(len(lst_dataset))
        first_img = lst_dataset[0][0]

        print(first_img.shape)
        print(lst_dataset[0][1])

        train_dataset = LstDetection('train.lst', root=os.path.expanduser('.'))
        val_dataset = LstDetection('val.lst', root=os.path.expanduser('.'))
        classes = ('pedestrian',)
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=classes)
        
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.mixup:
        from gluoncv.data.mixup import MixupDetection
        train_dataset = MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
Code Example #7
File: train_fcos.py Project: p517332051/mygluon_cv
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        pass
        # train_dataset = gdata.VOCDetection(
        #     splits=[(2007, 'trainval'), (2012, 'trainval')])
        # val_dataset = gdata.VOCDetection(
        #     splits=[(2007, 'test')])
        # val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(
            root='/home/xcq/PycharmProjects/datasets/coco/',
            splits='instances_train2017',
            use_crowd=False)
        val_dataset = gdata.COCODetection(
            root='/home/xcq/PycharmProjects/datasets/coco/',
            splits='instances_val2017',
            skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True)
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    if args.mixup:
        from gluoncv.data.mixup import detection
        train_dataset = detection.MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
Code Example #8
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(root=args.dataset_root + "/coco",
                                            splits='instances_train2017')
        val_dataset = gdata.COCODetection(root=args.dataset_root + "/coco",
                                          splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True,
                                         data_shape=(args.data_shape,
                                                     args.data_shape))
        # coco validation is slow, consider increasing the validation interval
        if args.val_interval == 1:
            args.val_interval = 10
    else:
        train_dataset = petVOC(splits=[(2019, 'train_val')])
        val_dataset = train_dataset
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)

    return train_dataset, val_dataset, val_metric
Code Example #9
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        #train_dataset = VOCLike(root='/opt/ml/input/data/training', splits=((2019, 'train'),))
        #val_dataset = VOCLike(root='/opt/ml/input/data/training', splits=((2018, 'val'),))
        train_dataset = VOCLike(
            root='~/code/gluoncv-yolo-playing_cards/VOCTemplate',
            splits=((2019, 'train'), ))
        val_dataset = VOCLike(
            root='~/code/gluoncv-yolo-playing_cards/VOCTemplate',
            splits=((2018, 'val'), ))
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017',
                                            use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True,
                                         data_shape=(args.data_shape,
                                                     args.data_shape))
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    if args.num_samples < 0:
        args.num_samples = len(train_dataset)
    if args.mixup:
        from gluoncv.data import MixupDetection
        train_dataset = MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
Code Example #10
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif dataset.lower() in ['clipart', 'comic', 'watercolor']:
        root = os.path.join('~', '.mxnet', 'datasets', dataset.lower())
        train_dataset = gdata.CustomVOCDetection(root=root,
                                                 splits=[('', 'train')],
                                                 generate_classes=True)
        val_dataset = gdata.CustomVOCDetection(root=root,
                                               splits=[('', 'test')],
                                               generate_classes=True)
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017',
                                            use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True)
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    if args.mixup:
        from gluoncv.data.mixup import detection
        train_dataset = detection.MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
Code Example #11
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            root='/home/users/chenxin.lu/VOCdevkit/VOCdevkit',
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(
            root=args.dataset_root + "/coco/stuff_annotations_trainval2017",
            splits='stuff_train2017')
        val_dataset = gdata.COCODetection(
            root=args.dataset_root + "/coco/stuff_annotations_trainval2017",
            splits='stuff_val2017',
            skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True,
                                         data_shape=(args.data_shape,
                                                     args.data_shape),
                                         post_affine=get_post_transform)
        # coco validation is slow, consider increasing the validation interval
        if args.val_interval == 1:
            args.val_interval = 10
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    if args.num_samples < 0:
        args.num_samples = len(train_dataset)
    return train_dataset, val_dataset, val_metric
Code Example #12
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = VOCLike(root='/opt/ml/input/data/training',
                                splits=(('VOCTrain', 'train'), ))
        val_dataset = VOCLike(root='/opt/ml/input/data/training',
                              splits=(('VOCValidate', 'val'), ))
        #train_dataset = VOCLike(root='VOC-PlayingCards', splits=(('VOC2019', 'train'),))
        #val_dataset = VOCLike(root='VOC-PlayingCards', splits=(('VOC2018', 'val'),))
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
        for c in val_metric.class_names:
            print("Class: {}".format(c))
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(root=args.dataset_root + "/coco",
                                            splits='instances_train2017')
        val_dataset = gdata.COCODetection(root=args.dataset_root + "/coco",
                                          splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True,
                                         data_shape=(args.data_shape,
                                                     args.data_shape))
        # coco validation is slow, consider increasing the validation interval
        if args.val_interval == 1:
            args.val_interval = 10
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
Code Example #13
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = VOCLike(
            root="/content/drive/My Drive/Research/Dataset_conversion/Dataset/",
            splits=[(2007, 'train')])
        val_dataset = VOCLike(
            root="/content/drive/My Drive/Research/Dataset_conversion/Dataset/",
            splits=[(2007, 'validation')])
        print(train_dataset.classes)
        print(val_dataset.classes)
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017',
                                            use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True)
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    if args.mixup:
        from gluoncv.data.mixup import detection
        train_dataset = detection.MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
Code Example #14
def get_dataset(args):
    train_dataset = gdata.COCODetection(
        root=args.datasetloc, splits="instances_train2017", use_crowd=False, skip_empty=True
    )
    val_dataset = gdata.COCODetection(
        root=args.datasetloc, splits="instances_val2017", skip_empty=False
    )
    val_metric = COCODetectionMetric(val_dataset, args.save_prefix + "_eval")
    return train_dataset, val_dataset, val_metric
Code Example #15
File: coco.py Project: mseeger/autogluon-1
    def __init__(self):
        super(COCO, self).__init__()
        self.train_dataset = gdata.COCODetection(splits='instances_train2017')
        self.val_dataset = gdata.COCODetection(splits='instances_val2017',
                                               skip_empty=False)
        self.val_metric = COCODetectionMetric(self.val_dataset,
                                              args.save_prefix + '_eval',
                                              cleanup=True,
                                              data_shape=(args.data_shape,
                                                          args.data_shape))

        #TODO: whether to use the code below
        """
Code Example #16
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017')
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
Code Example #17
def get_dali_dataset(dataset_name, devices, args):
    if dataset_name.lower() == "coco":
        # training
        expanded_file_root = os.path.expanduser(args.dataset_root)
        coco_root = os.path.join(expanded_file_root,
                                 'coco',
                                 'train2017')
        coco_annotations = os.path.join(expanded_file_root,
                                        'coco',
                                        'annotations',
                                        'instances_train2017.json')
        if args.horovod:
            train_dataset = [gdata.COCODetectionDALI(num_shards=hvd.size(), shard_id=hvd.rank(), file_root=coco_root,
                                                     annotations_file=coco_annotations, device_id=hvd.local_rank())]
        else:
            train_dataset = [gdata.COCODetectionDALI(num_shards= len(devices), shard_id=i, file_root=coco_root,
                                                     annotations_file=coco_annotations, device_id=i) for i, _ in enumerate(devices)]

        # validation
        if (not args.horovod or hvd.rank() == 0):
            val_dataset = gdata.COCODetection(root=os.path.join(args.dataset_root + '/coco'),
                                              splits='instances_val2017',
                                              skip_empty=False)
            val_metric = COCODetectionMetric(
                val_dataset, args.save_prefix + '_eval', cleanup=True,
                data_shape=(args.data_shape, args.data_shape))
        else:
            val_dataset = None
            val_metric = None
    else:
        raise NotImplementedError('Dataset: {} not implemented with DALI.'.format(dataset_name))

    return train_dataset, val_dataset, val_metric
Code Example #18
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(root=args.dataset_root + "/coco",
                                            splits='instances_train2017')
        val_dataset = gdata.COCODetection(root=args.dataset_root + "/coco",
                                          splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True,
                                         data_shape=(args.data_shape,
                                                     args.data_shape))
        # coco validation is slow, consider increasing the validation interval
        if args.val_interval == 1:
            args.val_interval = 10
    elif dataset.lower() == 'nzrc':
        # The classes for the dataset need to be reset after the net is loaded to prevent class-mismatch errors when loading the net.
        gdata.COCODetection.CLASSES = classes
        print("train_efficirntdet.py-50 get_dataset CLASSES=",
              gdata.COCODetection.CLASSES)
        train_dataset = gdata.COCODetection(root=args.dataset_root +
                                            "/NZRC/ML4DR_v2",
                                            splits='coco_export2_train')
        val_dataset = gdata.COCODetection(root=args.dataset_root +
                                          "/NZRC/ML4DR_v2",
                                          splits='coco_export2_val',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True,
                                         data_shape=(args.data_shape,
                                                     args.data_shape))
        # coco validation is slow, consider increasing the validation interval
        if args.val_interval == 1:
            args.val_interval = 10
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    if args.num_samples < 0:
        args.num_samples = len(train_dataset)
    return train_dataset, val_dataset, val_metric
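
The 'nzrc' branch above illustrates a common custom-COCO pattern: the class list is overridden on the dataset class before the dataset is constructed, and the detector's output heads are reset to the same list so predictions and annotations match. A hedged sketch of that pattern follows; the class names and the model name are placeholders, not taken from the example.

import gluoncv as gcv
from gluoncv import data as gdata

classes = ('class_a', 'class_b')  # placeholder class names, not from the example

# Override the dataset-level class list before constructing the dataset,
# as the example does for its 'coco_export2_*' splits.
gdata.COCODetection.CLASSES = classes

# Reset the detector's output classes so they match the dataset; reset_class
# replaces the class-dependent heads (optionally reusing pretrained weights).
net = gcv.model_zoo.get_model('ssd_512_resnet50_v1_coco', pretrained=True)
net.reset_class(classes)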
Code Example #19
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':

        #        train_dataset = gdata.VOCDetection(
        #            splits=[(2007, 'trainval'), (2012, 'trainval')])
        #        val_dataset = gdata.VOCDetection(
        #            splits=[(2007, 'test')])
        #        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)

        root = '/home/wangqiushi/pycode/datasets/DR4/'
        image_root = os.path.join(root, 'region_pics')
        #image_root = '/'
        train_label_file = os.path.join(root, 'train.txt')
        val_label_file = os.path.join(root, 'valid.txt')
        classes = ('40', '50')
        map_file = os.path.join(root, 'DR2_map.txt')

        train_dataset = DetectionDataset(image_root,
                                         train_label_file,
                                         classes,
                                         map_file=map_file)
        val_dataset = DetectionDataset(image_root,
                                       val_label_file,
                                       classes,
                                       map_file=map_file,
                                       shuffle=False)
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)

    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017')
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True,
                                         data_shape=(args.data_shape,
                                                     args.data_shape))
        # coco validation is slow, consider increasing the validation interval
        if args.val_interval == 1:
            args.val_interval = 10

    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
Code Example #20
def get_dataset(dataset, args):
    if dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(root='./data/coco', splits='instances_train2017', use_crowd=False)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.num_samples < 0:
        args.num_samples = len(train_dataset)
    if args.mixup:  # is broken now. do not try
        from gluoncv.data import MixupDetection
        train_dataset = MixupDetection(train_dataset)
    return train_dataset
Code Example #21
def test_coco_detection():
    if not osp.isdir(osp.expanduser('~/.mxnet/datasets/coco')):
        return

    # use the validation split only; loading the training split is very slow
    val = data.COCODetection(splits='instances_val2017')
    name = str(val)
    assert len(val.classes) > 0

    for _ in range(10):
        index = np.random.randint(0, len(val))
        _ = val[index]
Code Example #22
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        root = '/data02/wangqiushi/datasets/DR/'
        image_root = os.path.join(root, 'Images')
        train_label_file = os.path.join(root, 'mxnet_rec', 't90', 'train.txt')
        val_label_file = os.path.join(root, 'mxnet_rec', 't90', 'valid.txt')
        classes = ('30', '40', '50')
        map_file = os.path.join(root, 'mxnet_rec', 'DR_map.txt')

        train_dataset = DetectionDataset(image_root,
                                         train_label_file,
                                         classes,
                                         map_file=map_file)
        val_dataset = DetectionDataset(image_root,
                                       val_label_file,
                                       classes,
                                       map_file=map_file,
                                       shuffle=False)
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017',
                                            use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True,
                                         data_shape=(args.data_shape,
                                                     args.data_shape))
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    if args.num_samples < 0:
        args.num_samples = len(train_dataset)
    if args.mixup:
        from gluoncv.data import MixupDetection
        train_dataset = MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
Code Example #23
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017',
                                            use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         os.path.join(args.logdir, 'eval'),
                                         cleanup=True)
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    if cfg.TRAIN.MODE_MIXUP:
        from gluoncv.data.mixup import MixupDetection
        train_dataset = MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
Code Example #24
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(root=args.data_path,
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(root=args.data_path,
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        #train_dataset = gdata.COCODetection(splits='instances_train2014', use_crowd=False)
        train_dataset = gdata.COCODetection(root=args.data_path, splits='instances_train2017')
        val_dataset = gdata.COCODetection(root=args.data_path, splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
    elif dataset.lower() == 'rec':
        train_dataset = gdata.RecordFileDetection(os.path.join(args.data_path, 'pikachu_train.rec'))
        val_dataset = gdata.RecordFileDetection(os.path.join(args.data_path, 'pikachu_train.rec'))
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=rec_classes)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.mixup:
        from gluoncv.data.mixup import MixupDetection
        train_dataset = MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
Code Example #25
def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = VOCLike(root='clothes_data',
                                splits=((2018, 'train'), ))
        print(train_dataset)
        val_dataset = VOCLike(root='clothes_data', splits=((2018, 'test'), ))
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017',
                                            use_crowd=False)
        val_dataset = gdata.COCODetection(splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True)
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    if args.mixup:
        from gluoncv.data.mixup import detection
        train_dataset = detection.MixupDetection(train_dataset)
    return train_dataset, val_dataset, val_metric
Code Example #26
 def _load_data(self):
     assert len(self.ishape) == 4
     N, C, H, W = self.ishape
     assert C == 3
     self.val_dataset = gdata.COCODetection(root=self.root_dir,
                                            splits='instances_val2017',
                                            skip_empty=False)
     val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
     self.data = gluon.data.DataLoader(self.val_dataset.transform(
         SSDDefaultValTransform(W, H)),
                                       batch_size=N,
                                       shuffle=False,
                                       batchify_fn=val_batchify_fn,
                                       last_batch='rollover',
                                       num_workers=30)
Code Example #27
    def __init__(
            self,
            data_path           : str = '/media/david/A/Dataset/COCO',  # TODO change data path
            record_dir          : str = None,
            record_name         : str = None,
            save_n_test_images  : int = None,
            save_n_train_images : int = None,
    ):
        assert os.path.exists(record_dir)
        assert record_name

        self._record_dir  = record_dir
        self._record_name = record_name

        self._train_record = os.path.join(self._record_dir, self._record_name + '_train' + '.tfrecord')
        self._test_record  = os.path.join(self._record_dir, self._record_name + '_test' + '.tfrecord')

        if not os.path.exists(self._test_record):
            test_dataset = data.COCODetection(root=data_path, splits=['instances_val2017'])
            self.create_record(test_dataset, self._test_record, save_n_test_images)

        if not os.path.exists(self._train_record):
            train_dataset = data.COCODetection(root=data_path, splits=['instances_train2017'])
            self.create_record(train_dataset, self._train_record, save_n_train_images)
Code Example #28
    def _load_data(self):
        """ Customized _load_data method introduction.

            The COCO dataset only supports the NCHW layout, and the number of channels must be 3, i.e. (batch_size, 3, input_size, input_size).

            The validation dataset is created from the *MS COCO Detection Dataset* and uses SSDDefaultValTransform as the data preprocessing function.
        """
        assert len(self.ishape) == 4
        N, C, H, W = self.ishape
        assert C == 3
        self.val_dataset = gdata.COCODetection(
            root=self.root_dir, splits='instances_val2017', skip_empty=False)
        val_batchify_fn = Tuple(Stack(), Pad(pad_val=-1))
        self.data = gluon.data.DataLoader(
            self.val_dataset.transform(SSDDefaultValTransform(W, H)),
            batch_size=N, shuffle=False, batchify_fn=val_batchify_fn,
            last_batch='rollover', num_workers=30)
Code Example #29
File: eval_ssd.py Project: mbasnet1/lpot
def get_dataset(dataset, data_shape):
    args = parse_args()
    dataset_location = args.dataset_location
    if dataset.lower() == 'voc':
        val_dataset = gdata.VOCDetection(root=dataset_location,
                                         splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        val_dataset = gdata.COCODetection(root=dataset_location,
                                          splits='instances_val2017',
                                          skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset,
                                         args.save_prefix + '_eval',
                                         cleanup=True,
                                         data_shape=(data_shape, data_shape))
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    return val_dataset, val_metric
Code Example #30
File: mscoco.py Project: xcgoner/gluon-exp
   python mscoco.py --download-dir ~/coco

"""

################################################################
# Read with GluonCV
# -----------------
#
# Loading images and labels is straight-forward with
# :py:class:`gluoncv.data.COCODetection`.

from gluoncv import data, utils
from matplotlib import pyplot as plt

train_dataset = data.COCODetection(splits=['instances_train2017'])
val_dataset = data.COCODetection(splits=['instances_val2017'])
print('Num of training images:', len(train_dataset))
print('Num of validation images:', len(val_dataset))

################################################################
# Now let's visualize one example.

train_image, train_label = train_dataset[0]
bounding_boxes = train_label[:, :4]
class_ids = train_label[:, 4:5]
print('Image size (height, width, RGB):', train_image.shape)
print('Num of objects:', bounding_boxes.shape[0])
print('Bounding boxes (num_boxes, x_min, y_min, x_max, y_max):\n',
      bounding_boxes)
print('Class IDs (num_boxes, ):\n', class_ids)
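
################################################################
# The boxes can then be drawn on the image. Below is a minimal sketch using
# gluoncv's ``utils.viz.plot_bbox`` helper, continuing the snippet above
# (this plotting step is an added illustration, not part of the original file).

ax = utils.viz.plot_bbox(train_image.asnumpy(), bounding_boxes, scores=None,
                         labels=class_ids, class_names=train_dataset.classes)
plt.show()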