Ejemplo n.º 1
0
def get_dataset(dataset, args):
    """Build the validation dataset and metric for the named dataset.

    Only 'coco' (case-insensitive) is supported; anything else raises
    NotImplementedError.
    """
    if dataset.lower() != 'coco':
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    # skip_empty=False: validation must cover every image, even ones
    # without instance annotations.
    val_dataset = gdata.COCOInstance(splits='instances_val2017', skip_empty=False)
    # Keep the temporary result json only when the caller asked to save it.
    val_metric = COCOInstanceMetric(val_dataset, args.save_prefix + '_eval',
                                    cleanup=not args.save_json)
    return val_dataset, val_metric
Ejemplo n.º 2
0
def get_dataset(dataset, args):
    """Load COCO train/val instance datasets plus the evaluation metric.

    Raises NotImplementedError for any dataset name other than 'coco'.
    """
    if dataset.lower() != 'coco':
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    coco_root = '/home/xcq/PycharmProjects/datasets/coco/'
    train_dataset = gdata.COCOInstance(root=coco_root,
                                       splits='instances_train2017')
    # Validation keeps empty images so the metric scores the full split.
    val_dataset = gdata.COCOInstance(root=coco_root,
                                     splits='instances_val2017',
                                     skip_empty=False)
    val_metric = COCOInstanceMetric(val_dataset, args.save_prefix + '_eval',
                                    cleanup=True)
    return train_dataset, val_dataset, val_metric
Ejemplo n.º 3
0
def get_dataset(dataset, args):
    """Return (train, val, metric) for the unsupervised COCO-style dataset.

    Only 'coco' (case-insensitive) is recognised; any other name raises
    NotImplementedError.
    """
    if dataset.lower() != 'coco':
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    train_dataset = gdata.UsCOCOInstance(splits='train')
    # Keep empty images in validation so every sample gets evaluated.
    val_dataset = gdata.UsCOCOInstance(splits='valid', skip_empty=False)
    val_metric = COCOInstanceMetric(val_dataset, args.save_prefix + '_eval',
                                    cleanup=True)
    return train_dataset, val_dataset, val_metric
Ejemplo n.º 4
0
def get_dataset(dataset, args):
    """Build train/val datasets and the COCO-style instance metric.

    Supports 'coco' (standard COCO instances) and 'imaterialist'
    (iMaterialist annotations converted to COCO RLE format).
    """
    name = dataset.lower()
    if name == 'coco':
        train_dataset = gdata.COCOInstance(splits='instances_train2017')
        val_dataset = gdata.COCOInstance(splits='instances_val2017',
                                         skip_empty=False)
    elif name == 'imaterialist':
        # Imported lazily so the extra dependency is only needed for
        # this dataset choice.
        from utils.iMaterialistDataset import COCOiMaterialist
        train_dataset = COCOiMaterialist(root='datasets/imaterialist/',
                                         splits='rle_instances_train')
        val_dataset = COCOiMaterialist(root='datasets/imaterialist/',
                                       splits='resize_rle_instances_val',
                                       skip_empty=False)
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    # Both branches build the metric identically, so it is shared here.
    val_metric = COCOInstanceMetric(val_dataset,
                                    args.save_prefix + '_eval',
                                    cleanup=True)
    return train_dataset, val_dataset, val_metric
Ejemplo n.º 5
0
def get_dataset(dataset, args):
    """Return (val_dataset, val_metric, val_polygon_metric) for evaluation.

    'voc' yields VOC07 mAP plus a polygon mAP metric; 'coco' yields the
    COCO instance metric only (polygon metric is None).
    """
    name = dataset.lower()
    if name == 'voc':
        # Choose between the 2012 split and the 8-point bbox-wh split.
        split_tag = 'val_2012_bboxwh' if args.val_voc2012 else 'val_8_bboxwh'
        val_dataset = gdata.VOC_Val_Detection(splits=[('sbdche', split_tag)])
        val_metric = VOC07MApMetric(iou_thresh=0.5,
                                    class_names=val_dataset.classes)
        val_polygon_metric = VOC07PolygonMApMetric(iou_thresh=0.5,
                                                   class_names=val_dataset.classes)
    elif name == 'coco':
        val_dataset = COCOInstance(root='/home/tutian/dataset/', skip_empty=False)
        val_metric = COCOInstanceMetric(val_dataset, 'test_cocoapi', method='var')
        val_polygon_metric = None
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return val_dataset, val_metric, val_polygon_metric
Ejemplo n.º 6
0
def get_dataset(dataset, args):
    """Build COCO train/val datasets and a (possibly sharded) eval metric.

    Under Horovod with MPI available, the metric is given the starting
    index of this rank's shard, and the validation dataset itself is
    sharded afterwards so each worker evaluates a disjoint slice.
    """
    if dataset.lower() != 'coco':
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    train_dataset = gdata.COCOInstance(splits='instances_train2017')
    val_dataset = gdata.COCOInstance(splits='instances_val2017', skip_empty=False)
    starting_id = 0
    if args.horovod and MPI:
        total = len(val_dataset)
        per_rank, remainder = divmod(total, hvd.size())
        # The first `remainder` ranks each carry one extra sample.
        starting_id = per_rank * hvd.rank() + min(hvd.rank(), remainder)
    # The metric is built on the *unsharded* dataset; sharding happens below.
    val_metric = COCOInstanceMetric(val_dataset, args.save_prefix + '_eval',
                                    use_ext=args.use_ext, starting_id=starting_id)
    if args.horovod and MPI:
        val_dataset = val_dataset.shard(hvd.size(), hvd.rank())
    return train_dataset, val_dataset, val_metric
Ejemplo n.º 7
0
def get_dataset(dataset, args):
    """Return BDD100k train/val instance datasets plus the COCO metric.

    NOTE(review): both the train and the val dataset are built from the
    *same* 'val2018' split -- presumably a debugging shortcut; confirm
    the intended training split before serious training.
    """
    if dataset.lower() != 'coco':
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    bdd_root = '/Volumes/DATASET/BDD100k/bdd100k/'
    split = 'bdd100k_to_coco_labels_images_val2018'
    train_dataset = gdata.BDDInstance(root=bdd_root,
                                      splits=split,
                                      skip_empty=True,
                                      use_color_maps=False)
    # Validation keeps empty images so every sample is scored.
    val_dataset = gdata.BDDInstance(root=bdd_root,
                                    splits=split,
                                    skip_empty=False,
                                    use_color_maps=False)
    val_metric = COCOInstanceMetric(val_dataset,
                                    args.save_prefix + '_eval',
                                    cleanup=True)
    return train_dataset, val_dataset, val_metric
Ejemplo n.º 8
0
def get_dataset(args):
    """Load COCO train/val datasets from ``args.datasetloc`` plus the metric.

    With Horovod + MPI, the metric records this rank's starting sample
    index and the validation dataset is sharded afterwards so each
    worker evaluates a disjoint slice.
    """
    train_dataset = gdata.COCOInstance(root=args.datasetloc,
                                       splits='instances_train2017')
    # skip_empty=False: validation must include annotation-free images.
    val_dataset = gdata.COCOInstance(root=args.datasetloc,
                                     splits='instances_val2017',
                                     skip_empty=False)
    starting_id = 0
    if args.horovod and MPI:
        total = len(val_dataset)
        per_rank, remainder = divmod(total, hvd.size())
        # The first `remainder` ranks each carry one extra sample.
        starting_id = per_rank * hvd.rank() + min(hvd.rank(), remainder)
    # Metric sees the unsharded dataset; sharding is applied below.
    val_metric = COCOInstanceMetric(val_dataset,
                                    args.save_prefix + '_eval',
                                    use_ext=False,
                                    starting_id=starting_id)
    if args.horovod and MPI:
        val_dataset = val_dataset.shard(hvd.size(), hvd.rank())
    return (train_dataset, val_dataset, val_metric)
Ejemplo n.º 9
0
def get_dataset(dataset, args):
    """Return (train_dataset, val_dataset, val_metric) for the named dataset.

    Parameters
    ----------
    dataset : str
        Dataset name, 'coco' or 'voc' (case-insensitive).
    args : namespace
        Must provide ``save_prefix`` for COCO and ``data_dir`` for VOC.

    Raises
    ------
    NotImplementedError
        If ``dataset`` is not one of the supported names.
    """
    if dataset.lower() == 'coco':
        train_dataset = gdata.COCOInstance(splits='instances_train2017')
        # Keep empty images so the metric covers the entire val split.
        val_dataset = gdata.COCOInstance(splits='instances_val2017',
                                         skip_empty=False)
        val_metric = COCOInstanceMetric(val_dataset,
                                        args.save_prefix + '_eval',
                                        cleanup=True)
    # BUG FIX: the original compared the bound method ``dataset.lower``
    # (not its result) to "voc", which is always False, so the VOC branch
    # was unreachable. The method must be called.
    elif dataset.lower() == "voc":
        input_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
        ])
        train_dataset = gdata.VOCSegmentation(split="train",
                                              root=args.data_dir,
                                              transform=input_transform)
        val_dataset = gdata.VOCSegmentation(split="val",
                                            root=args.data_dir,
                                            transform=input_transform)
        val_metric = SegmentationMetric(train_dataset.num_class)
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
Ejemplo n.º 10
0
    if dataset.lower() == 'voc':
        if args.val_voc2012:
            val_dataset = gdata.VOC_Val_Detection(
            splits=[('sbdche', 'val_2012_bboxwh')])
        else:
            val_dataset = gdata.VOC_Val_Detection(
                splits=[('sbdche', 'val'+'_'+'8'+'_bboxwh')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
        val_polygon_metric = VOC07PolygonMApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
<<<<<<< HEAD
        val_dataset = COCOInstance(root='/home/tutian/coco_val2017/', skip_empty=False)
=======
        val_dataset = COCOInstance(root='/home/tutian/dataset/', skip_empty=False)
>>>>>>> adf79f77d047abcde52e38fa36513e5b18d900e6
        val_metric = COCOInstanceMetric(val_dataset, 'test_cocoapi', method='var')
        val_polygon_metric = None
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return val_dataset, val_metric, val_polygon_metric

def get_dataloader(net, val_dataset, data_shape, batch_size, num_workers, args):
    """Build the validation dataloader.

    Applies the YOLO3 usd-seg COCO validation transform (square resize to
    ``data_shape``) and batches samples with ``batchify.Append`` so
    variable-sized entries are preserved.
    """
    width, height = data_shape, data_shape
    # Copied from eval_mask_rcnn.py: one Append batchifier per output
    # (image, label); last_batch='keep' so no validation sample is dropped.
    val_bfn = batchify.Tuple(*[batchify.Append() for _ in range(2)])
    val_loader = gluon.data.DataLoader(
        val_dataset.transform(YOLO3UsdSegCocoValTransform(width, height, 50, 'coco')),
        batch_size, False, batchify_fn=val_bfn, last_batch='keep', num_workers=num_workers)
    # BUG FIX: the original built the loader but fell off the end of the
    # function, implicitly returning None to every caller.
    return val_loader
Ejemplo n.º 11
0
def get_dataset(dataset, args):
    """Return (train_dataset, val_dataset, val_metric).

    The 'coco' branch actually loads a custom primates dataset from
    VIA-style JSON annotations and wraps it in ArrayDataset, while the
    evaluation metric is still built against COCO val2017.

    Raises
    ------
    NotImplementedError
        If ``dataset`` is neither 'blbalbla' nor 'coco'.
    """
    if dataset.lower() == 'blbalbla':
        train_dataset = gdata.COCOInstance(splits='instances_train2017')
        val_dataset = gdata.COCOInstance(splits='instances_val2017',
                                         skip_empty=False)
        val_metric = COCOInstanceMetric(val_dataset,
                                        args.save_prefix + '_eval',
                                        cleanup=True)

        print(val_metric)
    elif dataset.lower() == 'coco':
        annotations_train = json.load(
            open('/home/nexus/primates/annotations/annot_train.json'))
        annotations_val = json.load(
            open('/home/nexus/primates/annotations/annot_test.json'))

        img_path_train = '/home/nexus/primates/train/'
        img_path_val = '/home/nexus/primates/test/'

        def jsonToSet(annotations_train_1, img_path):
            """Convert VIA-style annotations into (image, bboxes, masks) triples."""
            # BUG FIX: the accumulator used to live in the enclosing scope
            # and was shared between calls, so the second call (validation)
            # silently returned all training samples as well.
            samples = []
            for key in tqdm(annotations_train_1.keys()):
                el = annotations_train_1[key]

                img = mx.nd.array(
                    imread(img_path + key.split('.')[0] +
                           '.png').astype('float32'))

                img_annot = []
                masks = []
                for reg in el['regions']:
                    x = reg['shape_attributes']['all_points_x']
                    y = reg['shape_attributes']['all_points_y']

                    # Polygon as an (N, 2) array of (x, y) points.
                    _mask = [
                        np.swapaxes(np.asarray([x, y]).astype('float32'),
                                    axis1=0,
                                    axis2=1)
                    ]

                    masks.append(_mask)

                    mask = load_mask(img.asnumpy(),
                                     np.expand_dims(_mask, axis=-1))
                    bboxes = extract_bboxes(mask)
                    # Reorder from (y1, x1, y2, x2) to (x1, y1, x2, y2).
                    bboxes = [[
                        bboxes[0][1], bboxes[0][0], bboxes[0][3], bboxes[0][2]
                    ]]
                    # Append class id 0 (single-class dataset).
                    annot = np.append(bboxes, [0.0])
                    img_annot.append(annot.astype('float32'))

                annot = np.asarray(img_annot).astype('float32')

                samples.append((img, annot, masks))
            return samples

        # The metric intentionally scores against COCO val2017 even though
        # the returned datasets are the primate ones -- TODO confirm.
        # (The dead COCO train2017 load the original did here was removed;
        # it was immediately overwritten below.)
        val_dataset = gdata.COCOInstance(splits='instances_val2017',
                                         skip_empty=False)
        val_metric = COCOInstanceMetric(val_dataset,
                                        args.save_prefix + '_eval',
                                        cleanup=True)

        train_dataset = jsonToSet(annotations_train, img_path_train)
        # BUG FIX: validation images were loaded from img_path_train.
        val_dataset = jsonToSet(annotations_val, img_path_val)

        from mxnet.gluon.data.dataset import ArrayDataset

        train_dataset = ArrayDataset(train_dataset)
        val_dataset = ArrayDataset(val_dataset)

    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
Ejemplo n.º 12
0
# ---- configuration ----
# Everything runs on CPU with a single-threaded dataloader.
device = mx.cpu()
weights_file = "/Users/rensike/.mxnet/models/mask_rcnn_resnet50_v1b_coco-a3527fdc.params"
loader_workers = 0

# ---- model ----
# Build the architecture without downloading weights, then load the
# local parameter file and move parameters to the target device.
net = get_model("mask_rcnn_resnet50_v1b_coco", pretrained=False, pretrained_base=False)
net.load_parameters(weights_file)
net.collect_params().reset_ctx(device)

# ---- validation data ----
# skip_empty=False keeps annotation-free images in the evaluation set.
val_dataset = gdata.COCOInstance(splits='instances_val', root="/Users/rensike/Files/temp/coco_mini", skip_empty=False)
eval_metric = COCOInstanceMetric(val_dataset, "coco_eval")

# One Append batchifier per output; batch size 1, keep the last batch.
batchifier = batchify.Tuple(*[batchify.Append() for _ in range(2)])
val_data_loader = mx.gluon.data.DataLoader(
    val_dataset.transform(MaskRCNNDefaultValTransform(net.short, net.max_size)),
    1, False, batchify_fn=batchifier, last_batch='keep', num_workers=loader_workers)

# ---- evaluation ----
eval_metric.reset()
net.hybridize(static_alloc=True)

names, values = validate(net, val_data_loader, [device], eval_metric, len(val_dataset))
for metric_name, metric_value in zip(names, values):
    print(metric_name, metric_value)