Example #1
def get_dataset(dataset, args):
    if dataset.lower() == 'coco':
        train_dataset = gdata.COCOInstance(root='/home/xcq/PycharmProjects/datasets/coco/',
                                           splits='instances_train2017')
        val_dataset = gdata.COCOInstance(root='/home/xcq/PycharmProjects/datasets/coco/',
                                         splits='instances_val2017', skip_empty=False)
        val_metric = COCOInstanceMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
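The snippets on this page use the same GluonCV names without showing their imports. A minimal header they appear to assume (inferred from the identifiers, not shown in any original snippet; individual examples pull in extras such as json, tqdm, or horovod.mxnet as hvd where used):

import mxnet as mx
from gluoncv import data as gdata
from gluoncv.utils.metrics.coco_instance import COCOInstanceMetric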
Example #2
def get_dataset(dataset, args):
    if dataset.lower() == 'coco':
        val_dataset = gdata.COCOInstance(splits='instances_val2017', skip_empty=False)
        val_metric = COCOInstanceMetric(val_dataset, args.save_prefix + '_eval',
                                        cleanup=not args.save_json)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return val_dataset, val_metric
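The cleanup=not args.save_json toggle keeps the COCO-format results file on disk when JSON output is requested. A hedged sketch of the flag this assumes (the attribute name comes from the snippet; the parser wiring is an assumption):

parser.add_argument('--save-json', action='store_true',
                    help='keep the results JSON instead of deleting it after scoring')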
Example #3
def get_dataset(dataset, args):
    if dataset.lower() == 'coco':
        train_dataset = gdata.COCOInstance(splits='instances_train2017')
        val_dataset = gdata.COCOInstance(splits='instances_val2017', skip_empty=False)
        starting_id = 0
        if args.horovod and MPI:
            length = len(val_dataset)
            shard_len = length // hvd.size()
            rest = length % hvd.size()
            # Contiguous shard: this rank starts after the preceding ranks'
            # shards, and the first `rest` ranks each take one extra sample
            starting_id = shard_len * hvd.rank() + min(hvd.rank(), rest)
        val_metric = COCOInstanceMetric(val_dataset, args.save_prefix + '_eval',
                                        use_ext=args.use_ext, starting_id=starting_id)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    if args.horovod and MPI:
        val_dataset = val_dataset.shard(hvd.size(), hvd.rank())
    return train_dataset, val_dataset, val_metric
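For concreteness, the start-index arithmetic on a toy split (illustrative numbers, not from the original):

length, size = 10, 3                              # e.g. 10 samples across 3 ranks
shard_len, rest = length // size, length % size   # 3, 1
starts = [shard_len * r + min(r, rest) for r in range(size)]
print(starts)                                     # [0, 4, 7] -> shards of 4, 3, 3

This is the same contiguous partitioning that val_dataset.shard(hvd.size(), hvd.rank()) applies above, which is why starting_id lines up with each rank's shard.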
Example #4
def get_dataset(args):
    train_dataset = gdata.COCOInstance(root=args.datasetloc,
                                       splits='instances_train2017')
    val_dataset = gdata.COCOInstance(root=args.datasetloc,
                                     splits='instances_val2017',
                                     skip_empty=False)
    starting_id = 0
    if args.horovod and MPI:
        length = len(val_dataset)
        shard_len = length // hvd.size()
        rest = length % hvd.size()
        # Contiguous shard: this rank starts after the preceding ranks'
        # shards, and the first `rest` ranks each take one extra sample
        starting_id = shard_len * hvd.rank() + min(hvd.rank(), rest)
    val_metric = COCOInstanceMetric(val_dataset,
                                    args.save_prefix + '_eval',
                                    use_ext=False,
                                    starting_id=starting_id)
    if args.horovod and MPI:
        val_dataset = val_dataset.shard(hvd.size(), hvd.rank())
    return train_dataset, val_dataset, val_metric
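A hedged usage sketch for this variant, with a plain namespace standing in for the parsed CLI args (the attribute names come from the snippet; the values are placeholders):

from types import SimpleNamespace

args = SimpleNamespace(datasetloc='~/.mxnet/datasets/coco',  # root handed to COCOInstance
                       save_prefix='mask_rcnn',              # prefix for the eval output
                       horovod=False)                        # single process: no sharding
train_dataset, val_dataset, val_metric = get_dataset(args)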
Example #5
import os.path as osp
import numpy as np
from gluoncv import data

def test_coco_instance():
    if not osp.isdir(osp.expanduser('~/.mxnet/datasets/coco')):
        return

    # use the val split only; loading the training split is very slow
    val = data.COCOInstance(splits=('instances_val2017',))
    name = str(val)  # exercise the dataset's string representation
    assert len(val.classes) > 0

    for _ in range(10):
        index = np.random.randint(0, len(val))
        _ = val[index]
Example #6
def get_dataset(dataset, args):
    if dataset.lower() == 'coco':
        train_dataset = gdata.COCOInstance(splits='instances_train2017')
        val_dataset = gdata.COCOInstance(splits='instances_val2017',
                                         skip_empty=False)
        val_metric = COCOInstanceMetric(val_dataset,
                                        args.save_prefix + '_eval',
                                        cleanup=True)
    elif dataset.lower() == 'imaterialist':
        from utils.iMaterialistDataset import COCOiMaterialist
        train_dataset = COCOiMaterialist(root='datasets/imaterialist/',
                                         splits='rle_instances_train')
        val_dataset = COCOiMaterialist(root='datasets/imaterialist/',
                                       splits='resize_rle_instances_val',
                                       skip_empty=False)
        val_metric = COCOInstanceMetric(val_dataset,
                                        args.save_prefix + '_eval',
                                        cleanup=True)
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
Example #7
def get_dataset(dataset, args):
    if dataset.lower() == 'coco':
        train_dataset = gdata.COCOInstance(splits='instances_train2017')
        val_dataset = gdata.COCOInstance(splits='instances_val2017',
                                         skip_empty=False)
        val_metric = COCOInstanceMetric(val_dataset,
                                        args.save_prefix + '_eval',
                                        cleanup=True)
    elif dataset.lower == "voc":
        input_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([.485, .456, .406], [.229, .224, .225]),
        ])
        train_dataset = gdata.VOCSegmentation(split="train",
                                              root=args.data_dir,
                                              transform=input_transform)
        val_dataset = gdata.VOCSegmentation(split="val",
                                            root=args.data_dir,
                                            transform=input_transform)
        val_metric = SegmentationMetric(train_dataset.num_class)
    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
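Note that the two branches hand back metrics with different update/get interfaces, so the caller has to know which dataset it requested. A short illustration (shapes per GluonCV's metric classes, stated from memory, so treat as hedged):

# COCO branch: per-image detections in, AP summary out
# val_metric.update(pred_bboxes, pred_labels, pred_scores, pred_masks)
names, values = val_metric.get()   # lists of AP metric names and values

# VOC branch: batched labels/predictions in, two scalars out
# val_metric.update(labels, preds)
pixAcc, mIoU = val_metric.get()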
Example #8
def get_dataset(dataset, args):
    if dataset.lower() == 'blbalbla':  # never matches: the stock COCO branch was disabled by scrambling its name
        train_dataset = gdata.COCOInstance(splits='instances_train2017')
        val_dataset = gdata.COCOInstance(splits='instances_val2017',
                                         skip_empty=False)
        val_metric = COCOInstanceMetric(val_dataset,
                                        args.save_prefix + '_eval',
                                        cleanup=True)

        print(val_metric)
    elif dataset.lower() == 'coco':

        # load custom annotations (VIA-style polygon JSON) instead of the stock COCO splits

        annotations_train = json.load(
            open('/home/nexus/primates/annotations/annot_train.json'))
        annotations_val = json.load(
            open('/home/nexus/primates/annotations/annot_test.json'))

        img_path_train = '/home/nexus/primates/train/'
        img_path_val = '/home/nexus/primates/test/'

        def jsonToSet(annotations, img_path):
            # build (image, annotations, masks) tuples; the list is local so
            # successive calls (train, then val) don't share state
            train_set = []
            for key in tqdm(annotations.keys()):
                el = annotations[key]

                # imread is assumed to be imported elsewhere (e.g. skimage.io)
                img = mx.nd.array(
                    imread(img_path + key.split('.')[0] +
                           '.png').astype('float32'))

                img_annot = []
                masks = []
                for reg in el['regions']:
                    # VIA-style polygon: parallel x/y coordinate lists
                    x = reg['shape_attributes']['all_points_x']
                    y = reg['shape_attributes']['all_points_y']

                    # (N, 2) polygon of (x, y) points
                    _mask = [
                        np.swapaxes(np.asarray([x, y]).astype('float32'),
                                    axis1=0,
                                    axis2=1)
                    ]
                    masks.append(_mask)

                    # load_mask / extract_bboxes are assumed to be defined
                    # elsewhere (Mask R-CNN-style utilities)
                    mask = load_mask(img.asnumpy(),
                                     np.expand_dims(_mask, axis=-1))
                    bboxes = extract_bboxes(mask)
                    # swap the coordinate pairs (yxyx -> xyxy ordering)
                    bboxes = [[
                        bboxes[0][1], bboxes[0][0], bboxes[0][3], bboxes[0][2]
                    ]]
                    # box coordinates plus a single class id (0.0)
                    annot = np.append(bboxes, [0.0])
                    img_annot.append(annot.astype('float32'))

                annot = np.asarray(img_annot).astype('float32')
                train_set.append((img, annot, masks))
            return train_set

        train_dataset = gdata.COCOInstance(splits='instances_train2017')
        val_dataset = gdata.COCOInstance(splits='instances_val2017',
                                         skip_empty=False)
        val_metric = COCOInstanceMetric(val_dataset,
                                        args.save_prefix + '_eval',
                                        cleanup=True)

        train_dataset = jsonToSet(annotations_train, img_path_train)
        val_dataset = jsonToSet(annotations_val, img_path_val)

        from mxnet.gluon.data.dataset import ArrayDataset

        train_dataset = ArrayDataset(train_dataset)
        val_dataset = ArrayDataset(val_dataset)

        #from gluoncv.data.batchify import Tuple, Append, MaskRCNNTrainBatchify

        #train_transform = presets.rcnn.MaskRCNNDefaultTrainTransform(short, max_size, net)
        #train_dataset = train_dataset.transform(train_transform)
        #val_dataset = val_dataset.transform(train_transform)

    else:
        raise NotImplementedError(
            'Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric
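For what it's worth, mxnet's ArrayDataset with a single argument simply indexes into the wrapped list, so each sample above comes back as one (img, annot, masks) tuple; a minimal check (illustrative values only):

from mxnet.gluon.data.dataset import ArrayDataset

ds = ArrayDataset([('img0', 'annot0', 'masks0'), ('img1', 'annot1', 'masks1')])
assert ds[1] == ('img1', 'annot1', 'masks1')  # samples are the stored tuples
assert len(ds) == 2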
Example #9

import mxnet as mx
from gluoncv import data as gdata
from gluoncv.data import batchify
from gluoncv.data.transforms.presets.rcnn import MaskRCNNDefaultValTransform
from gluoncv.model_zoo import get_model
from gluoncv.utils.metrics.coco_instance import COCOInstanceMetric

# init params
ctx = mx.cpu()
model_path = "/Users/rensike/.mxnet/models/mask_rcnn_resnet50_v1b_coco-a3527fdc.params"
num_workers = 0

# init model
net = get_model("mask_rcnn_resnet50_v1b_coco", pretrained=False, pretrained_base=False)
net.load_parameters(model_path)
net.collect_params().reset_ctx(ctx)

# load val dataset
val_dataset = gdata.COCOInstance(root="/Users/rensike/Files/temp/coco_mini",
                                 splits='instances_val', skip_empty=False)
# val_dataset = gdata.VOCSegmentation(root="/Users/rensike/Files/temp/voc_mini", split="val")
# eval_metric = SegmentationMetric(nclass=val_dataset.num_class)
eval_metric = COCOInstanceMetric(val_dataset, "coco_eval")

# load val dataloader
# two fields per sample (image, im_info), each appended as a per-image batch
val_bfn = batchify.Tuple(*[batchify.Append() for _ in range(2)])
val_data_loader = mx.gluon.data.DataLoader(
    val_dataset.transform(MaskRCNNDefaultValTransform(net.short, net.max_size)),
    1, False, batchify_fn=val_bfn, last_batch='keep', num_workers=num_workers)

# do evaluate
eval_metric.reset()
net.hybridize(static_alloc=True)

names, values = validate(net, val_data_loader, [ctx], eval_metric, len(val_dataset))
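The script calls validate() without defining it. A hedged sketch of what that helper plausibly looks like, modeled on GluonCV's eval_mask_rcnn.py script but simplified to a single context and batch size 1 (the score threshold and mask pasting follow that script; the original additionally clips boxes to the image):

import numpy as np
from gluoncv.data.transforms import mask as tmask

def validate(net, val_data, ctx, eval_metric, size):
    # size is only used for progress reporting in the original script
    eval_metric.reset()
    for batch in val_data:
        x = batch[0][0].as_in_context(ctx[0])
        im_info = batch[1][0].as_in_context(ctx[0])
        ids, scores, bboxes, masks = net(x)
        im_height, im_width, im_scale = im_info[0].asnumpy()
        # keep real detections above a small score threshold
        valid = np.where((ids[0].asnumpy() >= 0)
                         & (scores[0].asnumpy() >= 0.001))[0]
        det_id = ids[0].asnumpy()[valid]
        det_score = scores[0].asnumpy()[valid]
        det_bbox = bboxes[0].asnumpy()[valid] / im_scale  # back to original scale
        det_mask = masks[0].asnumpy()[valid]
        # paste the fixed-size mask crops into full-size image masks
        width = int(round(im_width / im_scale))
        height = int(round(im_height / im_scale))
        full_masks = tmask.fill(det_mask, det_bbox, (width, height))
        eval_metric.update(det_bbox, det_id, det_score, full_masks)
    return eval_metric.get()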