Example 1
import os.path as osp

import instance_occlsegm_lib

import train_common


here = osp.dirname(osp.abspath(__file__))  # the directory of this script


def main():
    args = train_common.parse_args()

    args.logs_dir = osp.join(here, 'logs')

    # Dataset.
    args.dataset = 'arc2017_real'
    train_data = instance_occlsegm_lib.datasets.apc.\
        ARC2017InstanceSegmentationDataset(split='train', aug='standard')
    test_data = instance_occlsegm_lib.datasets.apc.\
        ARC2017InstanceSegmentationDataset(split='test')
    args.class_names = tuple(test_data.class_names)

    # Model.
    args.min_size = 600
    args.max_size = 1000
    args.anchor_scales = (4, 8, 16, 32)

    # Run training!
    train_common.train(
        args=args,
        train_data=train_data,
        test_data=test_data,
        evaluator_type='coco',
    )


if __name__ == '__main__':
    main()
Example 2
import os.path as osp

import chainer

import train_common
# VOCLikeDataset is assumed to be importable from a local module
# alongside this script.


here = osp.dirname(osp.abspath(__file__))


def main():
    args = train_common.parse_args()

    args.logs_dir = osp.join(here, 'logs')

    # Dataset. For demonstration with few images, we use the same dataset
    # for both train and test.
    root_dir = osp.join(
        here,
        'src/labelme/examples/instance_segmentation/data_dataset_voc',
    )
    args.dataset = 'custom'
    # The dataset has only 3 images; repeating it 20x makes 1 epoch = 60 images.
    train_data = [VOCLikeDataset(root_dir=root_dir)] * 20
    train_data = chainer.datasets.ConcatenatedDataset(*train_data)
    test_data = VOCLikeDataset(root_dir=root_dir)
    args.class_names = tuple(VOCLikeDataset.class_names.tolist())

    # Model.
    args.min_size = 600
    args.max_size = 1000
    args.anchor_scales = (4, 8, 16, 32)

    train_common.train(
        args=args,
        train_data=train_data,
        test_data=test_data,
        evaluator_type='voc',
    )


if __name__ == '__main__':
    main()
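
Whatever VOCLikeDataset does internally, anything passed as train_data or
test_data in these examples has to behave like a chainer dataset whose
examples are (img, bboxes, labels, masks) tuples, the convention the
chainer-mask-rcnn style trainers consume (Example 3's return_crowd and
return_area options append extra fields to the same tuple). Below is a
minimal toy sketch of that contract; the class name, image size, and exact
tuple layout are assumptions for illustration, not the real VOCLikeDataset.

import chainer
import numpy as np


class ToyInstanceSegmentationDataset(chainer.dataset.DatasetMixin):

    # Single foreground class; background is handled by the model itself.
    class_names = np.array(['object'])

    def __len__(self):
        return 1

    def get_example(self, i):
        # img: float32 CHW image.
        img = np.zeros((3, 480, 640), dtype=np.float32)
        # bboxes: (R, 4) float32, each row (y_min, x_min, y_max, x_max).
        bboxes = np.array([[100, 100, 300, 400]], dtype=np.float32)
        # labels: (R,) int32 indices into class_names.
        labels = np.array([0], dtype=np.int32)
        # masks: (R, H, W) binary masks, one per instance.
        masks = np.zeros((1, 480, 640), dtype=bool)
        masks[0, 100:300, 100:400] = True
        return img, bboxes, labels, masks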
Example 3
import os.path as osp

import chainer
import chainer_mask_rcnn as cmr

import train_common


here = osp.dirname(osp.abspath(__file__))


def main():
    args = train_common.parse_args()

    args.logs_dir = osp.join(here, 'logs')

    # Dataset.
    args.dataset = 'coco'
    train_data = chainer.datasets.ConcatenatedDataset(
        cmr.datasets.COCOInstanceSegmentationDataset('train'),
        cmr.datasets.COCOInstanceSegmentationDataset('valminusminival'),
    )
    
    test_data = cmr.datasets.COCOInstanceSegmentationDataset(
        'minival',
        use_crowd=True,
        return_crowd=True,
        return_area=True,
    )
    args.class_names = tuple(test_data.class_names.tolist())

    # Model.
    args.min_size = 800
    args.max_size = 1333
    args.anchor_scales = (2, 4, 8, 16, 32)

    # Run training!
    train_common.train(
        args=args,
        train_data=train_data,
        test_data=test_data,
        evaluator_type='coco',
    )


if __name__ == '__main__':
    main()
Example 4
import os.path as osp

import grasp_fusion

import train_common


here = osp.dirname(osp.abspath(__file__))


def main():
    parser = train_common.get_parser()
    parser.add_argument(
        '--exclude-arc2017',
        action='store_true',
        help='Exclude ARC2017 objects from synthetic',
    )
    parser.add_argument(
        '--background',
        choices=['tote', 'tote+shelf'],
        default='tote',
        help='background image in 2D synthesis',
    )
    args = parser.parse_args()

    args.logs_dir = osp.join(here, 'logs')

    # Dataset.
    args.dataset = 'synthetic'
    train_data = \
        grasp_fusion.datasets.SyntheticInstanceSegmentationDataset(
            augmentation=True,
            augmentation_level='all',
            exclude_arc2017=args.exclude_arc2017,
            background=args.background,
        )
    test_data = \
        grasp_fusion.datasets.RealInstanceSegmentationDataset()
    args.class_names = tuple(test_data.class_names.tolist())

    # Model.
    args.min_size = 600
    args.max_size = 1000
    args.anchor_scales = (4, 8, 16, 32)

    # Run training!
    train_common.train(
        args=args,
        train_data=train_data,
        test_data=test_data,
        evaluator_type='coco',
    )


if __name__ == '__main__':
    main()
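
Since this example builds on train_common.get_parser(), the two extra
options are ordinary command-line flags; a run that drops ARC2017 objects
and composites onto tote-and-shelf backgrounds would be invoked roughly as
./train.py --exclude-arc2017 --background tote+shelf (the script name is
hypothetical, and any flags required by the common parser are omitted).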
Example 5
import os.path as osp

import train_common

import dataset  # local module assumed to provide OomugiDataset


here = osp.dirname(osp.abspath(__file__))


def main():
    args = train_common.parse_args()

    args.logs_dir = osp.join(here, 'logs')

    # Dataset.
    # (A COCO dataset setup identical to Example 3 was commented out here,
    # replaced by the original dataset below.)

    args.dataset = 'original'
    train_data = dataset.OomugiDataset()
    test_data = dataset.OomugiDataset(test=True)
    args.class_names = ['leaf']

    # Model.
    args.min_size = 800
    args.max_size = 1333
    args.anchor_scales = (2, 4, 8, 16, 32)
    # Wide range of aspect ratios to cover long, thin leaf instances.
    args.ratios = (0.11, 0.14, 0.2, 0.25, 0.33, 0.5, 1, 2, 3, 4, 5, 7, 9)

    # Run training!
    train_common.train(
        args=args,
        train_data=train_data,
        test_data=test_data,
        evaluator_type='coco',
    )


if __name__ == '__main__':
    main()
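
The unusually broad args.ratios above is easier to read in terms of the
anchor shapes it induces. Assuming train_common forwards these values to a
ChainerCV-style anchor generator (where ratio = height / width and the base
size is 16; that forwarding is an assumption), this sketch prints the
resulting anchor extents:

import numpy as np


def anchor_sizes(base_size=16, scales=(2, 4, 8, 16, 32),
                 ratios=(0.11, 0.33, 1, 3, 9)):
    # ChainerCV-style anchors: h = base * s * sqrt(r), w = base * s * sqrt(1/r),
    # so every anchor at scale s has area (base * s) ** 2 regardless of r.
    for r in ratios:
        for s in scales:
            h = base_size * s * np.sqrt(r)
            w = base_size * s * np.sqrt(1. / r)
            print('ratio=%-4g scale=%-2d -> %5.0f x %5.0f px' % (r, s, h, w))


anchor_sizes()

At ratio 0.11 and scale 32 this gives an anchor of roughly 170 x 1544 px, a
very short, wide box suited to a near-horizontal leaf instance; ratio 9
covers the near-vertical case.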
Example 6
import os.path as osp

import chainer_mask_rcnn as cmr

import train_common


here = osp.dirname(osp.abspath(__file__))


def main():
    args = train_common.parse_args()

    args.logs_dir = osp.join(here, 'logs')

    # Dataset.
    args.dataset = 'voc'
    train_data = cmr.datasets.SBDInstanceSegmentationDataset('train')
    test_data = cmr.datasets.SBDInstanceSegmentationDataset('val')
    args.class_names = tuple(train_data.class_names.tolist())

    # Model.
    args.min_size = 600
    args.max_size = 1000
    args.anchor_scales = (4, 8, 16, 32)

    train_common.train(
        args=args,
        train_data=train_data,
        test_data=test_data,
        evaluator_type='voc',
    )


if __name__ == '__main__':
    main()