Code Example #1
def prepare_data_pipeline():
    # batch size
    config_dict['batch_size'] = 4

    # number of train data_loader workers
    config_dict['num_train_workers'] = 4

    # number of val data_loader workers
    config_dict['num_val_workers'] = 4

    # construct train data_loader
    config_dict['train_dataset_path'] = './COCO_pack/coco_train2017.pkl'
    train_dataset = Dataset(load_path=config_dict['train_dataset_path'])

    train_dataset_sampler = COCORandomDatasetSampler(
        dataset=train_dataset,
        batch_size=config_dict['batch_size'],
        shuffle=True,
    )

    train_region_sampler = TypicalCOCOTrainingRegionSampler(
        resize_shorter_range=(800,), resize_longer_limit=1333, pad_divisor=32)

    config_dict['train_data_loader'] = DataLoader(
        dataset=train_dataset,
        dataset_sampler=train_dataset_sampler,
        region_sampler=train_region_sampler,
        augmentation_pipeline=typical_coco_train_pipeline,
        num_workers=config_dict['num_train_workers'])

    # construct val data_loader
    config_dict['val_dataset_path'] = './COCO_pack/coco_val2017.pkl'
    val_dataset = Dataset(load_path=config_dict['val_dataset_path'])
    val_dataset_sampler = RandomDatasetSampler(
        dataset=val_dataset,
        batch_size=config_dict['batch_size'],
        shuffle=False,
        ignore_last=False)
    val_region_sampler = TypicalCOCOTrainingRegionSampler(
        resize_shorter_range=(800,), resize_longer_limit=1333, pad_divisor=32)
    config_dict['val_data_loader'] = DataLoader(
        dataset=val_dataset,
        dataset_sampler=val_dataset_sampler,
        region_sampler=val_region_sampler,
        augmentation_pipeline=typical_coco_val_pipeline,
        num_workers=config_dict['num_val_workers'])

    # evaluator
    # the evaluator should match the dataset
    config_dict['val_annotation_path'] = '/home/yonghaohe/datasets/COCO/annotations/instances_val2017.json'
    config_dict['evaluator'] = COCOEvaluator(
        annotation_path=config_dict['val_annotation_path'],
        label_indexes_to_category_ids=val_dataset.meta_info['label_indexes_to_category_ids'])
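In every example here, `config_dict` is assumed to be a module-level dictionary that the framework's training entry point reads after `prepare_data_pipeline()` has populated it, and classes such as `Dataset`, `DataLoader`, the samplers, and the evaluator are assumed to be imported from the framework beforehand. A runnable toy sketch of that shared-dict pattern (the stub and all names below are illustrative, not the framework's API):

config_dict = {}

def prepare_data_pipeline_stub():
    # stand-in for the real function: it only mutates the shared dict
    config_dict['batch_size'] = 4
    config_dict['train_data_loader'] = [('images', 'targets')]  # placeholder batches

def run_training():
    prepare_data_pipeline_stub()
    for batch in config_dict['train_data_loader']:
        pass  # the real trainer would run forward/backward here

run_training()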
Code Example #2
def prepare_data_pipeline():
    # batch size
    config_dict['batch_size'] = 64

    # number of train data_loader workers
    config_dict['num_train_workers'] = 12

    # number of val data_loader workers
    config_dict['num_val_workers'] = 0

    # construct train data_loader
    config_dict['train_dataset_path'] = './WIDERFACE_pack/widerface_train.pkl'
    train_dataset = Dataset(load_path=config_dict['train_dataset_path'])
    train_dataset_sampler = RandomWithNegDatasetSampler(
        train_dataset,
        batch_size=config_dict['batch_size'],
        neg_ratio=0.2,
        shuffle=True,
        ignore_last=False)

    train_region_sampler = RandomBBoxCropRegionSampler(crop_size=480,
                                                       resize_range=(0.5, 1.5),
                                                       resize_prob=0.5)

    config_dict['train_data_loader'] = DataLoader(
        dataset=train_dataset,
        dataset_sampler=train_dataset_sampler,
        region_sampler=train_region_sampler,
        augmentation_pipeline=simple_widerface_train_pipeline,
        num_workers=config_dict['num_train_workers'])
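`neg_ratio=0.2` presumably reserves roughly 20% of each batch for negative samples (images without face annotations), which helps suppress false positives. A self-contained toy sketch of that batch-composition idea (not the real `RandomWithNegDatasetSampler`; all names are illustrative):

import random

def compose_batch(pos_indexes, neg_indexes, batch_size, neg_ratio):
    # roughly neg_ratio * batch_size negatives, the rest positives
    num_neg = int(round(batch_size * neg_ratio))
    batch = random.sample(neg_indexes, num_neg) + \
        random.sample(pos_indexes, batch_size - num_neg)
    random.shuffle(batch)
    return batch

# a 64-sample batch with neg_ratio=0.2 contains ~13 negatives
print(compose_batch(list(range(1000)), list(range(1000, 1300)), 64, 0.2))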
Code Example #3
def prepare_data_pipeline():
    # batch size
    config_dict['batch_size'] = 12

    # number of train data_loader workers
    config_dict['num_train_workers'] = 6

    # number of val data_loader workers
    config_dict['num_val_workers'] = 0

    # construct train data_loader
    config_dict['train_dataset_path'] = './WIDERFACE_pack/widerface_train.pkl'
    train_dataset = Dataset(load_path=config_dict['train_dataset_path'])
    train_dataset_sampler = RandomWithNegDatasetSampler(
        train_dataset,
        batch_size=config_dict['batch_size'],
        neg_ratio=0.2,
        shuffle=True,
        ignore_last=False)

    train_region_sampler = RandomBBoxCropWithRangeSelectionRegionSampler(
        crop_size=480,
        detection_ranges=config_dict['detection_scales'],
        range_selection_probs=[1, 1, 1, 1, 1],
        lock_threshold=30)

    config_dict['train_data_loader'] = DataLoader(
        dataset=train_dataset,
        dataset_sampler=train_dataset_sampler,
        region_sampler=train_region_sampler,
        augmentation_pipeline=simple_widerface_train_pipeline,
        num_workers=config_dict['num_train_workers'])
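Note that this snippet reads `config_dict['detection_scales']` without setting it, so that key must be filled in earlier, presumably where the network and its detection branches are configured. The five entries of `range_selection_probs` suggest five scale ranges; a hypothetical example of the expected shape (the concrete numbers are illustrative only):

# purely illustrative: five (min_size, max_size) face-scale ranges,
# one per entry of range_selection_probs
config_dict['detection_scales'] = [(4, 20), (20, 40), (40, 80), (80, 160), (160, 320)]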
Code Example #4
def prepare_data_pipeline():
    # batch size
    config_dict['batch_size'] = 16

    # number of train data_loader workers
    config_dict['num_train_workers'] = 4

    # number of val data_loader workers
    config_dict['num_val_workers'] = 0

    # construct train data_loader
    config_dict['train_dataset_path'] = 'xxxxxxxxx'
    train_dataset = Dataset(load_path=config_dict['train_dataset_path'])
    train_dataset_sampler = RandomWithNegDatasetSampler(
        train_dataset,
        batch_size=config_dict['batch_size'],
        neg_ratio=0.1,
        shuffle=True,
        ignore_last=False)
    train_region_sampler = RandomBBoxCropRegionSampler(crop_size=512,
                                                       resize_range=(0.5, 1.5))
    config_dict['train_data_loader'] = DataLoader(
        dataset=train_dataset,
        dataset_sampler=train_dataset_sampler,
        region_sampler=train_region_sampler,
        augmentation_pipeline=simple_widerface_train_pipeline,
        num_workers=config_dict['num_train_workers'])

    # construct val data_loader
    config_dict['val_dataset_path'] = 'xxxxxxxxxx'
    val_dataset = Dataset(load_path=config_dict['val_dataset_path'])
    val_dataset_sampler = RandomDatasetSampler(
        dataset=val_dataset,
        batch_size=config_dict['batch_size'],
        shuffle=False,
        ignore_last=False)
    val_region_sampler = IdleRegionSampler()
    config_dict['val_data_loader'] = DataLoader(
        dataset=val_dataset,
        dataset_sampler=val_dataset_sampler,
        region_sampler=val_region_sampler,
        augmentation_pipeline=simple_widerface_val_pipeline,
        num_workers=config_dict['num_val_workers'])
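In contrast to the cropping sampler used for training, `IdleRegionSampler` is, judging by its name, assumed to pass each validation image through at its original size. A trivial no-op sketch of that presumed behavior (hypothetical, not the library class):

class NoOpRegionSampler:
    """Return the full image region unchanged, as IdleRegionSampler
    is presumed to do for validation."""
    def sample(self, image_height, image_width):
        return 0, 0, image_height, image_width  # y, x, height, width

print(NoOpRegionSampler().sample(480, 640))  # -> (0, 0, 480, 640)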
Code Example #5
def prepare_data_pipeline():
    # batch size
    config_dict['batch_size'] = 4

    # number of train data_loader workers
    config_dict['num_train_workers'] = 4

    # number of val data_loader workers
    config_dict['num_val_workers'] = 4

    # construct train data_loader
    config_dict['train_dataset_path'] = './debug_data/train.pkl'
    train_dataset = Dataset(load_path=config_dict['train_dataset_path'])
    train_dataset_sampler = RandomWithNegDatasetSampler(
        train_dataset,
        batch_size=config_dict['batch_size'],
        neg_ratio=0.2,
        shuffle=True,
        ignore_last=False)

    train_region_sampler = RandomBBoxCropRegionSampler(crop_size=640,
                                                       resize_range=(0.5, 1.5),
                                                       resize_prob=0.5)

    config_dict['train_data_loader'] = DataLoader(
        dataset=train_dataset,
        dataset_sampler=train_dataset_sampler,
        region_sampler=train_region_sampler,
        augmentation_pipeline=train_pipeline,
        num_workers=config_dict['num_train_workers'])

    # construct val data_loader
    # config_dict['val_dataset_path'] = './debug_data/train.pkl'
    # val_dataset = Dataset(load_path=config_dict['val_dataset_path'])
    # val_dataset_sampler = RandomDatasetSampler(dataset=val_dataset,
    #                                            batch_size=config_dict['batch_size'],
    #                                            shuffle=False,
    #                                            ignore_last=False)
    # val_region_sampler = IdleRegionSampler()
    # config_dict['val_data_loader'] = DataLoader(dataset=val_dataset,
    #                                             dataset_sampler=val_dataset_sampler,
    #                                             region_sampler=val_region_sampler,
    #                                             augmentation_pipeline=val_pipeline,
    #                                             num_workers=config_dict['num_val_workers'])

    # evaluator
    # the evaluator should match the dataset
    # validation interval in epochs
    config_dict['val_interval'] = 0
    # config_dict['val_annotation_path'] = './debug_data/annotations/instances_train2017.json'
    config_dict['evaluator'] = None  # e.g. COCOEvaluator(annotation_path=config_dict['val_annotation_path'], ...) when validation is enabled
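`val_interval` appears to be the validation period in epochs, so 0 disables validation entirely, consistent with the loader and evaluator being commented out above. A minimal sketch of the check the trainer is assumed to perform (illustrative, not the framework's actual code):

def should_validate(epoch, val_interval):
    # validate every val_interval epochs; 0 switches validation off
    return val_interval > 0 and epoch % val_interval == 0

print(should_validate(4, 0))  # -> False: this debug config never validates
print(should_validate(4, 2))  # -> True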
Code Example #6
# number of train data_loader workers
config_dict['num_train_workers'] = 4

# number of val data_loader workers
config_dict['num_val_workers'] = 2

# construct train data_loader
config_dict['train_dataset_path'] = 'xxxxxxxxx'
train_dataset = Dataset(load_path=config_dict['train_dataset_path'])
train_dataset_sampler = COCORandomDatasetSampler(dataset=train_dataset,
                                                 batch_size=config_dict['batch_size'],
                                                 shuffle=True)
train_region_sampler = TypicalCOCOTrainingRegionSampler(resize_shorter_range=(800,), resize_longer_limit=1333, pad_divisor=32)
config_dict['train_data_loader'] = DataLoader(dataset=train_dataset,
                                              dataset_sampler=train_dataset_sampler,
                                              region_sampler=train_region_sampler,
                                              augmentation_pipeline=typical_coco_train_pipeline,
                                              num_workers=config_dict['num_train_workers'])

# construct val data_loader
config_dict['val_dataset_path'] = 'xxxxxxxxxx'
val_dataset = Dataset(load_path=config_dict['val_dataset_path'])
val_dataset_sampler = RandomDatasetSampler(dataset=val_dataset,
                                           batch_size=config_dict['batch_size'],
                                           shuffle=False,
                                           ignore_last=False)
val_region_sampler = TypicalCOCOTrainingRegionSampler(resize_shorter_range=(800,), resize_longer_limit=1333, pad_divisor=32)
config_dict['val_data_loader'] = DataLoader(dataset=val_dataset,
                                            dataset_sampler=val_dataset_sampler,
                                            region_sampler=val_region_sampler,
                                            augmentation_pipeline=typical_coco_val_pipeline,
                                            num_workers=config_dict['num_val_workers'])
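Unlike the other examples, this snippet runs at module level and reads `config_dict['batch_size']` without assigning it, so the key must already be set by earlier configuration code. A minimal guard (the default value is illustrative):

config_dict.setdefault('batch_size', 16)  # must run before the samplers above are built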