Example #1
def create_generator(args, data, DeepForest_config):
    """ Create the training generator.
    """
    # create random transform generator for augmenting training data
    if args.random_transform:
        transform_generator = random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.9, 0.9),
            max_scaling=(1.1, 1.1),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )
    else:
        transform_generator = random_transform_generator(flip_x_chance=0.5)

    #Split training and test data - hardcoded paths set below.
    train, test = preprocess.split_training(
        data,
        DeepForest_config,
        single_tile=DeepForest_config["single_tile"],
        experiment=None)

    #Training Generator
    generator = OnTheFlyGenerator(data,
                                  train,
                                  batch_size=args.batch_size,
                                  DeepForest_config=DeepForest_config,
                                  group_method="none")

    return generator
Example #2
    def __init__(self,
                 backbone="resnet50",
                 random_transform=False,
                 batch_size=2,
                 step=100,
                 epoch=40,
                 anchor_settings=None):
        """ Initializer
            Args:
                backbone
                    string indicate which backbone network to use (only for ones defined in
                    keras-retinanet)
                random_transform
                    boolean indicate whether image is randomly transformed when training
                batch_size
                    number of images sent for each step during training
                step
                    number of steps(batches) to run for each epoch during training
                epoch
                    number of epochs for this training
                anchor_settings
                    a list of lists indicate anchor box parameters. please reference
                    keras-retinanet's document for how to set up anchor box parameters
        """
        check_keras_version()
        self._backbone_name = backbone
        self._backbone = models.backbone(backbone)
        self._img_preprocessor = self._backbone.preprocess_image
        self._batch_size = batch_size
        self._step = step
        self._epoch = epoch

        if random_transform:
            self._transform_generator = random_transform_generator(
                min_rotation=-0.1,
                max_rotation=0.1,
                min_translation=(-0.1, -0.1),
                max_translation=(0.1, 0.1),
                min_shear=-0.1,
                max_shear=0.1,
                min_scaling=(0.9, 0.9),
                max_scaling=(1.1, 1.1),
                flip_x_chance=0.5,
                flip_y_chance=0.5,
            )
        else:
            self._transform_generator = random_transform_generator(
                flip_x_chance=0.5)

        self._common_args = {
            'batch_size': batch_size,
            'preprocess_image': self._img_preprocessor
        }

        if anchor_settings:
            self._anchor_params = AnchorParameters(*anchor_settings)
        else:
            self._anchor_params = AnchorParameters.default
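For reference, the anchor_settings list above is unpacked into AnchorParameters, whose positional order in keras-retinanet is (sizes, strides, ratios, scales). A minimal sketch of a value that would work, using the library's default numbers (the variable name is only illustrative):

anchor_settings = [
    [32, 64, 128, 256, 512],                       # sizes, one per pyramid level
    [8, 16, 32, 64, 128],                          # strides, one per pyramid level
    [0.5, 1.0, 2.0],                               # aspect ratios per anchor position
    [2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)],  # scales per anchor position
]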
Example #3
def create_generators(args, preprocess_image):
    """Create generators for training and validation.

    Args:
        args             : parseargs object containing configuration for generators.
        preprocess_image : Function that preprocesses an image for the network.
    """
    common_args = {
        'batch_size': args.batch_size,
        'config': args.config,
        'image_min_side': args.image_min_side,
        'image_max_side': args.image_max_side,
        'preprocess_image': preprocess_image,
    }

    # create random transform generator for augmenting training data
    if args.random_transform:
        transform_generator = random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.9, 0.9),
            max_scaling=(1.1, 1.1),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )
        visual_effect_generator = random_visual_effect_generator(
            contrast_range=(0.9, 1.1),
            brightness_range=(-.1, .1),
            hue_range=(-0.05, 0.05),
            saturation_range=(0.95, 1.05))
    else:
        transform_generator = random_transform_generator(flip_x_chance=0.5)
        visual_effect_generator = None
    if args.dataset_type == 'csv':
        train_generator = CSVGenerator(
            args.annotations,
            args.classes,
            transform_generator=transform_generator,
            visual_effect_generator=visual_effect_generator,
            **common_args)

        if args.val_annotations:
            validation_generator = CSVGenerator(args.val_annotations,
                                                args.classes,
                                                shuffle_groups=False,
                                                **common_args)
        else:
            validation_generator = None
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
Example #4
def create_generators(args, preprocess_image):
    """Create generators for training and validation.
    # Args
        args: parseargs object containing configuration for generators.
        preprocess_image: Function that preprocesses an image for the network.
    """
    common_args = {
        'batch_size': args.batch_size,
        'image_min_side': args.image_min_side,
        'image_max_side': args.image_max_side,
        'preprocess_image': preprocess_image,
    }

    # create random transform generator for augmenting training data
    if args.random_transform:
        transform_generator = random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.9, 0.9),
            max_scaling=(1.1, 1.1),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )
    else:
        transform_generator = random_transform_generator(flip_x_chance=0.5)

    if args.dataset_type == 'train':
        train_generator = RetinaNetGenerator(
            direc_name='/data/data/cells/HeLa/S3',
            training_dirs=['set1', 'set2'],
            raw_image_dir='raw',
            channel_names=['FITC'],
            annotation_dir='annotated',
            annotation_names=['corrected'],
            # args.annotations,
            # args.classes,
            **common_args)

        if args.val_annotations:
            validation_generator = RetinaNetGenerator(
                direc_name='/data/data/cells/HeLa/S3',
                training_dirs=['set1', 'set2'],
                raw_image_dir='raw',
                channel_names=['FITC'],
                annotation_dir='annotated',
                annotation_names=['corrected'],
                # args.val_annotations,
                # args.classes,
                **common_args)
        else:
            validation_generator = None
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
Example #5
def create_generators(args, data, DeepForest_config, experiment):
    """ Create generators for training and validation.
    """
    # create random transform generator for augmenting training data
    if args.random_transform:
        transform_generator = random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.9, 0.9),
            max_scaling=(1.1, 1.1),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )
    else:
        transform_generator = random_transform_generator(flip_x_chance=0.5)

    #Split training and test data - hardcoded paths set below.
    train, test = preprocess.split_training(
        data,
        DeepForest_config,
        single_tile=DeepForest_config["single_tile"],
        experiment=experiment)

    experiment.log_dataset_hash(data=train)

    #Write out for debug
    if args.save_path:
        train.to_csv(os.path.join(args.save_path, 'training_dict.csv'),
                     header=False)

    #Training Generator
    train_generator = OnTheFlyGenerator(data,
                                        train,
                                        batch_size=args.batch_size,
                                        DeepForest_config=DeepForest_config,
                                        group_method="none",
                                        shuffle_tile_epoch=True,
                                        name="training")

    #Validation Generator

    validation_generator = OnTheFlyGenerator(
        data,
        test,
        batch_size=args.batch_size,
        DeepForest_config=DeepForest_config,
        group_method="none",
        name="validation")

    return train_generator, validation_generator
Example #6
def create_generators(args, preprocess_image):
    """ Create generators for training and validation.
    Args
        args             : parseargs object containing configuration for generators.
        preprocess_image : Function that preprocesses an image for the network.
    """
    common_args = {
        'batch_size'       : args.batch_size,
        'image_min_side'   : args.image_min_side,
        'image_max_side'   : args.image_max_side,
        'preprocess_image' : preprocess_image,
    }

    # create random transform generator for augmenting training data
    if args.random_transform:
        transform_generator = random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.8, 0.8),
            max_scaling=(1.2, 1.2),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )
    else:
        transform_generator = random_transform_generator(flip_x_chance=0.5)


    train_generator = OpenImagesGenerator(
        args.main_dir,
        subset='train',
        version=args.version,
        labels_filter=args.labels_filter,
        annotation_cache_dir=args.annotation_cache_dir,
        fixed_labels=args.fixed_labels,
        transform_generator=transform_generator,
        **common_args
    )

    validation_generator = OpenImagesGenerator(
        args.main_dir,
        subset='validation',
        version=args.version,
        labels_filter=args.labels_filter,
        annotation_cache_dir=args.annotation_cache_dir,
        fixed_labels=args.fixed_labels,
        **common_args
    )

    return train_generator, validation_generator
Example #7
def create_generators(args):
    # create random transform generator for augmenting training data
    transform_generator = random_transform_generator(flip_x_chance=0.5)

    if args.dataset_type == 'train':

        train_generator = MaskRCNNGenerator(
            direc_name='/data/data/cells/HeLa/S3',
            training_dirs=['set1', 'set2'],
            raw_image_dir='raw',
            channel_names=['FITC'],
            annotation_dir='annotated',
            annotation_names=['corrected'],
            transform_generator=transform_generator,
            batch_size=args.batch_size)

        if args.val_annotations:
            validation_generator = MaskRCNNGenerator(
                direc_name='/data/data/cells/HeLa/S3',
                training_dirs=['set1', 'set2'],
                raw_image_dir='raw',
                channel_names=['FITC'],
                annotation_dir='annotated',
                annotation_names=['corrected'],
                batch_size=args.batch_size)
        else:
            validation_generator = None
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
Example #8
def create_generators(batch_size, annotations, classes):
    # create random transform generator for augmenting training data
    transform_generator = random_transform_generator(flip_x_chance=0.5)
    from keras_maskrcnn.preprocessing.csv_generator import CSVGenerator
    train_generator = CSVGenerator(annotations, classes)
    validation_generator = None
    return train_generator, validation_generator
Example #9
def create_generators(args):
    transform_generator = random_transform_generator(
        min_rotation=-0.1,  # rotation bounds are in radians
        max_rotation=0.1,
        min_translation=(-0.1, -0.1),  # translation is a fraction of the image size
        max_translation=(0.1, 0.1),
        min_shear=-0.1,
        max_shear=0.1,
        min_scaling=(0.9, 0.9),
        max_scaling=(1.1, 1.1),
        flip_x_chance=0.5,
        flip_y_chance=0.5,
    )

    train_generator = AugmentedGenerator(
        args.annotations,
        args.classes,
        transform_generator=transform_generator,
        batch_size=args.batch_size,
        image_min_side=args.image_min_side,
        image_max_side=args.image_max_side)

    if args.val_annotations:
        validation_generator = CSVGenerator(args.val_annotations,
                                            args.classes,
                                            batch_size=args.batch_size,
                                            image_min_side=args.image_min_side,
                                            image_max_side=args.image_max_side)
    else:
        validation_generator = None

    return train_generator, validation_generator
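The ranges above yield one 3x3 homogeneous transform per image; rotations are in radians and translations are a fraction of the image size. A small sketch, assuming keras-retinanet's random_transform_generator, of pulling and inspecting a single matrix:

import numpy as np
from keras_retinanet.utils.transform import random_transform_generator

# the generator is infinite; each next() yields one 3x3 augmentation matrix
gen = random_transform_generator(min_rotation=-0.1,
                                 max_rotation=0.1,
                                 flip_x_chance=0.5,
                                 prng=np.random.RandomState(0))
transform = next(gen)
assert transform.shape == (3, 3)
print(transform)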
Example #10
def create_generator(args):
    # create random transform generator for augmenting training data
    transform_generator = random_transform_generator(
        # min_rotation=-0.1,
        # max_rotation=0.1,
        # min_translation=(-0.1, -0.1),
        # max_translation=(0.1, 0.1),
        # min_shear=-0.1,
        # max_shear=0.1,
        # min_scaling=(0.9, 0.9),
        # max_scaling=(1.1, 1.1),
        flip_x_chance=0.5,
        # flip_y_chance=0.5,
    )

    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        generator = CocoGenerator(args.coco_path,
                                  args.coco_set,
                                  transform_generator=transform_generator)
    elif args.dataset_type == 'csv':
        from ..preprocessing.csv_generator import CSVGenerator

        generator = CSVGenerator(args.annotations,
                                 args.classes,
                                 transform_generator=transform_generator)
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return generator
Example #11
def main():
    args = parse_args(sys.argv[1:])

    transform_generator = random_transform_generator(
        min_rotation=-0.1,
        max_rotation=0.1,
        min_translation=(-0.1, -0.1),
        max_translation=(0.1, 0.1),
        min_shear=-0.1,
        max_shear=0.1,
        min_scaling=(0.9, 0.9),
        max_scaling=(1.1, 1.1),
    )

    generator = CSVGenerator(
        args.annotations,
        args.classes,
        # transform_generator=transform_generator,
        base_dir=args.images_dir)

    os.makedirs('gen_images', exist_ok=True)
    for i in range(5):
        batch = next(generator)
        img_ar = batch[0].reshape(batch[0].shape[1:]).astype('uint8')
        img = Image.fromarray(img_ar)
        img.save('gen_images/{}.jpg'.format(i + 1))
Example #12
def test_random_transform():
    prng = np.random.RandomState(0)
    for i in range(100):
        transform = random_transform(prng=prng)
        assert np.array_equal(transform, np.identity(3))

    for i, transform in zip(range(100), random_transform_generator(prng=np.random.RandomState())):
        assert np.array_equal(transform, np.identity(3))
Example #13
def test_random_transform():
    prng = np.random.RandomState(0)
    for i in range(100):
        transform = random_transform(prng=prng)
        assert np.array_equal(transform, np.identity(3))

    for i, transform in zip(range(100), random_transform_generator(prng=np.random.RandomState())):
        assert np.array_equal(transform, np.identity(3))
Example #14
def create_generators(args):
    # create random transform generator for augmenting training data
    transform_generator = random_transform_generator(flip_x_chance=0.5)

    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from maskrcnn_modanet.train.coco import CocoGenerator

        train_generator = CocoGenerator(
            args.coco_path,
            'train',
            transform_generator=transform_generator,
            batch_size=args.batch_size,
            config=args.config,
            image_min_side=800,
            image_max_side=1333
        )

        validation_generator = CocoGenerator(
            args.coco_path,
            'val',
            batch_size=args.batch_size,
            config=args.config,
            image_min_side=800,
            image_max_side=1333
        )
    elif args.dataset_type == 'csv':
        from keras_maskrcnn.preprocessing.csv_generator import CSVGenerator

        train_generator = CSVGenerator(
            args.annotations,
            args.classes,
            transform_generator=transform_generator,
            batch_size=args.batch_size,
            config=args.config,
            image_min_side=800,
            image_max_side=1333
        )

        if args.val_annotations:
            validation_generator = CSVGenerator(
                args.val_annotations,
                args.classes,
                batch_size=args.batch_size,
                config=args.config,
                image_min_side=800,
                image_max_side=1333
            )
        else:
            validation_generator = None
    else:
        raise ValueError('Invalid data type received: {}'.format(args.dataset_type))

    return train_generator, validation_generator
Example #15
def create_generators(args):
    # create random transform generator for augmenting training data
    transform_generator = random_transform_generator(
        min_rotation=-0.1,
        max_rotation=0.1,
        min_translation=(-0.1, -0.1),
        max_translation=(0.1, 0.1),
        min_shear=-0.1,
        max_shear=0.1,
        min_scaling=(0.9, 0.9),
        max_scaling=(1.1, 1.1),
        flip_x_chance=0.5,
    )

    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(
            args.coco_path,
            'train2017',
            transform_generator=transform_generator,
            batch_size=args.batch_size,
            config=args.config)

        validation_generator = None
        if args.evaluation:
            validation_generator = CocoGenerator(args.coco_path,
                                                 'val2017',
                                                 batch_size=args.batch_size,
                                                 config=args.config)
    elif args.dataset_type == 'csv':
        from ..preprocessing.csv_generator import CSVGenerator

        train_generator = CSVGenerator(args.annotations,
                                       args.classes,
                                       transform_generator=transform_generator,
                                       batch_size=args.batch_size,
                                       config=args.config)

        if args.val_annotations:
            validation_generator = CSVGenerator(args.val_annotations,
                                                args.classes,
                                                batch_size=args.batch_size,
                                                config=args.config)
        else:
            validation_generator = None
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
Example #16
def create_generators(args, preprocess_image):
    """ Create generators for training and validation.
 
    Args
        args             : parseargs object containing configuration for generators.
        preprocess_image : Function that preprocesses an image for the network.
    """
    common_args = {
        'batch_size'       : args.batch_size,
        'config'           : args.config,
        'image_min_side'   : args.image_min_side,
        'image_max_side'   : args.image_max_side,
        'preprocess_image' : preprocess_image, 
    }
 
#     # create random transform generator for augmenting training data
#     if args.random_transform:
#         transform_generator = random_transform_generator(
#             min_rotation=-0.1,
#             max_rotation=0.1,
#             min_translation=(-0.1, -0.1),
#             max_translation=(0.1, 0.1),
#             min_shear=-0.1,
#             max_shear=0.1,
#             min_scaling=(0.9, 0.9),
#             max_scaling=(1.1, 1.1),
#             flip_x_chance=0.5,
#             flip_y_chance=0.5,
#         )
#     else:
#         transform_generator = random_transform_generator(flip_x_chance=0.5)

    transform_generator = random_transform_generator()
    train_generator = PascalVocGenerator(
        args.train_imgs_dir,
        args.train_anns_dir,
        transform_generator=transform_generator,
        **common_args
    )
    
    validation_generator = PascalVocGenerator(
        args.valid_imgs_dir,
        args.valid_anns_dir,
        **common_args
    )
    return train_generator, validation_generator
Example #17
def get_random_augmentator(configs):
    """Return a retinanet data augmentator. @config comes the recipe params."""
    return random_transform_generator(
        min_rotation=float(configs['min_rotation']),
        max_rotation=float(configs['max_rotation']),
        min_translation=(float(configs['min_trans']),
                         float(configs['min_trans'])),
        max_translation=(float(configs['max_trans']),
                         float(configs['max_trans'])),
        min_shear=float(configs['min_shear']),
        max_shear=float(configs['max_shear']),
        min_scaling=(float(configs['min_scaling']),
                     float(configs['min_scaling'])),
        max_scaling=(float(configs['max_scaling']),
                     float(configs['max_scaling'])),
        flip_x_chance=float(configs['flip_x']),
        flip_y_chance=float(configs['flip_y']))
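A hedged usage sketch for the helper above; the dictionary is hypothetical, but its keys and string values (note the float() casts) mirror what the function body reads:

configs = {
    'min_rotation': '-0.1', 'max_rotation': '0.1',
    'min_trans': '-0.1', 'max_trans': '0.1',
    'min_shear': '-0.1', 'max_shear': '0.1',
    'min_scaling': '0.9', 'max_scaling': '1.1',
    'flip_x': '0.5', 'flip_y': '0.5',
}
augmentor = get_random_augmentator(configs)  # a keras-retinanet transform generator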
Example #18
def create_generators(args):
    # create random transform generator for augmenting training data
    transform_generator = random_transform_generator(flip_x_chance=0.5)

    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(
            args.coco_path,
            'train2017',
            transform_generator=transform_generator,
            batch_size=args.batch_size,
        )

        validation_generator = CocoGenerator(
            args.coco_path,
            'val2017',
            batch_size=args.batch_size,
        )
    elif args.dataset_type == 'csv':
        from ..preprocessing.csv_generator import CSVGenerator

        train_generator = CSVGenerator(
            args.annotations,
            args.classes,
            transform_generator=transform_generator,
            batch_size=args.batch_size
        )

        if args.val_annotations:
            validation_generator = CSVGenerator(
                args.val_annotations,
                args.classes,
                batch_size=args.batch_size
            )
        else:
            validation_generator = None
    else:
        raise ValueError('Invalid data type received: {}'.format(args.dataset_type))

    return train_generator, validation_generator
Example #19
def create_generators(args):
    # create random transform generator for augmenting training data
    transform_generator = random_transform_generator(min_rotation=-0.122173,
                                                     max_rotation=0.122173)
    train_generator, validation_generator = None, None

    if args.dataset_type == 'traffic_signs':
        train_generator = TrafficSignsGenerator(
            args.train_path,
            transform_generator=transform_generator,
            batch_size=args.batch_size,
            group_method='random',
            image_min_side=1080,
            image_max_side=2592)
        if args.val_path:
            validation_generator = TrafficSignsGenerator(
                args.val_path,
                None,
                batch_size=args.batch_size,
                group_method='random')

    return train_generator, validation_generator
Example #20
def evaluateModel(model_path):

    import json
    import os

    with open(
            os.path.expanduser('~') + '/.maskrcnn-modanet/' +
            'savedvars.json') as f:
        savedvars = json.load(f)
    path = savedvars['datapath']

    ann_path = path + "datasets/coco/annotations/"
    ann_orig_path = path + 'datasets/modanet/annotations/'

    coco_path = path + "datasets/coco/"

    from keras_maskrcnn import models

    model = models.load_model(model_path, backbone_name='resnet50')

    from keras_retinanet.utils.transform import random_transform_generator

    transform_generator = random_transform_generator(flip_x_chance=0.5)

    from maskrcnn_modanet.train.coco import CocoGenerator

    validation_generator = CocoGenerator(coco_path,
                                         'val',
                                         batch_size=1,
                                         config=None,
                                         image_min_side=800,
                                         image_max_side=1333)

    from keras_maskrcnn.utils.coco_eval import evaluate_coco

    evaluate_coco(validation_generator, model)
Example #21
def create_generators(args, preprocess_image):
    """ Create generators for training and validation.

    Args
        args             : parseargs object containing configuration for generators.
        preprocess_image : Function that preprocesses an image for the network.
    """
    common_args = {
        'batch_size'       : args.batch_size,
        'image_min_side'   : args.image_min_side,
        'image_max_side'   : args.image_max_side,
        'preprocess_image' : preprocess_image,
    }

    # create random transform generator for augmenting training data
    if args.random_transform:
        transform_generator = random_transform_generator(
            min_rotation=-0.05,
            max_rotation=0.05,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            #min_shear=-0.1,
            #max_shear=0.1,
            min_scaling=(0.8, 0.8),
            max_scaling=(1.2, 1.2),
            flip_x_chance=0.5,
            #flip_y_chance=0.5,
        )
    else:
        transform_generator = random_transform_generator(flip_x_chance=0.5)

    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(
            args.coco_path,
            'train2017',
            transform_generator=transform_generator,
            **common_args
        )

        validation_generator = CocoGenerator(
            args.coco_path,
            'val2017',
            **common_args
        )
    elif args.dataset_type == 'pascal':
        train_generator = PascalVocGenerator(
            args.pascal_path,
            'trainval',
            transform_generator=transform_generator,
            **common_args
        )

        validation_generator = PascalVocGenerator(
            args.pascal_path,
            'test',
            **common_args
        )
    elif args.dataset_type == 'csv':
        train_generator = CSVGenerator(
            args.annotations,
            args.classes,
            transform_generator=transform_generator,
            **common_args
        )

        if args.val_annotations:
            validation_generator = CSVGenerator(
                args.val_annotations,
                args.classes,
                **common_args
            )
        else:
            validation_generator = None
    elif args.dataset_type == 'oid':
        train_generator = OpenImagesGenerator(
            args.main_dir,
            subset='train',
            version=args.version,
            labels_filter=args.labels_filter,
            annotation_cache_dir=args.annotation_cache_dir,
            parent_label=args.parent_label,
            transform_generator=transform_generator,
            **common_args
        )

        validation_generator = OpenImagesGenerator(
            args.main_dir,
            subset='validation',
            version=args.version,
            labels_filter=args.labels_filter,
            annotation_cache_dir=args.annotation_cache_dir,
            parent_label=args.parent_label,
            **common_args
        )
    elif args.dataset_type == 'kitti':
        train_generator = KittiGenerator(
            args.kitti_path,
            subset='train',
            transform_generator=transform_generator,
            **common_args
        )

        validation_generator = KittiGenerator(
            args.kitti_path,
            subset='val',
            **common_args
        )
    else:
        raise ValueError('Invalid data type received: {}'.format(args.dataset_type))

    return train_generator, validation_generator
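For completeness, a minimal sketch (not part of the original code) of driving a dispatcher like the one above programmatically, using an argparse.Namespace with hypothetical CSV paths and only the attributes the 'csv' branch reads:

import argparse
from keras_retinanet import models

backbone = models.backbone('resnet50')
args = argparse.Namespace(
    dataset_type='csv',
    annotations='train_annotations.csv',  # hypothetical path
    classes='classes.csv',                # hypothetical path
    val_annotations=None,
    random_transform=False,
    batch_size=1,
    image_min_side=800,
    image_max_side=1333,
)
train_generator, validation_generator = create_generators(args, backbone.preprocess_image)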
Example #22
def create_generators(args):
    # create random transform generator for augmenting training data
    transform_generator = random_transform_generator(flip_x_chance=0.5)

    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGeneratorMask

        train_generator = CocoGeneratorMask(
            args.coco_path,
            'train2017',
            transform_generator=transform_generator,
            batch_size=args.batch_size,
            image_min_side=600,
            image_max_side=1000,
        )

        validation_generator = CocoGeneratorMask(args.coco_path,
                                                 'val2017',
                                                 batch_size=args.batch_size)
    #elif args.dataset_type == 'pascal':
    #    train_generator = PascalVocGenerator(
    #        args.pascal_path,
    #        'trainval',
    #        transform_generator=transform_generator,
    #        batch_size=args.batch_size
    #    )

    #    validation_generator = PascalVocGenerator(
    #        args.pascal_path,
    #        'test',
    #        batch_size=args.batch_size
    #    )
    #elif args.dataset_type == 'csv':
    #    train_generator = CSVGenerator(
    #        args.annotations,
    #        args.classes,
    #        transform_generator=transform_generator,
    #        batch_size=args.batch_size
    #    )

    #    if args.val_annotations:
    #        validation_generator = CSVGenerator(
    #            args.val_annotations,
    #            args.classes,
    #            batch_size=args.batch_size
    #        )
    #    else:
    #        validation_generator = None
    #elif args.dataset_type == 'oid':
    #    train_generator = OpenImagesGenerator(
    #        args.main_dir,
    #        subset='train',
    #        version=args.version,
    #        labels_filter=args.labels_filter,
    #        annotation_cache_dir=args.annotation_cache_dir,
    #        fixed_labels=args.fixed_labels,
    #        transform_generator=transform_generator,
    #        batch_size=args.batch_size
    #    )

    #    if args.val_annotations:
    #        validation_generator = OpenImagesGenerator(
    #            args.main_dir,
    #            subset='validation',
    #            version=args.version,
    #            labels_filter=args.labels_filter,
    #            annotation_cache_dir=args.annotation_cache_dir,
    #            fixed_labels=args.fixed_labels,
    #            batch_size=args.batch_size
    #        )
    #    else:
    #        validation_generator = None
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
Example #23
ctx = neptune.Context()
img_min_side = 500
img_max_side = 600

DATA_DIR = '/input/deepfashion_data/'

LABELS = os.path.join(DATA_DIR, 'retina_labels.csv')
TRAIN_DATA = os.path.join(DATA_DIR, 'retina_train_less_negs.csv')
VAL_DATA = os.path.join(DATA_DIR, 'retina_valid.csv')

transform_generator = random_transform_generator(
    min_rotation=-0.1,
    max_rotation=0.1,
    min_translation=(-0.2, -0.2),
    max_translation=(0.2, 0.2),
    min_shear=-0.2,
    max_shear=0.2,
    min_scaling=(0.75, 0.75),
    max_scaling=(1.5, 1.5),
    flip_x_chance=0.3,
    flip_y_chance=0.3,
)

train_generator = CSVGenerator(
    TRAIN_DATA,
    LABELS,
    base_dir='/input/deepfashion_data',
    batch_size=1,
    image_max_side=img_max_side,
    image_min_side=img_min_side,
    #    transform_generator=transform_generator
)
Example #24
def get_data_generator(args):

    backbone = models.backbone(args.backbone)
    preprocess_image = backbone.preprocess_image

    common_args = {
        'batch_size': args.batch_size,
        'config': args.config,
        'image_min_side': args.image_min_side,
        'image_max_side': args.image_max_side,
        'preprocess_image': preprocess_image,
    }

    # create random transform generator for augmenting training data
    if args.random_transform:
        transform_generator = random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.9, 0.9),
            max_scaling=(1.1, 1.1),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )
        visual_effect_generator = random_visual_effect_generator(
            contrast_range=(0.9, 1.1),
            brightness_range=(-.1, .1),
            hue_range=(-0.05, 0.05),
            saturation_range=(0.95, 1.05))
    else:
        transform_generator = random_transform_generator(flip_x_chance=0.5)
        visual_effect_generator = None

    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(
            args.coco_path,
            'train2017',
            transform_generator=transform_generator,
            visual_effect_generator=visual_effect_generator,
            **common_args)

        validation_generator = CocoGenerator(args.coco_path,
                                             'val2017',
                                             shuffle_groups=False,
                                             **common_args)
    elif args.dataset_type == 'pascal':
        train_generator = PascalVocGenerator(
            args.pascal_path,
            'trainval',
            transform_generator=transform_generator,
            visual_effect_generator=visual_effect_generator,
            **common_args)

        validation_generator = PascalVocGenerator(args.pascal_path,
                                                  'test',
                                                  shuffle_groups=False,
                                                  **common_args)
    elif args.dataset_type == 'csv':
        train_generator = CSVGenerator(
            args.annotations,
            args.classes,
            transform_generator=transform_generator,
            visual_effect_generator=visual_effect_generator,
            **common_args)

        if args.val_annotations:
            validation_generator = CSVGenerator(args.val_annotations,
                                                args.classes,
                                                shuffle_groups=False,
                                                **common_args)
        else:
            validation_generator = None
    elif args.dataset_type == 'oid':
        train_generator = OpenImagesGenerator(
            args.main_dir,
            subset='train',
            version=args.version,
            labels_filter=args.labels_filter,
            annotation_cache_dir=args.annotation_cache_dir,
            parent_label=args.parent_label,
            transform_generator=transform_generator,
            visual_effect_generator=visual_effect_generator,
            **common_args)

        validation_generator = OpenImagesGenerator(
            args.main_dir,
            subset='validation',
            version=args.version,
            labels_filter=args.labels_filter,
            annotation_cache_dir=args.annotation_cache_dir,
            parent_label=args.parent_label,
            shuffle_groups=False,
            **common_args)
    elif args.dataset_type == 'kitti':
        train_generator = KittiGenerator(
            args.kitti_path,
            subset='train',
            transform_generator=transform_generator,
            visual_effect_generator=visual_effect_generator,
            **common_args)

        validation_generator = KittiGenerator(args.kitti_path,
                                              subset='val',
                                              shuffle_groups=False,
                                              **common_args)
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
Example #25
def create_generators(args):
    # create random transform generator for augmenting training data
    if args.random_transform:
        transform_generator = random_transform_generator(
            min_translation=(-0.3, -0.3),
            max_translation=(0.3, 0.3),
            min_scaling=(0.2, 0.2),
            max_scaling=(2, 2),
            flip_x_chance=0.5,
        )
    else:
        transform_generator = random_transform_generator(flip_x_chance=0.5)

    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(
            args.coco_path,
            'train2017',
            transform_generator=transform_generator,
            batch_size=args.batch_size)

        validation_generator = CocoGenerator(args.coco_path,
                                             'val2017',
                                             batch_size=args.batch_size)
    elif args.dataset_type == 'pascal':
        train_generator = PascalVocGenerator(
            args.pascal_path,
            'trainval',
            transform_generator=transform_generator,
            batch_size=args.batch_size)

        validation_generator = PascalVocGenerator(args.pascal_path,
                                                  'test',
                                                  batch_size=args.batch_size)
    elif args.dataset_type == 'csv':
        train_generator = CSVGenerator(args.annotations,
                                       args.classes,
                                       transform_generator=transform_generator,
                                       batch_size=args.batch_size)

        if args.val_annotations:
            validation_generator = CSVGenerator(args.val_annotations,
                                                args.classes,
                                                batch_size=args.batch_size)
        else:
            validation_generator = None
    elif args.dataset_type == 'oid':
        train_generator = OpenImagesGenerator(
            args.main_dir,
            subset='train',
            version=args.version,
            labels_filter=args.labels_filter,
            annotation_cache_dir=args.annotation_cache_dir,
            fixed_labels=args.fixed_labels,
            transform_generator=transform_generator,
            batch_size=args.batch_size)

        validation_generator = OpenImagesGenerator(
            args.main_dir,
            subset='validation',
            version=args.version,
            labels_filter=args.labels_filter,
            annotation_cache_dir=args.annotation_cache_dir,
            fixed_labels=args.fixed_labels,
            batch_size=args.batch_size)
    elif args.dataset_type == 'kitti':
        train_generator = KittiGenerator(
            args.kitti_path,
            subset='train',
            transform_generator=transform_generator,
            batch_size=args.batch_size)

        validation_generator = KittiGenerator(args.kitti_path,
                                              subset='val',
                                              batch_size=args.batch_size)
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
Example #26
def create_generators(args, preprocess_image):
    """ Create generators for training and validation.

    Args
        args             : parseargs object containing configuration for generators.
        preprocess_image : Function that preprocesses an image for the network.
    """
    global config
    common_args = {
        'batch_size': args.batch_size,
        'image_min_side': args.image_min_side,
        'image_max_side': args.image_max_side,
        'preprocess_image': preprocess_image,
    }

    if 0:
        # create random transform generator for augmenting training data
        transform_generator = random_transform_generator(
            min_rotation=-0.2,
            max_rotation=0.2,
            min_translation=(-0.2, -0.2),
            max_translation=(0.2, 0.2),
            min_shear=-0.2,
            max_shear=0.2,
            min_scaling=(0.8, 0.8),
            max_scaling=(1.2, 1.2),
            flip_x_chance=0.5,
            flip_y_chance=0.0,
        )
    else:
        if 1:
            transform_generator = Compose(
                [
                    HorizontalFlip(p=0.5),
                    # VerticalFlip(p=0.5),
                    # RandomRotate90(p=0.5),
                    OneOf([
                        IAAAdditiveGaussianNoise(),
                        GaussNoise(),
                    ], p=0.2),
                    OneOf([
                        MotionBlur(p=.1),
                        MedianBlur(blur_limit=3, p=.1),
                        Blur(blur_limit=3, p=.1),
                    ],
                          p=0.2),
                    # ShiftScaleRotate(shift_limit=0.0, scale_limit=0.0, rotate_limit=10, p=0.1),
                    # IAAPiecewiseAffine(p=0.05),
                    OneOf([
                        CLAHE(clip_limit=2),
                        IAASharpen(),
                        IAAEmboss(),
                        RandomBrightnessContrast(),
                    ],
                          p=0.2),
                    OneOf([
                        RGBShift(p=1.0,
                                 r_shift_limit=(-20, 20),
                                 g_shift_limit=(-20, 20),
                                 b_shift_limit=(-20, 20)),
                        HueSaturationValue(p=1.0),
                    ],
                          p=0.5),
                    ToGray(p=0.3),
                    JpegCompression(p=0.3, quality_lower=25, quality_upper=99),
                ],
                bbox_params={
                    'format': 'pascal_voc',
                    'min_area': 1,
                    'min_visibility': 0.1,
                    'label_fields': ['labels']
                },
                p=1.0)
        else:
            transform_generator = Compose(
                [
                    HorizontalFlip(p=0.5),
                ],
                bbox_params={
                    'format': 'pascal_voc',
                    'min_area': 1,
                    'min_visibility': 0.1,
                    'label_fields': ['labels']
                },
                p=1.0)

    train_generator = CSVGeneratorWhales(
        args.annotations,
        args.classes,
        transform_generator=transform_generator,
        config=config,
        **common_args)

    if args.val_annotations:
        validation_generator = CSVGeneratorWhales(args.val_annotations,
                                                  args.classes,
                                                  config=config,
                                                  **common_args)
    else:
        validation_generator = None

    return train_generator, validation_generator
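As an aside, an albumentations Compose like the one above is applied per sample rather than per batch. A hedged sketch of how such a pipeline is typically invoked (the image and boxes below are dummies):

import numpy as np

# pascal_voc boxes are [x_min, y_min, x_max, y_max]; labels travels alongside
# because of label_fields=['labels'] in bbox_params
image = np.zeros((600, 800, 3), dtype=np.uint8)
bboxes = [[10, 20, 110, 120]]
labels = [0]

augmented = transform_generator(image=image, bboxes=bboxes, labels=labels)
aug_image = augmented['image']
aug_bboxes = augmented['bboxes']
aug_labels = augmented['labels']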
Example #27
def create_generators(args, config):
    """ Create generators for training and validation.
    """
    # create random transform generator for augmenting training data
    if args.random_transform:
        transform_generator = random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.9, 0.9),
            max_scaling=(1.1, 1.1),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )
    else:
        transform_generator = random_transform_generator(flip_x_chance=0.5)

    if args.dataset_type == 'csv':
        train_generator = CSVGenerator(args.annotations,
                                       args.classes,
                                       transform_generator=transform_generator,
                                       batch_size=args.batch_size,
                                       image_min_side=args.image_min_side,
                                       image_max_side=args.image_max_side)

        if args.val_annotations:
            validation_generator = CSVGenerator(
                args.val_annotations,
                args.classes,
                batch_size=args.batch_size,
                image_min_side=args.image_min_side,
                image_max_side=args.image_max_side)
        else:
            validation_generator = None

    elif args.dataset_type == 'onthefly':
        train_generator = onthefly.OnTheFlyGenerator(
            args.annotations,
            batch_size=args.batch_size,
            base_dir=config["rgb_tile_dir"],
            config=config)
        if args.val_annotations:

            #Replace config subsample with validation subsample. Not the best, or the worst, way to do this.
            config["subsample"] = config["validation_subsample"]

            validation_generator = onthefly.OnTheFlyGenerator(
                args.val_annotations,
                batch_size=args.batch_size,
                base_dir=config["evaluation_tile_dir"],
                config=config)
        else:
            validation_generator = None
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
Example #28
def create_generator(args, config):
    """ Create the data generators.

    Args:
        args: parseargs arguments object.
    """
    # create random transform generator for augmenting training data
    transform_generator = random_transform_generator(
        min_rotation=-0.1,
        max_rotation=0.1,
        min_translation=(-0.1, -0.1),
        max_translation=(0.1, 0.1),
        min_shear=-0.1,
        max_shear=0.1,
        min_scaling=(0.9, 0.9),
        max_scaling=(1.1, 1.1),
        flip_x_chance=0.5,
        flip_y_chance=0.5,
    )

    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        generator = CocoGenerator(args.coco_path,
                                  args.coco_set,
                                  transform_generator=transform_generator,
                                  image_min_side=args.image_min_side,
                                  image_max_side=args.image_max_side)
    elif args.dataset_type == 'pascal':
        generator = PascalVocGenerator(args.pascal_path,
                                       args.pascal_set,
                                       transform_generator=transform_generator,
                                       image_min_side=args.image_min_side,
                                       image_max_side=args.image_max_side)
    elif args.dataset_type == 'csv':
        generator = CSVGenerator(args.annotations,
                                 args.classes,
                                 transform_generator=transform_generator,
                                 image_min_side=args.image_min_side,
                                 image_max_side=args.image_max_side)
    elif args.dataset_type == 'oid':
        generator = OpenImagesGenerator(
            args.main_dir,
            subset=args.subset,
            version=args.version,
            labels_filter=args.labels_filter,
            fixed_labels=args.fixed_labels,
            annotation_cache_dir=args.annotation_cache_dir,
            transform_generator=transform_generator,
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side)
    elif args.dataset_type == 'kitti':
        generator = KittiGenerator(args.kitti_path,
                                   subset=args.subset,
                                   transform_generator=transform_generator,
                                   image_min_side=args.image_min_side,
                                   image_max_side=args.image_max_side)
    elif args.dataset_type == 'onthefly':
        generator = onthefly.OnTheFlyGenerator(
            args.annotations, batch_size=args.batch_size, config=config)
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return generator
Example #29
    def on_train_begin(self, logs={}):
        self.losses = []

    def on_batch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))


#model = keras_retinanet.models.backbone('resnet50').retinanet(num_classes=11)
model = load_model('./snapshots/weights.35-0.82.hdf5',
                   backbone_name='resnet50')
batch_size = 4
epochs = 150

transform_generator = random_transform_generator(min_rotation=-0.1,
                                                 max_rotation=0.1,
                                                 min_scaling=(0.9, 0.9),
                                                 max_scaling=(1.1, 1.1),
                                                 flip_x_chance=0.5)

generator = CSVGenerator(
    csv_data_file='./data_set_retina/train.csv',
    csv_class_file='./data_set_retina/class_id_mapping.txt',
    batch_size=batch_size,
    transform_generator=transform_generator)

generator_val = CSVGenerator(
    csv_data_file='./data_set_retina/val.csv',
    csv_class_file='./data_set_retina/class_id_mapping.txt',
    batch_size=batch_size,
    transform_generator=transform_generator)
#    transform_generator=transform_generator
Example #30
def create_generators(args, preprocess_image):
    """ Create generators for training and validation.

    Args
        args             : parseargs object containing configuration for generators.
        preprocess_image : Function that preprocesses an image for the network.
    """
    common_args = {
        'batch_size': args.batch_size,
        'config': args.config,
        'image_min_side': args.image_min_side,
        'image_max_side': args.image_max_side,
        'preprocess_image': preprocess_image,
    }

    # create random transform generator for augmenting training data
    if args.random_transform:
        transform_generator = random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.9, 0.9),
            max_scaling=(1.1, 1.1),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )
    else:
        transform_generator = random_transform_generator(flip_x_chance=0.5)

    if args.dataset_type == 'csv':
        train_generator = CSVGenerator(args.annotations,
                                       args.classes,
                                       transform_generator=transform_generator,
                                       **common_args)

        if args.val_annotations:
            validation_generator = CSVGenerator(args.val_annotations,
                                                args.classes, **common_args)
        else:
            validation_generator = None
    #region +mk@sail
    elif args.dataset_type == 'sail':
        if 'steps' in dir(args):
            train_generator = SAIL_Generator(args.annotations,
                                             args.classes,
                                             transform_generator=None,
                                             force_steps_per_epoch=args.steps,
                                             dataset_type='train',
                                             **common_args)
        else:
            train_generator = SAIL_Generator(args.annotations,
                                             args.classes,
                                             transform_generator=None,
                                             dataset_type='train',
                                             **common_args)

        if args.val_annotations:
            if 'val_steps' in dir(args):
                validation_generator = SAIL_Generator(args.val_annotations,
                                                      args.classes,
                                                      val_steps=args.val_steps,
                                                      dataset_type='val',
                                                      transform_generator=None,
                                                      **common_args)
            else:
                validation_generator = SAIL_Generator(args.val_annotations,
                                                      args.classes,
                                                      dataset_type='val',
                                                      transform_generator=None,
                                                      **common_args)
        else:
            validation_generator = None
    #endregion +mk@sail
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
Example #31
def create_vision_generators(args, extra_args):
    if args.vision_module_type == 'retinanet':
        common_args = {
            'batch_size': args.batch_size,
            'config': args.config,
            'image_min_side': args.image_min_side,
            'image_max_side': args.image_max_side,
            'preprocess_image': extra_args.preprocess_image,
        }

        # create random transform generator for augmenting training data
        if args.random_transform:
            transform_generator = random_transform_generator(
                min_rotation=-0.1,
                max_rotation=0.1,
                min_translation=(-0.1, -0.1),
                max_translation=(0.1, 0.1),
                min_shear=-0.1,
                max_shear=0.1,
                min_scaling=(0.9, 0.9),
                max_scaling=(1.1, 1.1),
                flip_x_chance=0.0,  # disabled for equivariance purposes - was 0.5
                flip_y_chance=0.0,  # disabled for equivariance purposes - was 0.5
            )
        else:
            transform_generator = random_transform_generator(flip_x_chance=0.5)

        train_generator = RetinanetCSVGenerator(
            csv_data_file=args.annotations,
            csv_class_file=args.classes,
            transform_generator=transform_generator,
            **common_args)

        if args.val_annotations:
            validation_generator = RetinanetCSVGenerator(
                csv_data_file=args.val_annotations,
                csv_class_file=args.classes,
                **common_args)
        else:
            validation_generator = None

        return train_generator, validation_generator

    if args.vision_module_type == 'simple_cnn':
        common_args = {
            'batch_size': args.batch_size,
            'image_min_side': args.image_min_side,
            'image_max_side': args.image_max_side,
            'preprocess_image': extra_args.preprocess_image,
        }

        # create random transform generator for augmenting training data
        if args.random_transform:
            transform_generator = random_transform_generator(
                min_rotation=-0.1,
                max_rotation=0.1,
                min_translation=(-0.1, -0.1),
                max_translation=(0.1, 0.1),
                min_shear=-0.1,
                max_shear=0.1,
                min_scaling=(0.9, 0.9),
                max_scaling=(1.1, 1.1),
                flip_x_chance=0.5,
                flip_y_chance=0.5,
            )
        else:
            transform_generator = random_transform_generator(flip_x_chance=0.5)

        # uses the same generator as the retinanet
        train_generator = RetinanetCSVGenerator(
            csv_data_file=args.annotations,
            csv_class_file=args.classes,
            transform_generator=transform_generator,
            **common_args)

        if args.val_annotations:
            validation_generator = RetinanetCSVGenerator(
                csv_data_file=args.val_annotations,
                csv_class_file=args.classes,
                **common_args)
        else:
            validation_generator = None

        return train_generator, validation_generator