Esempio n. 1
0
def create_generators(args, group_queue):
    """ Create training and validation data generators.

    Args
        args        : parseargs object with dataset configuration.
        group_queue : queue forwarded to the training CSVGenerator
                      (used only for the 'csv' dataset type).

    Returns
        (train_generator, validation_generator); validation_generator is
        None for 'csv' when args.val_annotations is not set.

    Raises
        ValueError : if args.dataset_type is not 'coco', 'pascal' or 'csv'.
    """
    # Fail fast on an unknown dataset type, before building augmenters.
    # (Fixes a NameError: the original raise referenced a bare
    # 'dataset_type' name that does not exist.)
    if args.dataset_type not in ('coco', 'pascal', 'csv'):
        raise ValueError('Invalid data type received: {}'.format(args.dataset_type))

    # Training images are augmented; validation images are left untouched.
    train_image_data_generator = keras.preprocessing.image.ImageDataGenerator(
        horizontal_flip=True,
        vertical_flip=True,
        zoom_range=0.15,
        rotation_range=25)
    val_image_data_generator = keras.preprocessing.image.ImageDataGenerator()

    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from keras_retinanet.preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(args.coco_path,
                                        'train2017',
                                        train_image_data_generator,
                                        batch_size=args.batch_size)

        validation_generator = CocoGenerator(args.coco_path,
                                             'val2017',
                                             val_image_data_generator,
                                             batch_size=args.batch_size)
    elif args.dataset_type == 'pascal':
        train_generator = PascalVocGenerator(args.pascal_path,
                                             'trainval',
                                             train_image_data_generator,
                                             batch_size=args.batch_size)

        validation_generator = PascalVocGenerator(args.pascal_path,
                                                  'test',
                                                  val_image_data_generator,
                                                  batch_size=args.batch_size)
    else:  # 'csv'
        train_generator = CSVGenerator(args.annotations,
                                       args.classes,
                                       args.mean_image,
                                       train_image_data_generator,
                                       batch_size=args.batch_size,
                                       image_min_side=int(args.image_min_side),
                                       image_max_side=int(args.image_max_side),
                                       group_queue=group_queue)

        if args.val_annotations:
            validation_generator = CSVGenerator(args.val_annotations,
                                                args.classes,
                                                args.mean_image,
                                                val_image_data_generator,
                                                batch_size=args.batch_size)
        else:
            validation_generator = None

    return train_generator, validation_generator
Esempio n. 2
0
def create_generators(args):
    """ Create training and validation data generators.

    Args
        args : parseargs object with dataset configuration.

    Returns
        (train_generator, validation_generator); validation_generator is
        None for 'csv' when args.val_annotations is not set.

    Raises
        ValueError : if args.dataset_type is not 'coco', 'pascal' or 'csv'.
    """
    # Fail fast on an unknown dataset type, before building augmenters.
    # (Fixes a NameError: the original raise referenced a bare
    # 'dataset_type' name that does not exist.)
    if args.dataset_type not in ('coco', 'pascal', 'csv'):
        raise ValueError('Invalid data type received: {}'.format(args.dataset_type))

    # Heavy geometric augmentation plus additive noise for training images.
    train_image_data_generator = keras.preprocessing.image.ImageDataGenerator(
        horizontal_flip=True,
        vertical_flip=True,
        rotation_range=180,
        width_shift_range=5,
        height_shift_range=5,
        preprocessing_function=add_noise  # Kai - add noise to input
    )
    val_image_data_generator = keras.preprocessing.image.ImageDataGenerator(
        # Kai - may want to add noise to input as well
    )

    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from keras_retinanet.preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(args.coco_path,
                                        'train2017',
                                        train_image_data_generator,
                                        batch_size=args.batch_size)

        validation_generator = CocoGenerator(args.coco_path,
                                             'val2017',
                                             val_image_data_generator,
                                             batch_size=args.batch_size)
    elif args.dataset_type == 'pascal':
        train_generator = PascalVocGenerator(args.pascal_path,
                                             'trainval',
                                             train_image_data_generator,
                                             batch_size=args.batch_size)

        validation_generator = PascalVocGenerator(args.pascal_path,
                                                  'test',
                                                  val_image_data_generator,
                                                  batch_size=args.batch_size)
    else:  # 'csv'
        train_generator = CSVGenerator(args.annotations,
                                       args.classes,
                                       train_image_data_generator,
                                       batch_size=args.batch_size)

        if args.val_annotations:
            validation_generator = CSVGenerator(args.val_annotations,
                                                args.classes,
                                                val_image_data_generator,
                                                batch_size=args.batch_size)
        else:
            validation_generator = None

    return train_generator, validation_generator
Esempio n. 3
0
def create_generators(args):
    """Build and return (train_generator, validation_generator) for the
    dataset type selected on the command line.
    """
    # Horizontal flips are the only training-time augmentation here;
    # validation images pass through unchanged.
    augmenter = keras.preprocessing.image.ImageDataGenerator(
        horizontal_flip=True, )
    passthrough = keras.preprocessing.image.ImageDataGenerator()

    dataset = args.dataset_type
    if dataset == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from keras_retinanet.preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(
            args.coco_path, 'train2017', augmenter,
            batch_size=args.batch_size)
        validation_generator = CocoGenerator(
            args.coco_path, 'val2017', passthrough,
            batch_size=args.batch_size)
    elif dataset == 'pascal':
        train_generator = PascalVocGenerator(
            args.pascal_path, 'trainval', augmenter,
            batch_size=args.batch_size)
        validation_generator = PascalVocGenerator(
            args.pascal_path, 'test', passthrough,
            batch_size=args.batch_size)
    elif dataset == 'csv':
        train_generator = CSVGenerator(
            args.annotations, args.classes, augmenter,
            batch_size=args.batch_size, base_dir="./")
        if args.val_annotations:
            validation_generator = CSVGenerator(
                args.val_annotations, args.classes, passthrough,
                batch_size=args.batch_size, base_dir="./")
        else:
            validation_generator = None
    else:
        raise ValueError('Invalid data type received: {}'.format(dataset))

    return train_generator, validation_generator
Esempio n. 4
0
    def create_generators(self, args, transform_generator=None):
        """Create Pascal VOC train/validation generators.

        Returns a dict with keys "train_generator" and
        "validation_generator".
        """
        trainval = PascalVocGenerator(
            args.pascal_path,
            'trainval',
            transform_generator=transform_generator,
            batch_size=args.batch_size)
        test = PascalVocGenerator(
            args.pascal_path,
            'test',
            batch_size=args.batch_size)
        return {"train_generator": trainval, "validation_generator": test}
Esempio n. 5
0
def create_generator(args, config):
    """ Create a single generator for evaluation.

    Returns the validation-split generator for the chosen dataset type;
    raises ValueError for an unknown type.
    """
    kind = args.dataset_type

    if kind == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from keras_retinanet.preprocessing.coco import CocoGenerator

        return CocoGenerator(
            args.coco_path,
            'val2017',
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side)

    if kind == 'pascal':
        return PascalVocGenerator(
            args.pascal_path,
            'test',
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side)

    if kind == 'csv':
        return CSVGenerator(
            args.annotations,
            args.classes,
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side)

    if kind == 'onthefly':
        return onthefly.OnTheFlyGenerator(
            args.annotations,
            batch_size=args.batch_size,
            base_dir=config["rgb_tile_dir"],
            config=config)

    raise ValueError('Invalid data type received: {}'.format(kind))
def create_generator(args):
    """ Create a single generator for evaluation.

    Returns the validation-split generator for the chosen dataset type;
    raises ValueError for an unknown type.
    """
    kind = args.dataset_type

    if kind == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        return CocoGenerator(
            args.coco_path,
            'val2017',
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side,
            config=args.config)

    if kind == 'pascal':
        return PascalVocGenerator(
            args.pascal_path,
            'test',
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side,
            config=args.config)

    if kind == 'csv':
        return CSVGenerator(
            args.annotations,
            args.classes,
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side,
            config=args.config)

    raise ValueError('Invalid data type received: {}'.format(kind))
Esempio n. 7
0
def create_generator(args, preprocess_image):
    """ Create a single generator for evaluation.

    Args
        args             : parseargs object with dataset configuration.
        preprocess_image : image preprocessing function for the network.
    """
    # Keyword arguments shared by every generator type.
    shared = {
        'preprocess_image': preprocess_image,
    }

    kind = args.dataset_type
    if kind == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from keras_retinanet.preprocessing.coco import CocoGenerator

        return CocoGenerator(
            args.coco_path,
            'val2017',
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side,
            config=args.config,
            shuffle_groups=False,
            **shared)

    if kind == 'pascal':
        return PascalVocGenerator(
            args.pascal_path,
            'test',
            image_extension=args.image_extension,
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side,
            config=args.config,
            shuffle_groups=False,
            **shared)

    if kind == 'csv':
        return CSVGenerator(
            args.annotations,
            args.classes,
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side,
            config=args.config,
            shuffle_groups=False,
            **shared)

    raise ValueError('Invalid data type received: {}'.format(kind))
Esempio n. 8
0
def create_generators(args, preprocess_image):
    """ Create generators for training and validation.

    Args
        args             : parseargs object containing configuration for generators.
        preprocess_image : Function that preprocesses an image for the network.
    """
    # Keyword arguments shared by every generator.
    shared = {
        'batch_size'       : args.batch_size,
        'image_min_side'   : args.image_min_side,
        'image_max_side'   : args.image_max_side,
        'preprocess_image' : preprocess_image,
    }

    # Geometric augmentation for training data; shear and vertical flips
    # are deliberately left disabled here.
    if args.random_transform:
        tg = random_transform_generator(
            min_rotation=-0.05,
            max_rotation=0.05,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_scaling=(0.8, 0.8),
            max_scaling=(1.2, 1.2),
            flip_x_chance=0.5,
        )
    else:
        tg = random_transform_generator(flip_x_chance=0.5)

    kind = args.dataset_type
    if kind == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(
            args.coco_path, 'train2017',
            transform_generator=tg, **shared)
        validation_generator = CocoGenerator(
            args.coco_path, 'val2017', **shared)
    elif kind == 'pascal':
        train_generator = PascalVocGenerator(
            args.pascal_path, 'trainval',
            transform_generator=tg, **shared)
        validation_generator = PascalVocGenerator(
            args.pascal_path, 'test', **shared)
    elif kind == 'csv':
        train_generator = CSVGenerator(
            args.annotations, args.classes,
            transform_generator=tg, **shared)
        validation_generator = (
            CSVGenerator(args.val_annotations, args.classes, **shared)
            if args.val_annotations else None)
    elif kind == 'oid':
        # Open Images options common to both splits.
        oid_kwargs = dict(
            version=args.version,
            labels_filter=args.labels_filter,
            annotation_cache_dir=args.annotation_cache_dir,
            parent_label=args.parent_label,
        )
        train_generator = OpenImagesGenerator(
            args.main_dir, subset='train',
            transform_generator=tg, **oid_kwargs, **shared)
        validation_generator = OpenImagesGenerator(
            args.main_dir, subset='validation', **oid_kwargs, **shared)
    elif kind == 'kitti':
        train_generator = KittiGenerator(
            args.kitti_path, subset='train',
            transform_generator=tg, **shared)
        validation_generator = KittiGenerator(
            args.kitti_path, subset='val', **shared)
    else:
        raise ValueError('Invalid data type received: {}'.format(args.dataset_type))

    return train_generator, validation_generator
Esempio n. 9
0
    # NOTE(review): this is the body of a script entry point whose enclosing
    # definition is not visible in this chunk.
    args = parse_args()

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the model
    print('Loading model, this may take a second...')
    model = keras.models.load_model(args.model, custom_objects=custom_objects)

    # create image data generator object (no augmentation at test time)
    test_image_data_generator = keras.preprocessing.image.ImageDataGenerator()

    # create a generator for testing data
    # test_generator = CocoGenerator(
    #     args.coco_path,
    #     args.set,
    #     test_image_data_generator,
    # )

    # Pascal VOC test split is evaluated instead of the commented-out COCO
    # alternative above.
    test_generator = PascalVocGenerator(
        args.voc_path,
        'test',
        test_image_data_generator,
    )
    evaluate_voc(test_generator, model, args.score_threshold)
Esempio n. 10
0
# Load a trained RetinaNet model from a fixed snapshot path.
model = keras.models.load_model(
    '/data/users/xiziwang/tools/nsp/snapshots/origin_resnet50_voc_best.h5',
    custom_objects=custom_objects)
#print(model.summary())

# ## Initialize data generators

# In[3]:

# create image data generator object (no augmentation for evaluation)
val_image_data_generator = keras.preprocessing.image.ImageDataGenerator()

# create a generator for testing data
val_generator = PascalVocGenerator(
    '/data/users/xiziwang/tools/nsp/JHdevkit/VOC2007',
    'test',
    val_image_data_generator,
    batch_size=1,
)
index = 0

# ## Run detection on example

# In[6]:

# Load the list of image filenames and open the output file for results.
f = open('/data/users/xiziwang/tobelabeled/files.txt', 'r')
files = f.read().splitlines()
result = open('/data/users/xiziwang/tobelabeled/result.txt', 'w')

# NOTE(review): the body of this loop is missing — the snippet appears to
# be truncated at this point.
for fn in files:

if __name__ == '__main__':
    # parse arguments
    args = parse_args()

    # root directory of the Pascal VOC style dataset
    train_path = args.path

    # create image data generator objects (flips explicitly disabled)
    train_image_data_generator = keras.preprocessing.image.ImageDataGenerator(
        horizontal_flip=False, )

    # create a generator for training data
    train_generator = PascalVocGenerator(
        data_dir=train_path,
        set_name='train',
        image_data_generator=train_image_data_generator,
    )

    test_image_data_generator = keras.preprocessing.image.ImageDataGenerator()

    # create a generator for testing data (the 'val' split)
    test_generator = PascalVocGenerator(
        data_dir=train_path,
        set_name='val',
        image_data_generator=test_image_data_generator,
    )

    num_classes = train_generator.num_classes()

    # create the model
    # NOTE(review): the snippet ends here; model construction is truncated.
Esempio n. 12
0
def create_generator(args, config):
    """ Create the data generator for the requested dataset type.

    Args:
        args: parseargs arguments object.
        config: configuration dict (forwarded to the on-the-fly generator).

    Returns:
        A single data generator.

    Raises:
        ValueError: if args.dataset_type is not recognised.
    """
    # Fail fast on an unknown dataset type, before building the transform
    # generator.
    if args.dataset_type not in ('coco', 'pascal', 'csv', 'oid', 'kitti', 'onthefly'):
        raise ValueError('Invalid data type received: {}'.format(args.dataset_type))

    # create random transform generator for augmenting training data
    transform_generator = random_transform_generator(
        min_rotation=-0.1,
        max_rotation=0.1,
        min_translation=(-0.1, -0.1),
        max_translation=(0.1, 0.1),
        min_shear=-0.1,
        max_shear=0.1,
        min_scaling=(0.9, 0.9),
        max_scaling=(1.1, 1.1),
        flip_x_chance=0.5,
        flip_y_chance=0.5,
    )

    if args.dataset_type == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        generator = CocoGenerator(args.coco_path,
                                  args.coco_set,
                                  transform_generator=transform_generator,
                                  image_min_side=args.image_min_side,
                                  image_max_side=args.image_max_side)
    elif args.dataset_type == 'pascal':
        generator = PascalVocGenerator(args.pascal_path,
                                       args.pascal_set,
                                       transform_generator=transform_generator,
                                       image_min_side=args.image_min_side,
                                       image_max_side=args.image_max_side)
    elif args.dataset_type == 'csv':
        generator = CSVGenerator(args.annotations,
                                 args.classes,
                                 transform_generator=transform_generator,
                                 image_min_side=args.image_min_side,
                                 image_max_side=args.image_max_side)
    elif args.dataset_type == 'oid':
        generator = OpenImagesGenerator(
            args.main_dir,
            subset=args.subset,
            version=args.version,
            labels_filter=args.labels_filter,
            fixed_labels=args.fixed_labels,
            annotation_cache_dir=args.annotation_cache_dir,
            transform_generator=transform_generator,
            image_min_side=args.image_min_side,
            image_max_side=args.image_max_side)
    else:  # 'onthefly'
        # BUG FIX: the result was previously assigned to
        # 'validation_generator', leaving 'generator' unbound at the
        # return below (NameError for this dataset type).
        generator = onthefly.OnTheFlyGenerator(
            args.annotations, batch_size=args.batch_size, config=config)

    return generator
Esempio n. 13
0
def create_generators(args):
    """Build (train_generator, validation_generator) for args.dataset_type."""
    # Aggressive translation/scaling augmentation when requested, otherwise
    # horizontal flips only.
    if args.random_transform:
        tg = random_transform_generator(
            min_translation=(-0.3, -0.3),
            max_translation=(0.3, 0.3),
            min_scaling=(0.2, 0.2),
            max_scaling=(2, 2),
            flip_x_chance=0.5,
        )
    else:
        tg = random_transform_generator(flip_x_chance=0.5)

    kind = args.dataset_type
    if kind == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(
            args.coco_path, 'train2017',
            transform_generator=tg, batch_size=args.batch_size)
        validation_generator = CocoGenerator(
            args.coco_path, 'val2017', batch_size=args.batch_size)
    elif kind == 'pascal':
        train_generator = PascalVocGenerator(
            args.pascal_path, 'trainval',
            transform_generator=tg, batch_size=args.batch_size)
        validation_generator = PascalVocGenerator(
            args.pascal_path, 'test', batch_size=args.batch_size)
    elif kind == 'csv':
        train_generator = CSVGenerator(
            args.annotations, args.classes,
            transform_generator=tg, batch_size=args.batch_size)
        validation_generator = (
            CSVGenerator(args.val_annotations, args.classes,
                         batch_size=args.batch_size)
            if args.val_annotations else None)
    elif kind == 'oid':
        # Open Images options common to both splits.
        oid_kwargs = dict(
            version=args.version,
            labels_filter=args.labels_filter,
            annotation_cache_dir=args.annotation_cache_dir,
            fixed_labels=args.fixed_labels,
            batch_size=args.batch_size,
        )
        train_generator = OpenImagesGenerator(
            args.main_dir, subset='train',
            transform_generator=tg, **oid_kwargs)
        validation_generator = OpenImagesGenerator(
            args.main_dir, subset='validation', **oid_kwargs)
    elif kind == 'kitti':
        train_generator = KittiGenerator(
            args.kitti_path, subset='train',
            transform_generator=tg, batch_size=args.batch_size)
        validation_generator = KittiGenerator(
            args.kitti_path, subset='val', batch_size=args.batch_size)
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
Esempio n. 14
0
def get_data_generator(args):
    """Build (train_generator, validation_generator) for training.

    The backbone-specific preprocessing function is looked up from
    args.backbone and shared by both generators.
    """
    backbone = models.backbone(args.backbone)
    preprocess_image = backbone.preprocess_image

    # Keyword arguments shared by every generator.
    shared = {
        'batch_size': args.batch_size,
        'config': args.config,
        'image_min_side': args.image_min_side,
        'image_max_side': args.image_max_side,
        'preprocess_image': preprocess_image,
    }

    # Geometric + photometric augmentation when requested, otherwise just
    # horizontal flips and no visual effects.
    if args.random_transform:
        tg = random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.9, 0.9),
            max_scaling=(1.1, 1.1),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )
        veg = random_visual_effect_generator(
            contrast_range=(0.9, 1.1),
            brightness_range=(-.1, .1),
            hue_range=(-0.05, 0.05),
            saturation_range=(0.95, 1.05))
    else:
        tg = random_transform_generator(flip_x_chance=0.5)
        veg = None

    kind = args.dataset_type
    if kind == 'coco':
        # import here to prevent unnecessary dependency on cocoapi
        from ..preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(
            args.coco_path, 'train2017',
            transform_generator=tg,
            visual_effect_generator=veg,
            **shared)
        validation_generator = CocoGenerator(
            args.coco_path, 'val2017',
            shuffle_groups=False, **shared)
    elif kind == 'pascal':
        train_generator = PascalVocGenerator(
            args.pascal_path, 'trainval',
            transform_generator=tg,
            visual_effect_generator=veg,
            **shared)
        validation_generator = PascalVocGenerator(
            args.pascal_path, 'test',
            shuffle_groups=False, **shared)
    elif kind == 'csv':
        train_generator = CSVGenerator(
            args.annotations, args.classes,
            transform_generator=tg,
            visual_effect_generator=veg,
            **shared)
        validation_generator = (
            CSVGenerator(args.val_annotations, args.classes,
                         shuffle_groups=False, **shared)
            if args.val_annotations else None)
    elif kind == 'oid':
        # Open Images options common to both splits.
        oid_kwargs = dict(
            version=args.version,
            labels_filter=args.labels_filter,
            annotation_cache_dir=args.annotation_cache_dir,
            parent_label=args.parent_label,
        )
        train_generator = OpenImagesGenerator(
            args.main_dir, subset='train',
            transform_generator=tg,
            visual_effect_generator=veg,
            **oid_kwargs, **shared)
        validation_generator = OpenImagesGenerator(
            args.main_dir, subset='validation',
            shuffle_groups=False, **oid_kwargs, **shared)
    elif kind == 'kitti':
        train_generator = KittiGenerator(
            args.kitti_path, subset='train',
            transform_generator=tg,
            visual_effect_generator=veg,
            **shared)
        validation_generator = KittiGenerator(
            args.kitti_path, subset='val',
            shuffle_groups=False, **shared)
    else:
        raise ValueError('Invalid data type received: {}'.format(
            args.dataset_type))

    return train_generator, validation_generator
Esempio n. 15
0
def create_generators(args, preprocess_image):
    """ Create generators for training and validation.

    Args
        args             : parseargs object containing configuration for generators.
        preprocess_image : Function that preprocesses an image for the network.
    """
    # Keyword arguments shared by every generator built below.
    shared_kwargs = {
        "batch_size": args.batch_size,
        "config": args.config,
        "image_min_side": args.image_min_side,
        "image_max_side": args.image_max_side,
        "preprocess_image": preprocess_image,
    }

    # Training-time augmentation: the full random-transform suite when
    # requested, otherwise horizontal flips only.
    if not args.random_transform:
        augmenter = random_transform_generator(flip_x_chance=0.5)
    else:
        augmenter = random_transform_generator(
            min_rotation=-0.1,
            max_rotation=0.1,
            min_translation=(-0.1, -0.1),
            max_translation=(0.1, 0.1),
            min_shear=-0.1,
            max_shear=0.1,
            min_scaling=(0.9, 0.9),
            max_scaling=(1.1, 1.1),
            flip_x_chance=0.5,
            flip_y_chance=0.5,
        )

    dataset_type = args.dataset_type
    if dataset_type == "coco":
        # import here to prevent unnecessary dependency on cocoapi
        from keras_retinanet.preprocessing.coco import CocoGenerator

        train_generator = CocoGenerator(
            args.coco_path, "train2017",
            transform_generator=augmenter,
            **shared_kwargs)
        validation_generator = CocoGenerator(
            args.coco_path, "val2017",
            shuffle_groups=False,
            **shared_kwargs)
    elif dataset_type == "pascal":
        train_generator = PascalVocGenerator(
            args.pascal_path, "trainval",
            transform_generator=augmenter,
            **shared_kwargs)
        validation_generator = PascalVocGenerator(
            args.pascal_path, "test",
            shuffle_groups=False,
            **shared_kwargs)
    elif dataset_type == "csv":
        train_generator = CSVGenerator(
            args.annotations, args.classes,
            transform_generator=augmenter,
            **shared_kwargs)

        # Validation annotations are optional for CSV datasets.
        validation_generator = None
        if args.val_annotations:
            validation_generator = CSVGenerator(
                args.val_annotations, args.classes,
                shuffle_groups=False,
                **shared_kwargs)
    elif dataset_type == "oid":
        # Open Images settings common to both subsets.
        oid_kwargs = {
            "version": args.version,
            "labels_filter": args.labels_filter,
            "annotation_cache_dir": args.annotation_cache_dir,
            "parent_label": args.parent_label,
        }
        train_generator = OpenImagesGenerator(
            args.main_dir, subset="train",
            transform_generator=augmenter,
            **oid_kwargs, **shared_kwargs)
        validation_generator = OpenImagesGenerator(
            args.main_dir, subset="validation",
            shuffle_groups=False,
            **oid_kwargs, **shared_kwargs)
    elif dataset_type == "kitti":
        train_generator = KittiGenerator(
            args.kitti_path, subset="train",
            transform_generator=augmenter,
            **shared_kwargs)
        validation_generator = KittiGenerator(
            args.kitti_path, subset="val",
            shuffle_groups=False,
            **shared_kwargs)
    else:
        raise ValueError("Invalid data type received: {}".format(
            args.dataset_type))

    return train_generator, validation_generator
# --- Example 16 (scraped snippet separator) ---
        # RetinaNet-style two-headed loss: smooth L1 on the box-regression
        # output, focal loss on the classification output.
        'regression': keras_retinanet.losses.smooth_l1(),
        'classification': keras_retinanet.losses.focal()
    },
                  optimizer=keras.optimizers.adam(lr=1e-5, clipnorm=0.001))

    # print model summary
    print(model.summary())

    # create image data generator objects
    # (training augments with horizontal flips only; validation is unaugmented)
    train_image_data_generator = keras.preprocessing.image.ImageDataGenerator(
        horizontal_flip=True, )
    val_image_data_generator = keras.preprocessing.image.ImageDataGenerator()

    # create a generator for training data
    train_generator = PascalVocGenerator(args.voc_path,
                                         'trainval',
                                         train_image_data_generator,
                                         batch_size=args.batch_size)

    # create a generator for testing data
    val_generator = PascalVocGenerator(args.voc_path,
                                       'test',
                                       val_image_data_generator,
                                       batch_size=args.batch_size)

    # start training
    model.fit_generator(
        generator=train_generator,
        # one pass over the dataset per epoch (final partial batch is dropped)
        steps_per_epoch=len(train_generator.image_names) // args.batch_size,
        epochs=50,
        verbose=1,
        validation_data=val_generator,
# --- Example 17 (scraped snippet separator) ---
    # parse arguments
    args = parse_args()

    # make sure keras is the minimum required version
    check_keras_version()

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    keras.backend.tensorflow_backend.set_session(get_session())

    # create the model
    # NOTE(review): custom_objects presumably maps the keras-retinanet custom
    # layers/losses the saved model references — confirm it is defined at
    # module level.
    print('Loading model, this may take a second...')
    model = keras.models.load_model(args.model, custom_objects=custom_objects)

    # create image data generator object (no augmentation at test time)
    test_image_data_generator = keras.preprocessing.image.ImageDataGenerator()

    # create a generator for testing data
    test_generator = PascalVocGenerator(args.pascal_path, args.set,
                                        test_image_data_generator)

    # run PASCAL VOC evaluation with the configured score/IoU thresholds,
    # optionally saving annotated detection images
    voc_evaluator = VOCEvaluator(generator=test_generator,
                                 model=model,
                                 threshold=args.score_threshold,
                                 iou_threshold=args.iou_threshold,
                                 max_detections=args.max_det,
                                 save=args.save_images,
                                 save_path=args.save_path)
    voc_evaluator.evaluate()