Example #1
import os
import sys

import keras
from keras import backend as K
from keras.optimizers import Adam

# parse_args, create_generators, create_callbacks, get_session, efficientdet,
# smooth_l1, smooth_l1_quad, focal, BASE_WEIGHTS_PATH and WEIGHTS_HASHES are
# provided by the surrounding EfficientDet training code.


def main(args=None):
    # parse arguments
    if args is None:
        args = sys.argv[1:]
    args = parse_args(args)

    # create the generators
    train_generator, validation_generator = create_generators(args)

    num_classes = train_generator.num_classes()
    num_anchors = train_generator.num_anchors

    # optionally choose specific GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    K.set_session(get_session())

    model, prediction_model = efficientdet(
        args.phi,
        num_classes=num_classes,
        num_anchors=num_anchors,
        weighted_bifpn=args.weighted_bifpn,
        freeze_bn=args.freeze_bn,
        detect_quadrangle=args.detect_quadrangle)
    # load pretrained weights
    if args.snapshot:
        if args.snapshot == 'imagenet':
            model_name = 'efficientnet-b{}'.format(args.phi)
            file_name = '{}_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'.format(
                model_name)
            file_hash = WEIGHTS_HASHES[model_name][1]
            weights_path = keras.utils.get_file(file_name,
                                                BASE_WEIGHTS_PATH + file_name,
                                                cache_subdir='models',
                                                file_hash=file_hash)
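            # by_name=True loads only the layers whose names match the backbone
            # weights file, leaving the detection heads freshly initialised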
            model.load_weights(weights_path, by_name=True)
        else:
            print('Loading model, this may take a second...')
            model.load_weights(args.snapshot, by_name=True)

    # freeze backbone layers
    if args.freeze_backbone:
        # EfficientNet backbone size for phi = 0..6 (B0..B6); layers below this
        # index are frozen
        for i in range(1, [227, 329, 329, 374, 464, 566, 656][args.phi]):
            model.layers[i].trainable = False

    if args.gpu and len(args.gpu.split(',')) > 1:
        model = keras.utils.multi_gpu_model(model,
                                            gpus=list(
                                                map(int, args.gpu.split(','))))

    # compile the model: smooth L1 (or its quadrangle variant) for box
    # regression, focal loss for classification
    model.compile(
        optimizer=Adam(lr=1e-3),
        loss={
            'regression':
            smooth_l1_quad() if args.detect_quadrangle else smooth_l1(),
            'classification':
            focal()
        },
    )

    # print(model.summary())

    # create the callbacks
    callbacks = create_callbacks(
        model,
        prediction_model,
        validation_generator,
        args,
    )

    if not args.compute_val_loss:
        validation_generator = None
    elif args.compute_val_loss and validation_generator is None:
        raise ValueError(
            'When you have no validation data, you should not specify --compute-val-loss.'
        )

    # start training
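    # fit_generator is the Keras 2.x API; newer tf.keras versions accept
    # generators directly in model.fit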
    return model.fit_generator(generator=train_generator,
                               steps_per_epoch=args.steps,
                               initial_epoch=0,
                               epochs=args.epochs,
                               verbose=1,
                               callbacks=callbacks,
                               workers=args.workers,
                               use_multiprocessing=args.multiprocessing,
                               max_queue_size=args.max_queue_size,
                               validation_data=validation_generator)
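
A hypothetical invocation of Example #1, assuming parse_args defines command-line flags matching the attributes read from args; parse_args itself is not shown, so treat the flag names below as placeholders:

if __name__ == '__main__':
    main(['--phi', '0',
          '--snapshot', 'imagenet',
          '--gpu', '0',
          '--steps', '1000',
          '--epochs', '50'])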
Example #2
import os

from keras.optimizers import Adam

# create_generators, create_callbacks, efficientdet, smooth_l1,
# smooth_l1_quad and focal are assumed to come from the caller's adapted
# EfficientDet helpers (their signatures differ from Example #1).


def main(train_csv, val_csv, classes_csv, epoch_num, phi_num, steps_epoch,
         batch_num, model_path):

    batch_size = batch_num  # default: 1
    phi = phi_num  # default: 0
    is_text_detect = False
    is_detect_quadrangle = False
    rand_transf_augm = True
    train_ann_path = train_csv
    train_class_path = classes_csv
    val_ann_path = val_csv
    val_class_path = classes_csv
    epochs = epoch_num
    workers = 1
    steps_p_epoch = steps_epoch
    use_multiproc = True
    max_que_size = 10
    comp_loss = True
    gpu = None  # set to a device id string such as '0' to pin a specific GPU
    freeze_bn_arg = True
    weighted_bi = False

    # create the generators
    train_generator, validation_generator = create_generators(
        batch_size, phi, is_text_detect, is_detect_quadrangle,
        rand_transf_augm, train_ann_path, val_ann_path, train_class_path,
        val_class_path)

    num_classes = train_generator.num_classes()
    num_anchors = train_generator.num_anchors

    # optionally choose specific GPU
    if gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu)

    # K.set_session(get_session())

    model, prediction_model = efficientdet(
        phi,
        num_classes=num_classes,
        num_anchors=num_anchors,
        weighted_bifpn=weighted_bi,
        freeze_bn=freeze_bn_arg,
        detect_quadrangle=is_detect_quadrangle)

    # freeze backbone layers (freeze_bn_arg doubles as a freeze-backbone flag here)
    if freeze_bn_arg:
        # EfficientNet backbone size for phi = 0..6 (B0..B6); layers below this
        # index are frozen
        for i in range(1, [227, 329, 329, 374, 464, 566, 656][phi]):
            model.layers[i].trainable = False

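    # compile the model; the loss dict keys match the names of the model's
    # output layers ('regression' and 'classification')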
    model.compile(
        optimizer=Adam(lr=1e-3),
        loss={
            'regression':
            smooth_l1_quad() if is_detect_quadrangle else smooth_l1(),
            'classification': focal()
        },
    )

    # print(model.summary())

    # create the callbacks
    callbacks = create_callbacks(model_path)

    if not comp_loss:
        validation_generator = None
    elif comp_loss and validation_generator is None:
        raise ValueError(
            'comp_loss is True but no validation generator was created.')

    # start training
    return model.fit_generator(generator=train_generator,
                               steps_per_epoch=steps_p_epoch,
                               initial_epoch=0,
                               epochs=epochs,
                               verbose=1,
                               callbacks=callbacks,
                               workers=workers,
                               use_multiprocessing=use_multiproc,
                               max_queue_size=max_que_size,
                               validation_data=validation_generator)
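
A minimal sketch of calling Example #2's main directly; the CSV paths and model_path below are placeholders and assume the annotation/class files follow the project's CSV generator format:

history = main(train_csv='annotations_train.csv',
               val_csv='annotations_val.csv',
               classes_csv='classes.csv',
               epoch_num=50,
               phi_num=0,
               steps_epoch=100,
               batch_num=1,
               model_path='checkpoints')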