Example #1
import os

import cv2
import numpy as np

# CSVGenerator, centernet, and read_image_bgr are assumed to be provided by the
# surrounding keras-CenterNet style project; their import paths are omitted here.
DATA_SUFFIX = '_datamap.png'
RESULT_PATH = "result/"
PROCESS_PATH = "process/"
model_path = 'checkpoints/csv.h5'
colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
score_threshold = 0.5
flip_test = False

os.environ['CUDA_VISIBLE_DEVICES'] = '0'
generator = CSVGenerator(
    'data/annotations.csv',
    'data/classes.csv',
    'data',
)
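# The two CSV files above are assumed to follow the keras-retinanet CSV
# convention (an assumption, not stated in the original snippet):
#   annotations.csv: path/to/image.png,x1,y1,x2,y2,class_name
#   classes.csv:     class_name,id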

num_classes = generator.num_classes()
classes = list(generator.classes.keys())

model, prediction_model, debug_model = centernet(num_classes=num_classes,
                                                 nms=True,
                                                 flip_test=flip_test,
                                                 freeze_bn=False,
                                                 score_threshold=score_threshold)
prediction_model.load_weights(model_path, by_name=True, skip_mismatch=True)


for f in os.listdir(PROCESS_PATH):
    if f.endswith(DATA_SUFFIX):
        image = read_image_bgr(PROCESS_PATH + f)
        src_image = image.copy()
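        # The example stops before the detection step; the lines below are a
        # minimal sketch of how prediction_model would typically be applied.
        # The 512x512 input size, the 1/255 normalization, and the
        # (x1, y1, x2, y2, score, class_id) row layout of the output are
        # assumptions and may differ in the actual model configuration.
        h, w = src_image.shape[:2]
        inputs = cv2.resize(image, (512, 512)).astype(np.float32)[np.newaxis, ...] / 255.0
        detections = prediction_model.predict(inputs)[0]
        for x1, y1, x2, y2, score, class_id in detections:
            if score < score_threshold:
                continue
            # map the box back to the original image size and draw it
            x1, x2 = int(x1 * w / 512), int(x2 * w / 512)
            y1, y2 = int(y1 * h / 512), int(y2 * h / 512)
            color = colors[int(class_id) % len(colors)]
            cv2.rectangle(src_image, (x1, y1), (x2, y2), color, 2)
            cv2.putText(src_image, classes[int(class_id)], (x1, max(y1 - 5, 0)),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)
        cv2.imwrite(RESULT_PATH + f, src_image)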
Example #2

# Note: phi, weighted_bifpn, the ckp_* checkpoint variables, and the path/flag
# arguments below are assumed to be defined earlier in the original script
# (e.g. parsed from the command line); evaluate is the project's mAP routine.
common_args = {
    "batch_size": 1,
    "phi": phi,
}
test_generator = CSVGenerator(
    path_test_csv,
    path_classes_csv,
    base_dir=paths_base_dir,
    detect_quadrangle=quad_angle_arg,
    detect_text=txt_detect_arg,
    **common_args
)
model_path = ckp_base_path + ckp_model_dir + "/" + ckp_model_file
input_shape = (test_generator.image_size, test_generator.image_size)
print(input_shape)
anchors = test_generator.anchors
num_classes = test_generator.num_classes()
model, prediction_model = efficientdet(phi=phi,
                                       num_classes=num_classes,
                                       weighted_bifpn=weighted_bifpn)
prediction_model.load_weights(model_path, by_name=True)
average_precisions = evaluate(test_generator,
                              prediction_model,
                              visualize=False)
# compute per-class average precision
total_instances = []
precisions = []
for label, (average_precision,
            num_annotations) in average_precisions.items():
    print(
        "{:.0f} instances of class".format(num_annotations),
        test_generator.label_to_name(label),
        "with average precision: {:.4f}".format(average_precision))
    total_instances.append(num_annotations)
    precisions.append(average_precision)
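# The fragment ends here in the source; evaluation scripts of this style
# usually finish by averaging AP across classes, weighted by instance count.
# This is a sketch following that common pattern, not the original file:
mean_ap = sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)
print("mAP: {:.4f}".format(mean_ap))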
Example #3
# Imports used by this example; the project helpers (CSVGenerator,
# efficientdet, MiscEffect, VisualEffect, WEIGHTS_HASHES, BASE_WEIGHTS_PATH)
# are assumed to come from an EfficientDet-style project and are not
# imported here.
import os

import numpy as np
import pandas as pd
import tensorflow as tf
def load_efficient_det(config, LOCAL_ANNOTATIONS_PATH, LOCAL_ROOT_PATH,
                       LOCAL_CLASSES_PATH, LOCAL_VALIDATIONS_PATH,
                       LOCAL_LOGS_PATH, LOCAL_SNAPSHOTS_PATH):

    common_args = {
        'phi': config['phi'],
        'detect_text': config['detect_text'],
        'detect_quadrangle': config['detect_quadrangle']
    }

    # create random transform generator for augmenting training data
    if config['random_transform']:
        misc_effect = MiscEffect()
        visual_effect = VisualEffect()
    else:
        misc_effect = None
        visual_effect = None

    annotations_df = pd.read_csv(LOCAL_ANNOTATIONS_PATH, header=None)
    # stratified sampling: column 5 of the annotations CSV holds the class
    # name, so each class keeps its share of the ~15% evaluation split
    N = int(len(annotations_df) * 0.15)
    evaluation_df = annotations_df.groupby(
        5, group_keys=False).apply(lambda x: x.sample(
            int(np.rint(N * len(x) / len(annotations_df))))).sample(frac=1)
    evaluation_path = f'{LOCAL_ROOT_PATH}/evaluation.csv'
    evaluation_df.to_csv(evaluation_path, index=False, header=None)

    # one epoch covers every unique image once, split into batches
    config['steps_per_epoch'] = annotations_df.iloc[:, 0].nunique() / config['batch_size']

    train_generator = CSVGenerator(LOCAL_ANNOTATIONS_PATH,
                                   LOCAL_CLASSES_PATH,
                                   batch_size=config['batch_size'],
                                   misc_effect=misc_effect,
                                   visual_effect=visual_effect,
                                   **common_args)
    if config['train_evaluation']:
        evaluation_generator = CSVGenerator(evaluation_path,
                                            LOCAL_CLASSES_PATH,
                                            batch_size=config['batch_size'],
                                            misc_effect=misc_effect,
                                            visual_effect=visual_effect,
                                            **common_args)
    else:
        evaluation_generator = None
    if config['validation']:
        validation_generator = CSVGenerator(LOCAL_VALIDATIONS_PATH,
                                            LOCAL_CLASSES_PATH,
                                            batch_size=config['batch_size'],
                                            misc_effect=misc_effect,
                                            visual_effect=visual_effect,
                                            **common_args)
    else:
        validation_generator = None
    num_classes = train_generator.num_classes()
    num_anchors = train_generator.num_anchors

    model, prediction_model = efficientdet(
        config['phi'],
        num_classes=num_classes,
        num_anchors=num_anchors,
        weighted_bifpn=config['weighted_bifpn'],
        freeze_bn=config['freeze_bn'],
        detect_quadrangle=config['detect_quadrangle'])

    # freeze backbone layers
    if config['freeze_backbone']:
        # index of the last backbone layer for phi 0-6
        for i in range(1, [227, 329, 329, 374, 464, 566, 656][config['phi']]):
            model.layers[i].trainable = False
    # optionally pin training to specific GPU(s); expected format is
    # "<visible_devices>" or "<visible_devices>:<id1>,<id2>,..."
    gpu = config['gpu']
    if gpu:
        device = gpu.split(':')[0]
        os.environ['CUDA_VISIBLE_DEVICES'] = device
        if len(gpu.split(':')) > 1:
            gpus = gpu.split(':')[1]
            model = tf.keras.utils.multi_gpu_model(
                model, gpus=list(map(int, gpus.split(','))))

    if config['snapshot'] == 'imagenet':
        model_name = 'efficientnet-b{}'.format(config['phi'])
        file_name = '{}_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'.format(
            model_name)
        file_hash = WEIGHTS_HASHES[model_name][1]
        weights_path = tf.keras.utils.get_file(file_name,
                                               BASE_WEIGHTS_PATH + file_name,
                                               cache_subdir='models',
                                               file_hash=file_hash)
        model.load_weights(weights_path, by_name=True)
    elif config['snapshot']:
        print('Loading model, this may take a second...')
        model.load_weights(config['snapshot'], by_name=True)

    return (model, prediction_model, train_generator, evaluation_generator,
            validation_generator, config)
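A hypothetical call sketch follows. The paths and config values are
placeholders chosen to exercise every key the function reads; none of them
come from the original project.

config = {
    'phi': 0,
    'detect_text': False,
    'detect_quadrangle': False,
    'random_transform': True,
    'batch_size': 8,
    'train_evaluation': True,
    'validation': False,
    'weighted_bifpn': True,
    'freeze_bn': False,
    'freeze_backbone': True,
    'gpu': '0',
    'snapshot': 'imagenet',
}
(model, prediction_model, train_generator, evaluation_generator,
 validation_generator, config) = load_efficient_det(
    config,
    'data/annotations.csv',   # LOCAL_ANNOTATIONS_PATH
    'data',                   # LOCAL_ROOT_PATH
    'data/classes.csv',       # LOCAL_CLASSES_PATH
    'data/validations.csv',   # LOCAL_VALIDATIONS_PATH
    'logs',                   # LOCAL_LOGS_PATH
    'snapshots')              # LOCAL_SNAPSHOTS_PATH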