コード例 #1
0
def test():
    """Smoke-test a stack of MBConv blocks.

    Feeds a dummy all-ones tensor through each block in sequence,
    printing the output shape after every block along with the name and
    shape of each of the block's trainable weights.
    """
    params = get_efficientdet_config()
    # (num_repeat, kernel_size, strides, expand_ratio, input_filters,
    #  output_filters, se_ratio) for each backbone stage.
    block_specs = [
        (1, 3, (1, 1), 1, 32, 16, 0.25),
        (2, 3, (2, 2), 6, 16, 24, 0.25),
        (2, 5, (2, 2), 6, 24, 40, 0.25),
        (3, 3, (2, 2), 6, 40, 80, 0.25),
        (3, 5, (1, 1), 6, 80, 112, 0.25),
        (4, 5, (2, 2), 6, 112, 192, 0.25),
        (1, 3, (1, 1), 6, 192, 320, 0.25),
    ]
    # Build every block first, then run them — same order as the original.
    blocks = [
        MBConvBlock(EfficientDetBlockArgs(*spec), params)
        for spec in block_specs
    ]

    x = tf.ones([1, 224, 224, 32], dtype=tf.float32)
    for block in blocks:
        x = block(x)
        tf.print(tf.shape(x))
        for v in block.trainable_weights:
            tf.print('trainable_weight:', v.name, tf.shape(v))
コード例 #2
0
import tensorflow as tf
import functools
import sys
import os
sys.path.append(os.getcwd())
from ai_api.ai_models.layers.bifpn import BiFPN
from ai_api.ai_models.utils.global_params import get_efficientdet_config

from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession

# Let TF allocate GPU memory on demand instead of grabbing it all up
# front (tf.compat.v1-style session configuration).
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)

# Build a BiFPN over the feature-level sizes from index 3 onward
# (presumably levels P3 and up — confirm against global_params);
# 88 looks like the BiFPN feature width — TODO confirm.
global_params = get_efficientdet_config()
print('level_size:', global_params.levels_size)
m = BiFPN(88, levels_size=global_params.levels_size[3:])


@tf.function
def test(x):
    """Run the module-level BiFPN `m` on `x` in training mode.

    Prints the number of output levels, each level's static and dynamic
    shape and type, and every trainable weight of the BiFPN.
    """
    outputs = m(x, True)
    tf.print('m:', len(outputs), type(outputs))
    for level in outputs:
        tf.print('m:', tf.shape(level), level.shape, type(level))
    for weight in m.trainable_weights:
        tf.print('m1 trainable_weight:', weight.name, tf.shape(weight))


p3 = tf.ones(
コード例 #3
0
def main(_):
    """Build an EfficientDet-d4 model, restore weights variable-by-variable
    from the latest checkpoint in FLAGS.model_dir, optionally export it as
    a SavedModel, then run inference on FLAGS.image_path and write
    annotated images to FLAGS.output_dir.

    Args:
        _: unused positional argument (absl.app passes argv).
    """
    # pylint: disable=line-too-long
    # Prepare images and checkpoints: please run these commands in shell.
    # !mkdir tmp
    # !wget https://user-images.githubusercontent.com/11736571/77320690-099af300-6d37-11ea-9d86-24f14dc2d540.png -O tmp/img.png
    # !wget https://storage.googleapis.com/cloud-tpu-checkpoints/efficientdet/coco/efficientdet-d0.tar.gz -O tmp/efficientdet-d0.tar.gz
    # !tar zxf tmp/efficientdet-d0.tar.gz -C tmp
    imgs = [np.array(Image.open(FLAGS.image_path))]
    # Create model config.
    config = get_efficientdet_config(model_name='efficientdet-d4')
    config.is_training_bn = False
    # config.image_size = 1920
    config.nms_configs.score_thresh = 0.4
    config.nms_configs.max_output_size = 100
    config.override(FLAGS.hparams)

    # (num_repeat, kernel_size, strides, expand_ratio, input_filters,
    #  output_filters, se_ratio) for each backbone stage.
    blocks_args = [
        EfficientDetBlockArgs(1, 3, (1, 1), 1, 32, 16, 0.25),
        EfficientDetBlockArgs(2, 3, (2, 2), 6, 16, 24, 0.25),
        EfficientDetBlockArgs(2, 5, (2, 2), 6, 24, 40, 0.25),
        EfficientDetBlockArgs(3, 3, (2, 2), 6, 40, 80, 0.25),
        EfficientDetBlockArgs(3, 5, (1, 1), 6, 80, 112, 0.25),
        EfficientDetBlockArgs(4, 5, (2, 2), 6, 112, 192, 0.25),
        EfficientDetBlockArgs(1, 3, (1, 1), 6, 192, 320, 0.25),
    ]

    # Use 'mixed_float16' if running on GPUs.
    policy = tf.keras.mixed_precision.experimental.Policy('float32')
    tf.keras.mixed_precision.experimental.set_policy(policy)
    tf.config.experimental_run_functions_eagerly(FLAGS.debug)

    # Create and run the model.
    model = EfficientDetModel(blocks_args=blocks_args, global_params=config)
    model.build((None, None, None, 3))
    print('model_dir:', tf.train.latest_checkpoint(FLAGS.model_dir))
    # Copy the original checkpoint weights into the new model variable by
    # variable; shape-mismatched variables are reported and skipped.
    # Hoisted out of the loop: the checkpoint path is loop-invariant.
    ckpt_path = tf.train.latest_checkpoint(FLAGS.model_dir)
    for i, v_new in enumerate(model.trainable_variables):
        v_old = tf.train.load_variable(ckpt_path, v_new.name)
        tf.print(v_new.name)
        if np.shape(v_old) == np.shape(v_new.numpy()):
            v_new.assign(v_old)
        else:
            tf.print(
                str(i) + ':', v_new.name, np.shape(v_old),
                np.shape(v_new.numpy()))
    # NOTE(review): the original had a bare `return` here (debug leftover)
    # that made everything below — summary, export, inference — unreachable.
    # Removed so the function performs its full documented job.
    model.summary()

    class ExportModel(tf.Module):
        """Wraps the model so a concrete serving function can be exported."""

        def __init__(self, model):
            super().__init__()
            self.model = model

        @tf.function
        def f(self, imgs):
            return self.model(imgs, training=False, post_mode='global')

    imgs = tf.convert_to_tensor(imgs, dtype=tf.uint8)
    export_model = ExportModel(model)
    # By default no SavedModel is written.
    if FLAGS.saved_model_dir:
        tf.saved_model.save(export_model,
                            FLAGS.saved_model_dir,
                            signatures=export_model.f.get_concrete_function(
                                tf.TensorSpec(shape=(None, None, None, 3),
                                              dtype=tf.uint8)))
        export_model = tf.saved_model.load(FLAGS.saved_model_dir)

    boxes, scores, classes, valid_len = export_model.f(imgs)

    # Visualize results.
    for i, img in enumerate(imgs):
        length = valid_len[i]
        img = inference.visualize_image(
            img,
            boxes[i].numpy()[:length],
            # `np.int` was removed in NumPy 1.24; it was an alias for the
            # builtin int, which is the drop-in replacement.
            classes[i].numpy().astype(int)[:length],
            scores[i].numpy()[:length],
            label_map=config.label_map,
            min_score_thresh=config.nms_configs.score_thresh,
            max_boxes_to_draw=config.nms_configs.max_output_size)
        output_image_path = os.path.join(FLAGS.output_dir, str(i) + '.jpg')
        Image.fromarray(img).save(output_image_path)
        # Fixed: the original passed the path as a lazy %-argument to a
        # format string with no placeholder, so the path never appeared.
        logging.info('writing annotated image to %s', output_image_path)
コード例 #4
0
def train():
    """Train an EfficientDet-d1 detector.

    Builds the train/val dataset pipelines, the model, and an SGD
    optimizer with a warmed-up cosine learning-rate schedule wrapped in a
    moving average, then fits with early stopping and periodic saving.
    """
    # Load data.
    batch_size = 2  # note that more GPU memory is required after unfreezing the body
    num_epochs = 300

    # Create model config.
    config = get_efficientdet_config(model_name='efficientdet-d1')

    anchors = Anchors(config.min_level, config.max_level,
                      (config.image_size, config.image_size),
                      config.num_scales, config.aspect_ratios,
                      config.anchor_scale)

    # (num_repeat, kernel_size, strides, expand_ratio, input_filters,
    #  output_filters, se_ratio) for each backbone stage.
    blocks_args = [
        EfficientDetBlockArgs(1, 3, (1, 1), 1, 32, 16, 0.25),
        EfficientDetBlockArgs(2, 3, (2, 2), 6, 16, 24, 0.25),
        EfficientDetBlockArgs(2, 5, (2, 2), 6, 24, 40, 0.25),
        EfficientDetBlockArgs(3, 3, (2, 2), 6, 40, 80, 0.25),
        EfficientDetBlockArgs(3, 5, (1, 1), 6, 80, 112, 0.25),
        EfficientDetBlockArgs(4, 5, (2, 2), 6, 112, 192, 0.25),
        EfficientDetBlockArgs(1, 3, (1, 1), 6, 192, 320, 0.25),
    ]
    data_set_train, data_generator_train = GetDataSet(image_path=trainData,
                                                      label_path=trainLabels,
                                                      classes_path=classesFile,
                                                      batch_size=batch_size,
                                                      anchors=anchors)
    data_set_val, data_generator_val = GetDataSet(image_path=valData,
                                                  label_path=valLabels,
                                                  classes_path=classesFile,
                                                  batch_size=1,
                                                  anchors=anchors,
                                                  is_train=False)
    steps_per_epoch = max(1,
                          data_generator_train.labels_num // batch_size // 10)
    # steps_per_epoch = 50

    # Build the model.
    model = EfficientDetNetTrain(blocks_args=blocks_args,
                                 global_params=config,
                                 anchors=anchors)

    # Compile the model.
    print('编译模型')
    # model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-4), loss=Yolov4Loss(anchors=anchors,classes_num=data_generator_train.classes_num)) # recompile to apply the change
    # model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-3))
    # Linear-scaling rule: the base LR of 0.08 is defined for batch size 64.
    adjusted_learning_rate = 0.08 * batch_size / 64
    lr_warmup_init = 0.008
    lr_warmup_step = int(1.0 * steps_per_epoch)  # one epoch of warmup
    total_steps = int(num_epochs * steps_per_epoch)
    learning_rate = CosineLrSchedule(adjusted_learning_rate, lr_warmup_init,
                                     lr_warmup_step, total_steps)
    # learning_rate = 0.1
    optimizer = tf.keras.optimizers.SGD(learning_rate, momentum=0.9)

    import tensorflow_addons as tfa  # pylint: disable=g-import-not-at-top
    optimizer = tfa.optimizers.MovingAverage(optimizer, average_decay=0.9998)
    model.compile(optimizer)
    # model.compile(optimizer=RAdam(lr=1e-4))

    # Logs / checkpoints.
    log_dir = './data/'
    model_path = log_dir + 'trained_weights_final.h5'
    # Run one dummy batch so variables are created before load_weights.
    _ = model(tf.ones((1, config.image_size, config.image_size, 3)))
    if os.path.exists(model_path):
        model.load_weights(model_path)
        print('加载模型:{}'.format(model_path))
    model.summary()
    # for v in model.trainable_weights:
    #     print(v.name, tf.shape(v))

    # logging = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
    # checkpoint = tf.keras.callbacks.ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
    #     monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    # Training callbacks.
    early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                      min_delta=0,
                                                      patience=10,
                                                      verbose=1)
    # reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.3, patience=6, verbose=1, mode='min', min_delta=0.0, min_lr=1e-6)
    # early_stopping = tf.keras.callbacks.EarlyStopping(monitor='loss', min_delta=0, patience=10, verbose=1, mode='min', restore_best_weights=True)

    print('Train on {} samples, val on {} samples, with batch size {}.'.format(
        data_generator_train.labels_num, data_generator_val.labels_num,
        batch_size))
    print('开始训练')
    model.fit(data_set_train,
              steps_per_epoch=steps_per_epoch,
              validation_data=data_set_val,
              # Fixed: `/` produced a float, but Keras requires an integer
              # number of validation steps — use floor division.
              validation_steps=max(1, data_generator_val.labels_num // 10),
              epochs=num_epochs,
              initial_epoch=0,
              callbacks=[early_stopping,
                         SaveCallback(model_path)])
    model.save_weights(model_path)
コード例 #5
0
def train():
    """Train the demo model (EfficientDet-d4 config) with Adam,
    reducing the learning rate on plateau and stopping early when the
    training loss stalls. Weights are saved to ./data/ after fitting."""
    # Load data.
    batch_size = 3  # note that more GPU memory is required after unfreezing the body

    # Create model config.
    config = get_efficientdet_config(model_name='efficientdet-d4')

    anchors = Anchors(config.min_level, config.max_level,
                      (config.image_size, config.image_size),
                      config.num_scales, config.aspect_ratios,
                      config.anchor_scale)
    data_set_train, data_generator_train = GetDataSet(image_path=trainData,
                                                      label_path=trainLabels,
                                                      classes_path=classesFile,
                                                      batch_size=batch_size,
                                                      anchors=anchors)
    data_set_val, data_generator_val = GetDataSet(image_path=valData,
                                                  label_path=valLabels,
                                                  classes_path=classesFile,
                                                  batch_size=1,
                                                  anchors=anchors,
                                                  is_mean=False)

    # Build the model.
    model = DemoModelTrain(global_params=config)

    # Compile the model.
    print('编译模型')
    # model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-4), loss=Yolov4Loss(anchors=anchors,classes_num=data_generator_train.classes_num)) # recompile to apply the change
    # NOTE(review): `lr=` is a deprecated alias of `learning_rate=` in
    # newer tf.keras releases — confirm against the pinned TF version.
    model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-4))
    # model.compile(optimizer=RAdam(lr=1e-4))

    # Logs / checkpoints.
    log_dir = './data/'
    model_path = log_dir + 'trained_weights_final.h5'
    # Run one dummy batch so variables are created before load_weights.
    _ = model(tf.ones((1, config.image_size, config.image_size, 3)))
    if os.path.exists(model_path):
        model.load_weights(model_path)
        print('加载模型:{}'.format(model_path))
    model.summary()

    # logging = tf.keras.callbacks.TensorBoard(log_dir=log_dir)
    # checkpoint = tf.keras.callbacks.ModelCheckpoint(log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
    #     monitor='val_loss', save_weights_only=True, save_best_only=True, period=3)
    # Training callbacks.
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                     factor=0.1,
                                                     patience=3,
                                                     verbose=1)
    early_stopping = tf.keras.callbacks.EarlyStopping(monitor='loss',
                                                      min_delta=0,
                                                      patience=10,
                                                      verbose=1)
    # reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='loss', factor=0.3, patience=6, verbose=1, mode='min', min_delta=0.0, min_lr=1e-6)
    # early_stopping = tf.keras.callbacks.EarlyStopping(monitor='loss', min_delta=0, patience=10, verbose=1, restore_best_weights=True)

    print('Train on {} samples, val on {} samples, with batch size {}.'.format(
        data_generator_train.labels_num, data_generator_val.labels_num,
        batch_size))
    print('开始训练')
    model.fit(
        data_set_train,
        # steps_per_epoch=max(1, data_generator_train.labels_num//batch_size),
        steps_per_epoch=5000,
        # validation_data=data_set_val,
        # validation_steps=max(1, data_generator_val.labels_num//10),
        # validation_steps=10,
        epochs=300,
        initial_epoch=0,
        callbacks=[reduce_lr, early_stopping,
                   SaveCallback(model_path)])
    model.save_weights(model_path)