def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path',
                        help='model file to predict',
                        type=str,
                        required=True)
    parser.add_argument('--image_file',
                        help='image file to predict',
                        type=str,
                        required=True)
    parser.add_argument('--anchors_path',
                        help='path to anchor definitions',
                        type=str,
                        required=True)
    parser.add_argument(
        '--classes_path',
        help='path to class definitions, default ../configs/voc_classes.txt',
        type=str,
        default='../configs/voc_classes.txt')
    parser.add_argument('--loop_count',
                        help='number of times to loop inference',
                        type=int,
                        default=1)

    args = parser.parse_args()

    # param parse
    anchors = get_anchors(args.anchors_path)
    class_names = get_classes(args.classes_path)

    validate_yolo_model_mnn(args.model_path, args.image_file, anchors,
                            class_names, args.loop_count)
Example #2
    def __init__(self, FLAGS):
        self.__dict__.update(self._defaults)  # set up default values
        self.backbone = FLAGS['backbone']
        self.opt = FLAGS['opt']
        self.class_names = get_classes(FLAGS['classes_path'])
        self.anchors = get_anchors(FLAGS['anchors_path'])
        self.input_shape = FLAGS['input_size']
        config = tf.ConfigProto()

        if self.opt == OPT.XLA:
            config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
            sess = tf.Session(config=config)
            tf.keras.backend.set_session(sess)
        elif self.opt == OPT.MKL:
            config.intra_op_parallelism_threads = 4
            config.inter_op_parallelism_threads = 4
            sess = tf.Session(config=config)
            tf.keras.backend.set_session(sess)
        elif self.opt == OPT.DEBUG:
            tf.logging.set_verbosity(tf.logging.DEBUG)
            sess = tf_debug.TensorBoardDebugWrapperSession(
                tf.Session(config=tf.ConfigProto(log_device_placement=True)),
                "localhost:6064")
            tf.keras.backend.set_session(sess)
        else:
            sess = tf.keras.backend.get_session()
        self.sess = sess
        self.generate(FLAGS)
Example #3
 def __init__(self, **kwargs):
     super(YOLO_np, self).__init__()
     self.__dict__.update(self._defaults)  # set up default values
     self.__dict__.update(kwargs)  # and update with user overrides
     self.class_names = get_classes(self.classes_path)
     self.anchors = get_anchors(self.anchors_path)
     self.colors = get_colors(self.class_names)
     K.set_learning_phase(0)
     self.yolo_model = self._generate_model()
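
This class (like the one in Example #2) seeds itself from a class-level _defaults dict that is not shown here. A hypothetical example of the kind of values such a dict carries; the paths and thresholds below are illustrative assumptions, not the original settings:

_defaults = {
    'model_path': 'model_data/yolo.h5',             # assumed default weights file
    'anchors_path': 'model_data/yolo_anchors.txt',
    'classes_path': 'model_data/coco_classes.txt',
    'score': 0.3,                                   # confidence threshold
    'iou': 0.45,                                    # NMS IoU threshold
    'model_image_size': (416, 416),
}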
Example #4
    def __init__(self, FLAGS):
        self.backbone = FLAGS.get('backbone', BACKBONE.MOBILENETV2)
        self.class_names = get_classes(
            FLAGS.get('classes_path', 'model_data/voc_classes.txt'))
        self.anchors = get_anchors(
            FLAGS.get('anchors_path', 'model_data/yolo_anchors'))
        self.input_shape = FLAGS.get('input_size', (416, 416))
        self.score = FLAGS.get('score', 0.2)
        self.nms = FLAGS.get('nms', 0.5)
        self.with_classes = FLAGS.get('with_classes', False)

        self.generate(FLAGS)
Example #5
def main():
    annotation_path = 'create_train_data/train.txt'
    log_dir = os.path.join(
        "trained_model", time.strftime("%Y-%m-%d %H-%M-%S", time.localtime()))
    if os.path.exists("trained_model"):
        os.mkdir(log_dir)
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    anchors = get_anchors(anchors_path)
    input_shape = (416, 416)  # multiple of 32, hw
    model = create_model(input_shape, anchors, len(class_names))
    train(model,
          annotation_path,
          input_shape,
          anchors,
          len(class_names),
          log_dir=log_dir)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', help='model file to predict', type=str, required=True)
    parser.add_argument('--image_file', help='image file to predict', type=str, required=True)
    parser.add_argument('--anchors_path',help='path to anchor definitions', type=str, required=True)
    parser.add_argument('--classes_path', help='path to class definitions, default ../configs/voc_classes.txt', type=str, default='../configs/voc_classes.txt')
    parser.add_argument('--model_image_size', help='model image input size as <num>x<num>, default 416x416', type=str, default='416x416')
    parser.add_argument('--loop_count', help='number of times to loop inference', type=int, default=1)

    args = parser.parse_args()

    # param parse
    model = load_model(args.model_path, compile=False)
    anchors = get_anchors(args.anchors_path)
    class_names = get_classes(args.classes_path)
    height, width = args.model_image_size.split('x')
    model_image_size = (int(height), int(width))

    validate_yolo_model(model, args.image_file, anchors, class_names, model_image_size, args.loop_count)
Example #7
    def __init__(self,
                 weights_path,
                 anchors_path,
                 classes_path,
                 model_image_size=(None, None),
                 score=0.3,
                 iou=0.45,
                 nb_gpu=1,
                 gpu_frac=None,
                 **kwargs):
        """

        :param str weights_path: path to loaded model weights, e.g. 'model_data/tiny-yolo.h5'
        :param str anchors_path: path to loaded model anchors, e.g. 'model_data/tiny-yolo_anchors.csv'
        :param str classes_path: path to loaded trained classes, e.g. 'model_data/coco_classes.txt'
        :param float score: confidence score
        :param float iou:
        :param tuple(int,int) model_image_size: e.g. for tiny (416, 416)
        :param int nb_gpu:
        :param float gpu_frac: fraction of GPU memory to reserve, None for automatic
        :param kwargs:
        """
        self.__dict__.update(kwargs)  # and update with user overrides
        self.weights_path = update_path(weights_path)
        self.anchors_path = update_path(anchors_path)
        self.classes_path = update_path(classes_path)
        self.score = score
        self.iou = iou
        self.model_image_size = model_image_size
        self.nb_gpu = nb_gpu
        if not self.nb_gpu:
            # disable all GPUs
            os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
        self.class_names = get_class_names(self.classes_path)
        self.anchors = get_anchors(self.anchors_path)
        self._open_session(gpu_frac)
        self.boxes, self.scores, self.classes = self._create_model()
        self._generate_class_colors()
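
A hedged usage sketch for this constructor, assuming the enclosing class is named YOLO and reusing the example paths from its docstring:

detector = YOLO(weights_path='model_data/tiny-yolo.h5',
                anchors_path='model_data/tiny-yolo_anchors.csv',
                classes_path='model_data/coco_classes.txt',
                model_image_size=(416, 416),
                score=0.3,
                iou=0.45,
                nb_gpu=1)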
Example #8
import os, json, pytz, time, datetime

import numpy as np
from yolo import YOLO
from PIL import Image
from io import BytesIO

from yolo3.utils import get_classes, get_anchors
from imgur_api import upload_photo
from flask import Flask

annotation_path = os.path.join('model_data', 'anno.txt')
classes_path = os.path.join('model_data', 'sp_classes.txt')
anchors_path = os.path.join('model_data', 'yolo_anchors.txt')
class_names = get_classes(classes_path)
num_classes = len(class_names)
anchors = get_anchors(anchors_path)

input_shape = (416, 416)  # multiple of 32, hw

yolo = YOLO(model_path='single_label.h5',
            classes_path=classes_path,
            anchors_path=anchors_path)

# load the LINE secret key
secretFileContentJson = json.load(
    open("./line_secret_key", "r", encoding="utf8"))

# server startup configuration
app = Flask(__name__, static_url_path="/images", static_folder="./images/")

# create the instance objects
Example #9
def _main(args):
    global lr_base, total_epochs
    lr_base = args.learning_rate
    total_epochs = args.total_epoch

    annotation_file = args.annotation_file
    log_dir = 'logs/000/'
    classes_path = args.classes_path
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    if args.tiny_version:
        anchors_path = 'configs/tiny_yolo_anchors.txt'
    else:
        anchors_path = 'configs/yolo_anchors.txt'
    anchors = get_anchors(anchors_path)
    print("\nanchors = ", anchors)
    print("\nnum_classes = ", num_classes)

    # get freeze level according to CLI option
    if args.weights_path:
        freeze_level = 0
    else:
        freeze_level = 1

    if args.freeze_level is not None:
        freeze_level = args.freeze_level
        print("\n\nFREEZE LEVEL  = ", freeze_level)

    # callbacks for training process
    logging = TensorBoard(log_dir=log_dir,
                          histogram_freq=0,
                          write_graph=False,
                          write_grads=False,
                          write_images=False,
                          update_freq='batch')
    checkpoint = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        verbose=1,
        save_weights_only=False,
        save_best_only=True,
        period=5)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.5,
                                  patience=5,
                                  verbose=1,
                                  cooldown=0,
                                  min_lr=1e-10)
    lr_scheduler = LearningRateScheduler(learning_rate_scheduler)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=30,
                                   verbose=1)
    terminate_on_nan = TerminateOnNaN()

    callbacks = [
        logging, checkpoint, reduce_lr, early_stopping, terminate_on_nan
    ]

    # get train&val dataset
    dataset = get_dataset(annotation_file)
    if args.val_annotation_file:
        val_dataset = get_dataset(args.val_annotation_file)
        num_train = len(dataset)
        num_val = len(val_dataset)
        dataset.extend(val_dataset)
    else:
        val_split = args.val_split
        num_val = int(len(dataset) * val_split)
        num_train = len(dataset) - num_val

    # prepare model pruning config
    pruning_end_step = np.ceil(1.0 * num_train / args.batch_size).astype(
        np.int32) * args.total_epoch
    if args.model_pruning:
        pruning_callbacks = [
            sparsity.UpdatePruningStep(),
            sparsity.PruningSummaries(log_dir=log_dir, profile_batch=0)
        ]
        callbacks = callbacks + pruning_callbacks

    # prepare optimizer
    optimizer = get_optimizer(args.optimizer, args.learning_rate)

    # get train model
    model = get_yolo3_train_model(args.model_type,
                                  anchors,
                                  num_classes,
                                  weights_path=args.weights_path,
                                  freeze_level=freeze_level,
                                  optimizer=optimizer,
                                  label_smoothing=args.label_smoothing,
                                  model_pruning=args.model_pruning,
                                  pruning_end_step=pruning_end_step)
    # support multi-gpu training
    if args.gpu_num >= 2:
        model = multi_gpu_model(model, gpus=args.gpu_num)
    model.summary()

    # Train some initial epochs with frozen layers first if needed, to get a stable loss.
    input_shape = args.model_image_size
    assert (input_shape[0] % 32 == 0
            and input_shape[1] % 32 == 0), 'Multiples of 32 required'
    batch_size = args.batch_size
    initial_epoch = 0
    epochs = args.init_epoch
    print("Initial training stage")
    print(
        'Train on {} samples, val on {} samples, with batch size {}, input_shape {}.'
        .format(num_train, num_val, batch_size, input_shape))
    model.fit_generator(data_generator_wrapper(dataset[:num_train], batch_size,
                                               input_shape, anchors,
                                               num_classes),
                        steps_per_epoch=max(1, num_train // batch_size),
                        validation_data=data_generator_wrapper(
                            dataset[num_train:], batch_size, input_shape,
                            anchors, num_classes),
                        validation_steps=max(1, num_val // batch_size),
                        epochs=epochs,
                        initial_epoch=initial_epoch,
                        callbacks=callbacks)

    # Apply Cosine learning rate decay only after
    # unfreeze all layers
    if args.cosine_decay_learning_rate:
        callbacks.remove(reduce_lr)
        callbacks.append(lr_scheduler)

    # Unfreeze the whole network for further training
    # NOTE: more GPU memory is required after unfreezing the body
    print("Unfreeze and continue training, to fine-tune.")
    for i in range(len(model.layers)):
        model.layers[i].trainable = True
    model.compile(optimizer=optimizer,
                  loss={
                      'yolo_loss': lambda y_true, y_pred: y_pred
                  })  # recompile to apply the change

    if args.multiscale:
        # prepare multiscale config
        input_shape_list = get_multiscale_list(args.model_type,
                                               args.tiny_version)
        interval = args.rescale_interval

        # Do multi-scale training on different input shape
        # change every "rescale_interval" epochs
        for epoch_step in range(epochs + interval, args.total_epoch, interval):
            # shuffle train/val dataset for cross-validation
            if args.data_shuffle:
                np.random.shuffle(dataset)

            initial_epoch = epochs
            epochs = epoch_step
            # rescale input only from 2nd round, to make sure unfreeze stable
            if initial_epoch != args.init_epoch:
                input_shape = input_shape_list[random.randint(
                    0,
                    len(input_shape_list) - 1)]
            print(
                'Train on {} samples, val on {} samples, with batch size {}, input_shape {}.'
                .format(num_train, num_val, batch_size, input_shape))
            model.fit_generator(
                data_generator_wrapper(dataset[:num_train], batch_size,
                                       input_shape, anchors, num_classes),
                steps_per_epoch=max(1, num_train // batch_size),
                validation_data=data_generator_wrapper(dataset[num_train:],
                                                       batch_size, input_shape,
                                                       anchors, num_classes),
                validation_steps=max(1, num_val // batch_size),
                epochs=epochs,
                initial_epoch=initial_epoch,
                callbacks=callbacks)
    else:
        # Do single-scale training
        print(
            'Train on {} samples, val on {} samples, with batch size {}, input_shape {}.'
            .format(num_train, num_val, batch_size, input_shape))
        model.fit_generator(data_generator_wrapper(dataset[:num_train],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(
                                dataset[num_train:], batch_size, input_shape,
                                anchors, num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=args.total_epoch,
                            initial_epoch=epochs,
                            callbacks=callbacks)

    # Finally store model
    if args.model_pruning:
        model = sparsity.strip_pruning(model)
    model.save(log_dir + 'trained_final.h5')
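
The learning_rate_scheduler passed to LearningRateScheduler above is not part of this snippet. Given the cosine_decay_learning_rate option and the lr_base/total_epochs globals set at the top of _main, a plausible cosine-decay sketch (an assumption, not the original function) is:

import math

def learning_rate_scheduler(epoch, current_lr):
    # hypothetical cosine decay from lr_base towards a small floor over total_epochs
    lr = 0.5 * lr_base * (1 + math.cos(math.pi * min(epoch, total_epochs) / total_epochs))
    return max(lr, 1e-10)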
Example #10
def _main(path_dataset,
          path_anchors,
          path_weights=None,
          path_output='.',
          path_config=None,
          path_classes=None,
          nb_gpu=1,
          **kwargs):

    config = load_config(path_config, DEFAULT_CONFIG)
    anchors = get_anchors(path_anchors)
    nb_classes = get_nb_classes(path_dataset)
    logging.info('Using %i classes', nb_classes)
    _export_classes(get_dataset_class_names(path_dataset, path_classes),
                    path_output)

    # make sure you know what you freeze
    model, bottleneck_model, last_layer_model = create_model_bottleneck(
        config['image-size'],
        anchors,
        nb_classes,
        freeze_body=2,
        weights_path=path_weights,
        nb_gpu=nb_gpu)

    log_tb = TensorBoard(log_dir=path_output)
    checkpoint = ModelCheckpoint(os.path.join(path_output, NAME_CHECKPOINT),
                                 monitor='val_loss',
                                 save_weights_only=True,
                                 save_best_only=True,
                                 period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    lines_train, lines_valid, num_val, num_train = load_training_lines(
        path_dataset, config['valid-split'])

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    _yolo_loss = lambda y_true, y_pred: y_pred  # use custom yolo_loss Lambda layer.
    _data_gene_bottleneck = partial(
        generator_bottleneck,
        batch_size=config['batch-size']['bottlenecks'],
        input_shape=config['image-size'],
        anchors=anchors,
        nb_classes=nb_classes,
        **config['generator'])
    _data_generator = partial(data_generator,
                              input_shape=config['image-size'],
                              anchors=anchors,
                              nb_classes=nb_classes,
                              **config['generator'])

    epochs_head = config['epochs'].get('head', 0)
    epochs_btnc = config['epochs'].get('bottlenecks', 0)
    if epochs_btnc > 0 or epochs_head > 0:
        # perform bottleneck training
        path_bottlenecks = os.path.join(path_output, NAME_BOTTLENECKS)
        if not os.path.isfile(
                path_bottlenecks) or config['recompute-bottlenecks']:
            logging.info('calculating bottlenecks')
            bottlenecks = bottleneck_model.predict_generator(
                _data_generator(
                    lines_train + lines_valid,
                    randomize=False,
                    batch_size=config['batch-size']['bottlenecks']),
                steps=(len(lines_train + lines_valid) //
                       config['batch-size']['bottlenecks']) + 1,
                max_queue_size=1)
            np.savez(path_bottlenecks,
                     bot0=bottlenecks[0],
                     bot1=bottlenecks[1],
                     bot2=bottlenecks[2])

        # load bottleneck features from file
        dict_bot = np.load(path_bottlenecks)
        bottlenecks_train = [
            dict_bot[bot_][:num_train] for bot_ in ("bot0", "bot1", "bot2")
        ]
        bottlenecks_val = [
            dict_bot[bot_][num_train:] for bot_ in ("bot0", "bot1", "bot2")
        ]

        # train last layers with fixed bottleneck features
        logging.info(
            'Training last layers with bottleneck features '
            'with %i samples, val on %i samples and batch size %i.', num_train,
            num_val, config['batch-size']['bottlenecks'])
        last_layer_model.compile(optimizer='adam',
                                 loss={'yolo_loss': _yolo_loss})
        t_start = time.time()
        last_layer_model.fit_generator(
            _data_gene_bottleneck(lines_train, bottlenecks=bottlenecks_train),
            steps_per_epoch=max(
                1, num_train // config['batch-size']['bottlenecks']),
            validation_data=_data_gene_bottleneck(lines_valid,
                                                  bottlenecks=bottlenecks_val),
            validation_steps=max(
                1, num_val // config['batch-size']['bottlenecks']),
            epochs=epochs_btnc,
            initial_epoch=0,
            max_queue_size=1)
        _export_model(model, path_output, '', '_bottleneck')

        # train last layers with random augmented data
        model.compile(optimizer=Adam(lr=1e-3),
                      loss={'yolo_loss':
                            _yolo_loss})  # use custom yolo_loss Lambda layer.
        logging.info(
            'Train on %i samples, val on %i samples, with batch size %i.',
            num_train, num_val, config['batch-size']['head'])
        t_start = time.time()
        model.fit_generator(
            _data_generator(lines_train,
                            batch_size=config['batch-size']['head']),
            steps_per_epoch=max(1, num_train // config['batch-size']['head']),
            validation_data=_data_generator(
                lines_valid, batch_size=config['batch-size']['head']),
            validation_steps=max(1, num_val // config['batch-size']['head']),
            epochs=epochs_btnc + epochs_head,
            initial_epoch=epochs_btnc,
            callbacks=[log_tb, checkpoint])
        logging.info('Training took %f minutes', (time.time() - t_start) / 60.)
        _export_model(model, path_output, '', '_head')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if config['epochs'].get('full', 0) > 0:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4),
                      loss={
                          'yolo_loss': lambda y_true, y_pred: y_pred
                      })  # recompile to apply the change
        logging.info('Unfreeze all of the layers.')

        # note that more GPU memory is required after unfreezing the body
        logging.info(
            'Train on %i samples, val on %i samples, with batch size %i.',
            num_train, num_val, config['batch-size']['full'])
        t_start = time.time()
        model.fit_generator(
            _data_generator(lines_train,
                            batch_size=config['batch-size']['full']),
            steps_per_epoch=max(1, num_train // config['batch-size']['full']),
            validation_data=_data_generator(
                lines_valid, batch_size=config['batch-size']['full']),
            validation_steps=max(1, num_val // config['batch-size']['full']),
            epochs=epochs_btnc + epochs_head + config['epochs']['full'],
            initial_epoch=epochs_btnc + epochs_head,
            callbacks=[log_tb, checkpoint, reduce_lr, early_stopping])
        logging.info('Training took %f minutes', (time.time() - t_start) / 60.)
        _export_model(model, path_output, '', '_final')
def train(FLAGS):
    """Train yolov3 with different backbone
    """
    prune = FLAGS['prune']
    opt = FLAGS['opt']
    backbone = FLAGS['backbone']
    log_dir = FLAGS['log_directory'] or os.path.join(
        'logs',
        str(backbone).split('.')[1].lower() + str(datetime.date.today()))
    if not tf.io.gfile.exists(log_dir):
        tf.io.gfile.mkdir(log_dir)
    batch_size = FLAGS['batch_size']
    train_dataset_glob = FLAGS['train_dataset']
    val_dataset_glob = FLAGS['val_dataset']
    test_dataset_glob = FLAGS['test_dataset']
    freeze = FLAGS['freeze']
    freeze_step = FLAGS['epochs'][0]
    train_step = FLAGS['epochs'][1]

    if opt == OPT.DEBUG:
        tf.config.experimental_run_functions_eagerly(True)
        tf.debugging.set_log_device_placement(True)
        tf.get_logger().setLevel('DEBUG')
    elif opt == OPT.XLA:
        config = tf.ConfigProto()
        config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
        sess = tf.Session(config=config)
        tf.keras.backend.set_session(sess)

    class_names = get_classes(FLAGS['classes_path'])
    num_classes = len(class_names)
    anchors = get_anchors(FLAGS['anchors_path'])
    input_shape = FLAGS['input_size']  # multiple of 32, hw
    model_path = FLAGS['model']
    if model_path and not model_path.endswith('.h5'):
        model_path = tf.train.latest_checkpoint(model_path)
    lr = FLAGS['learning_rate']
    tpu_address = FLAGS['tpu_address']
    if tpu_address is not None:
        cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=tpu_address)
        tf.config.experimental_connect_to_host(cluster_resolver.master())
        tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
        strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)
    else:
        strategy = tf.distribute.MirroredStrategy(devices=FLAGS['gpus'])

    batch_size = batch_size * strategy.num_replicas_in_sync

    train_dataset_builder = Dataset(train_dataset_glob, batch_size, anchors,
                                    num_classes, input_shape)
    train_dataset, train_num = train_dataset_builder.build()
    val_dataset_builder = Dataset(val_dataset_glob,
                                  batch_size,
                                  anchors,
                                  num_classes,
                                  input_shape,
                                  mode=DATASET_MODE.VALIDATE)
    val_dataset, val_num = val_dataset_builder.build()
    map_callback = MAPCallback(test_dataset_glob, input_shape, anchors,
                               class_names)
    logging = tf.keras.callbacks.TensorBoard(write_graph=False,
                                             log_dir=log_dir,
                                             write_images=True)
    checkpoint = tf.keras.callbacks.ModelCheckpoint(os.path.join(
        log_dir, 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5'),
                                                    monitor='val_loss',
                                                    save_weights_only=False,
                                                    save_best_only=False,
                                                    period=1)
    cos_lr = tf.keras.callbacks.LearningRateScheduler(
        lambda epoch, _: tf.keras.experimental.CosineDecay(lr[1], train_step)
        (epoch - freeze_step).numpy(), 1)
    early_stopping = tf.keras.callbacks.EarlyStopping(
        monitor='val_loss',
        min_delta=0,
        patience=(freeze_step + train_step) // 10,
        verbose=0)
    if tf.version.VERSION.startswith('1.'):
        loss = [
            lambda y_true, yolo_output: YoloLoss(
                y_true, yolo_output, 0, anchors, print_loss=True)
        ]
    else:
        loss = [
            YoloLoss(idx, anchors, print_loss=False)
            for idx in range(len(anchors) // 3)
        ]

    with strategy.scope():
        #factory = ModelFactory(tf.keras.layers.Input(shape=(*input_shape, 3)),
        #                       weights_path=model_path)
        factory = ModelFactory(tf.keras.layers.Input(shape=(*input_shape, 3)))
        if backbone == BACKBONE.MOBILENETV2:
            model = factory.build(mobilenetv2_yolo_body,
                                  20,
                                  len(anchors) // 1,
                                  num_classes,
                                  alpha=1.0)
        elif backbone == BACKBONE.DARKNET53:
            model = factory.build(darknet_yolo_body, 185,
                                  len(anchors) // 3, num_classes)
        elif backbone == BACKBONE.EFFICIENTNET:
            FLAGS['model_name'] = 'efficientnet-b4'
            model = factory.build(
                efficientnet_yolo_body,
                20,  # todo
                FLAGS['model_name'],
                len(anchors) // 2,
                batch_norm_momentum=0.9,
                batch_norm_epsilon=1e-3,
                num_classes=num_classes,
                drop_connect_rate=0.2,
                data_format="channels_first")

    if prune:
        from tensorflow_model_optimization.python.core.api.sparsity import keras as sparsity
        end_step = np.ceil(1.0 * train_num / batch_size).astype(
            np.int32) * train_step
        new_pruning_params = {
            'pruning_schedule':
            sparsity.PolynomialDecay(initial_sparsity=0.5,
                                     final_sparsity=0.9,
                                     begin_step=0,
                                     end_step=end_step,
                                     frequency=1000)
        }
        pruned_model = sparsity.prune_low_magnitude(model,
                                                    **new_pruning_params)
        pruned_model.compile(optimizer=tf.keras.optimizers.Adam(lr[0],
                                                                epsilon=1e-8),
                             loss=loss)
        pruned_model.fit(train_dataset,
                         epochs=train_step,
                         initial_epoch=0,
                         steps_per_epoch=max(1, train_num // batch_size),
                         callbacks=[
                             checkpoint, cos_lr, logging, map_callback,
                             early_stopping
                         ],
                         validation_data=val_dataset,
                         validation_steps=max(1, val_num // batch_size))
        model = sparsity.strip_pruning(pruned_model)
        model.save_weights(
            os.path.join(
                log_dir,
                str(backbone).split('.')[1].lower() +
                '_trained_weights_pruned.h5'))
        with zipfile.ZipFile(os.path.join(
                log_dir,
                str(backbone).split('.')[1].lower() +
                '_trained_weights_pruned.h5.zip'),
                             'w',
                             compression=zipfile.ZIP_DEFLATED) as f:
            f.write(
                os.path.join(
                    log_dir,
                    str(backbone).split('.')[1].lower() +
                    '_trained_weights_pruned.h5'))
        return

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    if freeze is True:
        with strategy.scope():
            model.compile(optimizer=tf.keras.optimizers.Adam(lr[0],
                                                             epsilon=1e-8),
                          loss=loss)
        model.fit(train_dataset,
                  epochs=freeze_step,
                  initial_epoch=0,
                  steps_per_epoch=max(1, train_num // batch_size),
                  callbacks=[logging, checkpoint],
                  validation_data=val_dataset,
                  validation_steps=max(1, val_num // batch_size))
        model.save_weights(
            os.path.join(
                log_dir,
                str(backbone).split('.')[1].lower() +
                '_trained_weights_stage_1.h5'))
    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    else:
        #if 1:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        with strategy.scope():
            model.compile(optimizer=tf.keras.optimizers.Adam(lr[1],
                                                             epsilon=1e-8),
                          loss=loss)  # recompile to apply the change
        print('Unfreeze all of the layers.')
        model.fit(
            train_dataset,
            epochs=train_step + freeze_step,
            initial_epoch=freeze_step,
            steps_per_epoch=max(1, train_num // batch_size),
            callbacks=[
                checkpoint,
                cos_lr,
                logging,
                early_stopping  #map_callback
            ],
            validation_data=val_dataset,
            validation_steps=max(1, val_num // batch_size))
        model.save_weights(
            os.path.join(
                log_dir,
                str(backbone).split('.')[1].lower() +
                '_trained_weights_final.h5'))
Example #12
def _main():

    # these paths can be customized
    fig_path = 'fig/fackoff823.png'
    log_dir = 'logs/fackoff823/'

    # paths to training data and model/config files
    classes_path = 'model_data/my_classes.txt'
    anchors_path = 'model_data/320_224.txt'
    annotation_path = 'data/all.txt'

    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (224, 320)  # multiple of 32, hw

    is_tiny_version = len(anchors) == 6  # default setting
    if is_tiny_version:
        model = create_tiny_model(input_shape,
                                  anchors,
                                  num_classes,
                                  freeze_body=2,
                                  load_pretrained=0,
                                  weights_path='others/detect8.5.h5')
    else:
        model = create_model(
            input_shape,
            anchors,
            num_classes,
            freeze_body=2,
            weights_path=
            'logs/yolo_body_mobilenetv2_5_93843/trained_weights_final.h5'
        )  # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.5,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(21552)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    if 0:
        model.compile(
            optimizer=Adam(lr=1e-3),
            loss={
                # use custom yolo_loss Lambda layer.
                'yolo_loss': lambda y_true, y_pred: y_pred
            })

        batch_size = 32
        model.summary()
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        history = model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=100,
            initial_epoch=0,
            callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if 1:
        model.summary()
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        # recompile to apply the change
        model.compile(optimizer=Adam(lr=1e-3),
                      loss={
                          'yolo_loss': lambda y_true, y_pred: y_pred
                      })
        print('Unfreeze all of the layers.')

        batch_size = 32  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        history = model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=100,
            initial_epoch=0,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5')

    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(loss))
    plt.plot(epochs, loss, label='Training loss')
    plt.plot(epochs, val_loss, label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()
    plt.savefig(fig_path)
    plt.show()
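
data_generator_wrapper is used by several of these training scripts but is not shown. In the keras-yolo3 lineage it is a thin guard around data_generator, whose batches pair [image_data, *y_true] with a dummy np.zeros(batch_size) target to match the pass-through 'yolo_loss' above; a sketch under that assumption:

def data_generator_wrapper(annotation_lines, batch_size, input_shape, anchors, num_classes):
    # refuse to build a generator for an empty dataset or a non-positive batch size
    n = len(annotation_lines)
    if n == 0 or batch_size <= 0:
        return None
    return data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes)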
Example #13
def _main(path_dataset,
          path_anchors,
          path_weights=None,
          path_output='.',
          path_config=None,
          path_classes=None,
          nb_gpu=1,
          **kwargs):

    config = load_config(path_config, DEFAULT_CONFIG)
    anchors = get_anchors(path_anchors)

    nb_classes = get_nb_classes(path_dataset)
    logging.info('Using %i classes', nb_classes)
    _export_classes(get_dataset_class_names(path_dataset, path_classes),
                    path_output)

    is_tiny_version = len(anchors) == 6  # default setting
    _create_model = create_model_tiny if is_tiny_version else create_model
    name_prefix = 'tiny-' if is_tiny_version else ''
    model = _create_model(config['image-size'],
                          anchors,
                          nb_classes,
                          freeze_body=2,
                          weights_path=path_weights,
                          nb_gpu=nb_gpu)

    tb_logging = TensorBoard(log_dir=path_output)
    checkpoint = ModelCheckpoint(os.path.join(path_output, NAME_CHECKPOINT),
                                 monitor='val_loss',
                                 save_weights_only=True,
                                 save_best_only=True,
                                 period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  verbose=1,
                                  **config.get('CB_learning-rate', {}))
    early_stopping = EarlyStopping(monitor='val_loss',
                                   verbose=1,
                                   **config.get('CB_stopping', {}))

    lines_train, lines_valid, num_val, num_train = load_training_lines(
        path_dataset, config['valid-split'])

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    # See: https://github.com/qqwweee/keras-yolo3/issues/129#issuecomment-408855511
    _yolo_loss = lambda y_true, y_pred: y_pred[
        0]  # use custom yolo_loss Lambda layer.
    _data_generator = partial(data_generator,
                              input_shape=config['image-size'],
                              anchors=anchors,
                              nb_classes=nb_classes,
                              **config['generator'])

    # Save the model architecture
    with open(os.path.join(path_output, name_prefix + 'yolo_architect.yaml'),
              'w') as fp:
        fp.write(model.to_yaml())

    if config['epochs'].get('head', 0) > 0:
        model.compile(optimizer=Adam(lr=1e-3), loss={'yolo_loss': _yolo_loss})

        logging.info(
            'Train on %i samples, val on %i samples, with batch size %i.',
            num_train, num_val, config['batch-size']['head'])
        t_start = time.time()
        model.fit_generator(
            _data_generator(lines_train,
                            batch_size=config['batch-size']['head']),
            steps_per_epoch=max(1, num_train // config['batch-size']['head']),
            validation_data=_data_generator(lines_valid, augment=False),
            validation_steps=max(1, num_val // config['batch-size']['head']),
            epochs=config['epochs']['head'],
            use_multiprocessing=False,
            initial_epoch=0,
            callbacks=[tb_logging, checkpoint, reduce_lr, early_stopping])
        logging.info('Training took %f minutes', (time.time() - t_start) / 60.)
        _export_model(model, path_output, name_prefix, '_head')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    logging.info('Unfreeze all of the layers.')
    for i in range(len(model.layers)):
        model.layers[i].trainable = True
    model.compile(optimizer=Adam(lr=1e-4), loss={'yolo_loss': _yolo_loss})
    logging.info('Train on %i samples, val on %i samples, with batch size %i.',
                 num_train, num_val, config['batch-size']['full'])
    t_start = time.time()
    model.fit_generator(
        _data_generator(lines_train, batch_size=config['batch-size']['full']),
        steps_per_epoch=max(1, num_train // config['batch-size']['full']),
        validation_data=_data_generator(lines_valid, augment=False),
        validation_steps=max(1, num_val // config['batch-size']['full']),
        epochs=config['epochs']['head'] + config['epochs']['full'],
        use_multiprocessing=False,
        initial_epoch=config['epochs']['head'],
        callbacks=[tb_logging, checkpoint, reduce_lr, early_stopping])
    logging.info('Training took %f minutes', (time.time() - t_start) / 60.)
    _export_model(model, path_output, name_prefix, '_final')
Example #14
def main():
    # class YOLO defines the default value, so suppress any default here
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)
    '''
    Command line options
    '''
    parser.add_argument('--model_path',
                        type=str,
                        required=True,
                        help='path to model weight file')

    parser.add_argument('--anchors_path',
                        type=str,
                        required=True,
                        help='path to anchor definitions')

    parser.add_argument(
        '--classes_path',
        type=str,
        required=True,
        help='path to class definitions, default configs/voc_classes.txt',
        default='configs/voc_classes.txt')

    parser.add_argument('--annotation_file',
                        type=str,
                        required=True,
                        help='annotation txt file to verify')

    parser.add_argument('--eval_type',
                        type=str,
                        help='evaluation type (VOC/COCO), default=VOC',
                        default='VOC')

    parser.add_argument('--iou_threshold',
                        type=float,
                        help='IOU threshold for PascalVOC mAP, default=0.5',
                        default=0.5)

    parser.add_argument(
        '--conf_threshold',
        type=float,
        help=
        'confidence threshold for filtering boxes in postprocess, default=0.001',
        default=0.001)

    parser.add_argument(
        '--model_image_size',
        type=str,
        help='model image input size as <num>x<num>, default 416x416',
        default='416x416')

    parser.add_argument(
        '--save_result',
        default=False,
        action="store_true",
        help='Save the detection result image in result/detection dir')

    args = parser.parse_args()

    # param parse
    anchors = get_anchors(args.anchors_path)
    class_names = get_classes(args.classes_path)
    height, width = args.model_image_size.split('x')
    model_image_size = (int(height), int(width))

    eval_AP(args.eval_type, args.model_path, args.annotation_file, anchors,
            class_names, args.iou_threshold, args.conf_threshold,
            model_image_size, args.save_result)
Example #15
def _main():
    import os
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"
    from keras import backend as K
    config = tf.ConfigProto()

    # probably only needed with the GPU build
    # config.gpu_options.allow_growth = True

    sess = tf.Session(config=config)
    K.set_session(sess)

    # annotation_path = 'dataset/WIDER_train.txt'  # data
    # annotation_path = 'VOCdevkit/VOC2010/2010_train_label.txt'
    annotation_path = 'data/all.txt'
    classes_path = 'model_data/my_classes.txt'  # classes

    log_dir = 'logs/384/'  # log directory

    pretrained_path = 'logs/000/trained_weights_final.h5'  # pretrained model
    # pretrained_path = 'logs/000/trained_weights_final.h5'  # pretrained model
    anchors_path = 'model_data/yolo_anchors.txt'  # anchors

    class_names = get_classes(classes_path)  # list of class names
    num_classes = len(class_names)  # number of classes
    anchors = get_anchors(anchors_path)  # list of anchors

    input_shape = (288, 384)  # input image size, multiple of 32, hw
    model = create_model(
        input_shape,
        anchors,
        num_classes,
        freeze_body=2,
        load_pretrained=0,
        weights_path=pretrained_path)  # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=3)  # save weights only
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=3,
                                  verbose=1)  # reduce the learning rate when val_loss stops improving
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)  # stop training early when val_loss stops improving

    val_split = 0.1  # train/validation split ratio
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(47)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)  # number of validation samples
    num_train = len(lines) - num_val  # number of training samples
    """
    把目标当成一个输入,构成多输入模型,把loss写成一个层,作为最后的输出,搭建模型的时候,
    就只需要将模型的output定义为loss,而compile的时候,
    直接将loss设置为y_pred(因为模型的输出就是loss,所以y_pred就是loss),
    无视y_true,训练的时候,y_true随便扔一个符合形状的数组进去就行了。
    """
    if 0:
        model.compile(
            optimizer=Adam(lr=1e-3),
            loss={
                # use the custom yolo_loss Lambda layer
                'yolo_loss': lambda y_true, y_pred: y_pred
            })  # loss function

        batch_size = 32  # batch size
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        history = model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=500,
            initial_epoch=0,
            callbacks=[logging, checkpoint])
        # save the final weights; intermediate checkpoints are saved by callbacks during training
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    if 1:  # train all layers
        for i in range(len(model.layers)):
            model.layers[i].trainable = True

        # a larger learning rate is fine early in training; it can be reduced later
        model.compile(optimizer=Adam(lr=1e-3),
                      loss={
                          'yolo_loss': lambda y_true, y_pred: y_pred
                      })  # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 4  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))

        history = model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=20,
            initial_epoch=0,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5')

    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(len(loss))
    plt.plot(epochs, loss, label='Training loss')
    plt.plot(epochs, val_loss, label='Validation loss')
    plt.title('Training and validation loss')
    plt.legend()

    plt.show()
Example #16
def train(FLAGS):
    """Train yolov3 with different backbone
    """
    prune = FLAGS['prune']
    opt = FLAGS['opt']
    backbone = FLAGS['backbone']
    log_dir = os.path.join(
        'logs',
        str(backbone).split('.')[1].lower() + '_' + str(datetime.date.today()))

    batch_size = FLAGS['batch_size']
    train_dataset_glob = FLAGS['train_dataset']
    val_dataset_glob = FLAGS['val_dataset']
    test_dataset_glob = FLAGS['test_dataset']
    freeze = FLAGS['freeze']
    epochs = FLAGS['epochs'][0] if freeze else FLAGS['epochs'][1]

    class_names = get_classes(FLAGS['classes_path'])
    num_classes = len(class_names)
    anchors = get_anchors(FLAGS['anchors_path'])
    input_shape = FLAGS['input_size']  # multiple of 32, hw
    model_path = FLAGS['model']
    if model_path and not model_path.endswith('.h5'):
        model_path = tf.train.latest_checkpoint(model_path)
    lr = FLAGS['learning_rate']
    tpu_address = FLAGS['tpu_address']
    if tpu_address is not None:
        cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            tpu=tpu_address)
        tf.config.experimental_connect_to_host(cluster_resolver.master())
        tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
        strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)
    else:
        strategy = tf.distribute.MirroredStrategy(devices=FLAGS['gpus'])
    batch_size = batch_size * strategy.num_replicas_in_sync

    train_dataset_builder = Dataset(train_dataset_glob, batch_size, anchors,
                                    num_classes, input_shape)
    train_dataset, train_num = train_dataset_builder.build(epochs)
    val_dataset_builder = Dataset(val_dataset_glob,
                                  batch_size,
                                  anchors,
                                  num_classes,
                                  input_shape,
                                  mode=DATASET_MODE.VALIDATE)
    val_dataset, val_num = val_dataset_builder.build(epochs)
    map_callback = MAPCallback(test_dataset_glob, input_shape, anchors,
                               class_names)
    tensorboard = tf.keras.callbacks.TensorBoard(write_graph=False,
                                                 log_dir=log_dir,
                                                 write_images=True)
    checkpoint = tf.keras.callbacks.ModelCheckpoint(os.path.join(
        log_dir, 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5'),
                                                    monitor='val_loss',
                                                    save_weights_only=True,
                                                    save_best_only=True,
                                                    period=3)
    cos_lr = tf.keras.callbacks.LearningRateScheduler(
        lambda epoch, _: tf.keras.experimental.CosineDecay(lr[1], epochs)
        (epoch).numpy(), 1)
    early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                      min_delta=0,
                                                      patience=epochs // 5,
                                                      verbose=1)

    loss = [
        YoloLoss(idx, anchors, print_loss=False)
        for idx in range(len(anchors) // 3)
    ]

    adv_config = nsl.configs.make_adv_reg_config(multiplier=0.2,
                                                 adv_step_size=0.2,
                                                 adv_grad_norm='infinity')
    train_dataset = strategy.experimental_distribute_dataset(train_dataset)
    val_dataset = strategy.experimental_distribute_dataset(val_dataset)

    with strategy.scope():
        factory = ModelFactory(tf.keras.layers.Input(shape=(*input_shape, 3)),
                               weights_path=model_path)
        if backbone == BACKBONE.MOBILENETV2:
            model = factory.build(mobilenetv2_yolo_body,
                                  155,
                                  len(anchors) // 3,
                                  num_classes,
                                  alpha=FLAGS['alpha'])
        elif backbone == BACKBONE.DARKNET53:
            model = factory.build(darknet_yolo_body, 185,
                                  len(anchors) // 3, num_classes)
        elif backbone == BACKBONE.EFFICIENTNET:
            model = factory.build(efficientnet_yolo_body,
                                  499,
                                  FLAGS['model_name'],
                                  len(anchors) // 3,
                                  batch_norm_momentum=0.9,
                                  batch_norm_epsilon=1e-3,
                                  num_classes=num_classes,
                                  drop_connect_rate=0.2,
                                  data_format="channels_first")

    # Train with frozen layers first, to get a stable loss.
    # Adjust num epochs to your dataset. This step is enough to obtain a not bad model.
    if freeze is True:
        with strategy.scope():
            model.compile(optimizer=tf.keras.optimizers.Adam(lr[0],
                                                             epsilon=1e-8),
                          loss=loss)
        model.fit(epochs, [
            checkpoint, tensorboard,
            tf.keras.callbacks.LearningRateScheduler((lambda _, lr: lr), 1)
        ], train_dataset, val_dataset)
        model.save_weights(
            os.path.join(
                log_dir,
                str(backbone).split('.')[1].lower() +
                '_trained_weights_stage_1.h5'))
    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    else:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        with strategy.scope():
            model.compile(optimizer=tf.keras.optimizers.Adam(lr[1],
                                                             epsilon=1e-8),
                          loss=loss)  # recompile to apply the change
        print('Unfreeze all of the layers.')
        model.fit(epochs, [checkpoint, cos_lr, tensorboard, early_stopping],
                  train_dataset,
                  val_dataset,
                  use_adv=False)
        model.save_weights(
            os.path.join(
                log_dir,
                str(backbone).split('.')[1].lower() +
                '_trained_weights_final.h5'))