Example #1
def main(_argv):
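    # Load the class names, take one shuffled sample from the TFRecord dataset,
    # and draw its ground-truth boxes (with dummy scores of 1) into FLAGS.output.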
    class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
    logging.info('classes loaded')

    dataset = load_tfrecord_dataset(FLAGS.dataset, FLAGS.classes, FLAGS.size)
    dataset = dataset.shuffle(512)

    for image, labels in dataset.take(1):
        boxes = []
        scores = []
        classes = []
        for x1, y1, x2, y2, label in labels:
            if x1 == 0 and x2 == 0:
                continue

            boxes.append((x1, y1, x2, y2))
            scores.append(1)
            classes.append(label)
        nums = [len(boxes)]
        boxes = [boxes]
        scores = [scores]
        classes = [classes]

        logging.info('labels:')
        for i in range(nums[0]):
            logging.info('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                               np.array(scores[0][i]),
                                               np.array(boxes[0][i])))

        img = cv2.cvtColor(image.numpy(), cv2.COLOR_RGB2BGR)
        img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
        cv2.imwrite(FLAGS.output, img)
        logging.info('output saved to: {}'.format(FLAGS.output))
Example #2
def main(_argv):
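    # Build YoloV3 (or the tiny variant), load pre-trained weights, run detection
    # on a single image or one TFRecord sample, and save the annotated result.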
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    if FLAGS.tiny:
        yolo = YoloV3Tiny(classes=FLAGS.num_classes)
    else:
        yolo = YoloV3(classes=FLAGS.num_classes)

    yolo.load_weights(FLAGS.weights).expect_partial()
    logging.info('weights loaded')

    class_names = [c.strip() for c in open(FLAGS.classes).readlines()]
    logging.info('classes loaded')

    if FLAGS.tfrecord:
        dataset = load_tfrecord_dataset(FLAGS.tfrecord, FLAGS.classes,
                                        FLAGS.size)
        dataset = dataset.shuffle(512)
        img_raw, _label = next(iter(dataset.take(1)))
    else:
        img_raw = tf.image.decode_image(open(FLAGS.image, 'rb').read(),
                                        channels=3)

    img = tf.expand_dims(img_raw, 0)
    img = transform_images(img, FLAGS.size)

    t1 = time.time()
    boxes, scores, classes, nums = yolo(img)
    t2 = time.time()
    logging.info('time: {}'.format(t2 - t1))

    logging.info('detections:')
    for i in range(nums[0]):
        logging.info('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                           np.array(scores[0][i]),
                                           np.array(boxes[0][i])))

    img = cv2.cvtColor(img_raw.numpy(), cv2.COLOR_RGB2BGR)
    img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
    cv2.imwrite(FLAGS.output, img)
    logging.info('output saved to: {}'.format(FLAGS.output))
Example #3
    77: 'teddy bear',
    78: 'hair drier',
    79: 'toothbrush',
}


def draw_labels(x, y):
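    # Draw ground-truth boxes (given in normalized coordinates) and their
    # category names onto the image, returning the annotated array.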
    img = x.numpy()
    boxes, classes = tf.split(y, (4, 1), axis=-1)
    classes = classes[..., 0]
    wh = np.flip(img.shape[0:2])  # (rows, cols) -> (width, height) for scaling normalized boxes
    for i in range(len(boxes)):
        temp = boxes[i] * wh
        x1y1 = tuple((np.array(temp[0:2])).astype(np.int32))
        x2y2 = tuple((np.array(temp[2:4])).astype(np.int32))
        img = cv2.rectangle(img, x1y1, x2y2, (255, 0, 0), 2)
        img = cv2.putText(img, category_map[classes[i].numpy()], x1y1,
                          cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
    return img


train_path = "/home/moyez/Documents/Code/Python/M-NET/coco_train.record"

train_dataset = dataset.load_tfrecord_dataset(train_path)

test = train_dataset.take(4)

for x, y in test:
    img = draw_labels(x, y)
    plt.imshow(img.astype(dtype='uint8'))
    plt.show()
Example #4
def main(_argv):
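    # Set up the model (optionally under a MirroredStrategy for multi-GPU),
    # build the input pipelines, and train with a manual eager loop or model.fit.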
    physical_devices = tf.config.experimental.list_physical_devices('GPU')

    # Setup
    if FLAGS.multi_gpu:
        for physical_device in physical_devices:
            tf.config.experimental.set_memory_growth(physical_device, True)

        strategy = tf.distribute.MirroredStrategy()
        print('Number of devices: {}'.format(strategy.num_replicas_in_sync))
        BATCH_SIZE = FLAGS.batch_size * strategy.num_replicas_in_sync
        FLAGS.batch_size = BATCH_SIZE

        with strategy.scope():
            model, optimizer, loss, anchors, anchor_masks = setup_model()
    else:
        model, optimizer, loss, anchors, anchor_masks = setup_model()

    if FLAGS.dataset:
        train_dataset = dataset.load_tfrecord_dataset(
            FLAGS.dataset, FLAGS.classes, FLAGS.size)
    else:
        train_dataset = dataset.load_fake_dataset()
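    # Shuffle, batch, preprocess the images, and encode the ground-truth targets
    # for each anchor mask.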
    train_dataset = train_dataset.shuffle(buffer_size=512)
    train_dataset = train_dataset.batch(FLAGS.batch_size)
    train_dataset = train_dataset.map(lambda x, y: (
        dataset.transform_images(x, FLAGS.size),
        dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    if FLAGS.val_dataset:
        val_dataset = dataset.load_tfrecord_dataset(
            FLAGS.val_dataset, FLAGS.classes, FLAGS.size)
    else:
        val_dataset = dataset.load_fake_dataset()
    val_dataset = val_dataset.batch(FLAGS.batch_size)
    val_dataset = val_dataset.map(lambda x, y: (
        dataset.transform_images(x, FLAGS.size),
        dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))

    if FLAGS.mode == 'eager_tf':
        # Eager mode is great for debugging
        # Non eager graph mode is recommended for real training
        avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
        avg_val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)

        for epoch in range(1, FLAGS.epochs + 1):
            for batch, (images, labels) in enumerate(train_dataset):
                with tf.GradientTape() as tape:
                    outputs = model(images, training=True)
                    regularization_loss = tf.reduce_sum(model.losses)
                    pred_loss = []
                    for output, label, loss_fn in zip(outputs, labels, loss):
                        pred_loss.append(loss_fn(label, output))
                    total_loss = tf.reduce_sum(pred_loss) + regularization_loss

                grads = tape.gradient(total_loss, model.trainable_variables)
                optimizer.apply_gradients(
                    zip(grads, model.trainable_variables))

                logging.info("{}_train_{}, {}, {}".format(
                    epoch, batch, total_loss.numpy(),
                    list(map(lambda x: np.sum(x.numpy()), pred_loss))))
                avg_loss.update_state(total_loss)

            for batch, (images, labels) in enumerate(val_dataset):
                outputs = model(images)
                regularization_loss = tf.reduce_sum(model.losses)
                pred_loss = []
                for output, label, loss_fn in zip(outputs, labels, loss):
                    pred_loss.append(loss_fn(label, output))
                total_loss = tf.reduce_sum(pred_loss) + regularization_loss

                logging.info("{}_val_{}, {}, {}".format(
                    epoch, batch, total_loss.numpy(),
                    list(map(lambda x: np.sum(x.numpy()), pred_loss))))
                avg_val_loss.update_state(total_loss)

            logging.info("{}, train: {}, val: {}".format(
                epoch,
                avg_loss.result().numpy(),
                avg_val_loss.result().numpy()))

            avg_loss.reset_states()
            avg_val_loss.reset_states()
            model.save_weights(
                'checkpoints/yolov3_train_{}.tf'.format(epoch))
    else:
        callbacks = [
            ReduceLROnPlateau(verbose=1),
            EarlyStopping(patience=3, verbose=1),
            ModelCheckpoint('checkpoints/yolov3_train_{epoch}.tf',
                            verbose=1, save_weights_only=True),
            TensorBoard(log_dir='logs')
        ]

        start_time = time.time()
        history = model.fit(train_dataset,
                            epochs=FLAGS.epochs,
                            callbacks=callbacks,
                            validation_data=val_dataset)
        total_time = time.time() - start_time
        print(f'Total Training Time: {total_time:.2f} s')
Example #5
def main(_argv):
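    # Full YOLOv3 training script: build the model, prepare the datasets,
    # optionally transfer weights from a pre-trained checkpoint, then train.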
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    if FLAGS.tiny:
        model = YoloV3Tiny(FLAGS.size,
                           training=True,
                           classes=FLAGS.num_classes)
        anchors = yolo_tiny_anchors
        anchor_masks = yolo_tiny_anchor_masks
    else:
        model = YoloV3(FLAGS.size, training=True, classes=FLAGS.num_classes)
        anchors = yolo_anchors
        anchor_masks = yolo_anchor_masks

    #train_dataset = dataset.load_fake_dataset()
    if FLAGS.dataset:
        train_dataset = dataset.load_tfrecord_dataset(FLAGS.dataset,
                                                      FLAGS.classes,
                                                      FLAGS.size)
    else:
        assert False, "You need to load a Training dataset"
    train_dataset = train_dataset.shuffle(buffer_size=512)
    train_dataset = train_dataset.batch(FLAGS.batch_size)
    train_dataset = train_dataset.map(lambda x, y: (
        dataset.transform_images(x, FLAGS.size),
        dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    #val_dataset = dataset.load_fake_dataset()
    if FLAGS.val_dataset:
        val_dataset = dataset.load_tfrecord_dataset(FLAGS.val_dataset,
                                                    FLAGS.classes, FLAGS.size)
    else:
        assert False, "You need to load a Validation dataset"

    val_dataset = val_dataset.batch(FLAGS.batch_size)
    val_dataset = val_dataset.map(lambda x, y: (
        dataset.transform_images(x, FLAGS.size),
        dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))

    # Configure the model for transfer learning
    if FLAGS.transfer == 'none':
        pass  # Nothing to do
    elif FLAGS.transfer in ['darknet', 'no_output']:
        # Darknet transfer is a special case that works
        # with an incompatible number of classes

        # reset top layers
        if FLAGS.tiny:
            model_pretrained = YoloV3Tiny(FLAGS.size,
                                          training=True,
                                          classes=FLAGS.weights_num_classes
                                          or FLAGS.num_classes)
        else:
            model_pretrained = YoloV3(FLAGS.size,
                                      training=True,
                                      classes=FLAGS.weights_num_classes
                                      or FLAGS.num_classes)
        model_pretrained.load_weights(FLAGS.weights)

        if FLAGS.transfer == 'darknet':
            model.get_layer('yolo_darknet').set_weights(
                model_pretrained.get_layer('yolo_darknet').get_weights())
            freeze_all(model.get_layer('yolo_darknet'))

        elif FLAGS.transfer == 'no_output':
            for l in model.layers:
                if not l.name.startswith('yolo_output'):
                    l.set_weights(
                        model_pretrained.get_layer(l.name).get_weights())
                    freeze_all(l)

    else:
        # All other transfer modes require matching class counts
        model.load_weights(FLAGS.weights)
        if FLAGS.transfer == 'fine_tune':
            # freeze darknet and fine tune other layers
            darknet = model.get_layer('yolo_darknet')
            freeze_all(darknet)
        elif FLAGS.transfer == 'frozen':
            # freeze everything
            freeze_all(model)

    optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
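    # One YoloLoss instance per output scale, selected by its anchor mask.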
    loss = [
        YoloLoss(anchors[mask], classes=FLAGS.num_classes)
        for mask in anchor_masks
    ]

    if FLAGS.mode == 'eager_tf':
        # Eager mode is great for debugging
        # Non eager graph mode is recommended for real training
        avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
        avg_val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)

        for epoch in range(1, FLAGS.epochs + 1):
            for batch, (images, labels) in enumerate(train_dataset):
                with tf.GradientTape() as tape:
                    outputs = model(images, training=True)
                    regularization_loss = tf.reduce_sum(model.losses)
                    pred_loss = []
                    for output, label, loss_fn in zip(outputs, labels, loss):
                        pred_loss.append(loss_fn(label, output))
                    total_loss = tf.reduce_sum(pred_loss) + regularization_loss

                grads = tape.gradient(total_loss, model.trainable_variables)
                optimizer.apply_gradients(zip(grads,
                                              model.trainable_variables))

                logging.info("{}_train_{}, {}, {}".format(
                    epoch, batch, total_loss.numpy(),
                    list(map(lambda x: np.sum(x.numpy()), pred_loss))))
                avg_loss.update_state(total_loss)

            for batch, (images, labels) in enumerate(val_dataset):
                outputs = model(images)
                regularization_loss = tf.reduce_sum(model.losses)
                pred_loss = []
                for output, label, loss_fn in zip(outputs, labels, loss):
                    pred_loss.append(loss_fn(label, output))
                total_loss = tf.reduce_sum(pred_loss) + regularization_loss

                logging.info("{}_val_{}, {}, {}".format(
                    epoch, batch, total_loss.numpy(),
                    list(map(lambda x: np.sum(x.numpy()), pred_loss))))
                avg_val_loss.update_state(total_loss)

            logging.info("{}, train: {}, val: {}".format(
                epoch,
                avg_loss.result().numpy(),
                avg_val_loss.result().numpy()))

            avg_loss.reset_states()
            avg_val_loss.reset_states()
            model.save_weights('checkpoints/yolov3_train_{}.tf'.format(epoch))
    else:
        model.compile(optimizer=optimizer,
                      loss=loss,
                      run_eagerly=(FLAGS.mode == 'eager_fit'))

        callbacks = [
            ReduceLROnPlateau(verbose=1),
            EarlyStopping(patience=3, verbose=1),
            ModelCheckpoint('checkpoints/yolov3_train_{epoch}.tf',
                            verbose=1,
                            save_weights_only=True),
            TensorBoard(log_dir='logs')
        ]

        history = model.fit(train_dataset,
                            epochs=FLAGS.epochs,
                            callbacks=callbacks,
                            validation_data=val_dataset)
Example #6
def main(_):
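    # Build ArcFaceModel from a YAML config, restore the latest checkpoint if one
    # exists, and run a custom SGD training loop with periodic checkpointing and
    # TensorBoard summaries.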
    cfg = None
    with open(FLAGS.cfg_path, 'r') as f:
        cfg = yaml.load(f, Loader=yaml.Loader)

    model = ArcFaceModel(logit_type=cfg['logit_type'],
                         size=cfg['input_size'],
                         num_classes=cfg['num_classes'],
                         embd_shape=cfg['embd_shape'],
                         training=True,
                         margin=cfg['margin'],
                         w_l2factor=cfg['w_l2factor'],
                         logist_scale=cfg['logist_scale'])

    dataset_len = cfg['num_samples']
    steps_per_epoch = dataset_len // cfg['batch_size']
    train_dataset = load_tfrecord_dataset(cfg['train_dataset'],
                                          cfg['batch_size'],
                                          training=True)
    train_dataset = iter(train_dataset)

    learning_rate = tf.constant(cfg['base_lr'])
    print('learning rate = {:.9f}'.format(float(learning_rate)))
    #optimizer = tf.keras.optimizers.Adam(lr=learning_rate)
    optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                        momentum=0.9,
                                        nesterov=True)
    loss_func = LossFunc(cfg['loss_func'], cfg['logist_scale'])

    ckpt_dir = cfg['ckpt_dir']
    ckpt_path = tf.train.latest_checkpoint(ckpt_dir)
    if ckpt_path is not None:
        print('[*] load ckpt from {}'.format(ckpt_path))
        model.load_weights(ckpt_path)
        epochs, steps = get_ckpt_inf(ckpt_path, steps_per_epoch)
    else:
        print('[*] train from scratch')
        epochs, steps = 1, 1
    summary_writer = tf.summary.create_file_writer(cfg['log_dir'])
    additional_loss_weight = cfg['additional_loss_weight']

    for layer in model.layers:
        layer.trainable = True
    model.summary(line_length=80)

    total_loss_sum = tf.Variable(0.)
    reg_loss_sum = tf.Variable(0.)
    pred_loss_sum = tf.Variable(0.)
    additional_loss_sum = tf.Variable(0.)
    acc_sum = tf.Variable(0.)

    while epochs <= cfg['epochs']:
        total_loss_sum = 0.
        reg_loss_sum = 0.
        pred_loss_sum = 0.
        additional_loss_sum = 0.
        acc_sum = 0.

        for batch_num in range(steps_per_epoch):
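            # Forward pass, accuracy and loss computation under GradientTape,
            # followed by a gradient update.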
            imgs, labels = next(train_dataset)
            with tf.GradientTape() as tape:
                embededs, logits = model(imgs)

                acc = tf.reduce_sum(
                    tf.cast(
                        tf.math.equal(labels, tf.math.argmax(logits, axis=1)),
                        tf.float32))
                acc_sum = tf.math.add(acc_sum, acc)

                reg_loss = tf.reduce_sum(model.losses)
                pred_loss, additional_loss = loss_func(labels, logits,
                                                       embededs)
                total_loss = tf.math.add(
                    tf.math.add(pred_loss, reg_loss),
                    tf.multiply(additional_loss, additional_loss_weight))

                reg_loss_sum = tf.math.add(reg_loss_sum, reg_loss)
                pred_loss_sum = tf.math.add(pred_loss_sum, pred_loss)
                additional_loss_sum = tf.math.add(additional_loss_sum,
                                                  additional_loss)
                total_loss_sum = tf.math.add(total_loss_sum, total_loss)

            grads = tape.gradient(total_loss, model.trainable_variables)
            optimizer.apply_gradients(zip(grads, model.trainable_variables))
            steps += 1

            if steps % cfg['save_steps'] == 0:
                print('[*] save ckpt file!')
                pre_ckpt_path = ckpt_path
                ckpt_path = os.path.join(
                    ckpt_dir, 'e_{}_b_{}.ckpt'.format(epochs,
                                                      steps % steps_per_epoch))
                model.save_weights(ckpt_path)
                if pre_ckpt_path is not None:
                    os.system('rm -f {}*'.format(pre_ckpt_path))

        reg_loss_sum = tf.math.divide(reg_loss_sum, steps_per_epoch)
        pred_loss_sum = tf.math.divide(pred_loss_sum, steps_per_epoch)
        additional_loss_sum = tf.math.divide(additional_loss_sum,
                                             steps_per_epoch)
        total_loss_sum = tf.math.divide(total_loss_sum, steps_per_epoch)
        acc_sum = tf.math.multiply(tf.math.divide(acc_sum, dataset_len), 100)
        print('Epoch {}/{}, loss={:.4f}, acc={:.2f}'.format(
            epochs, cfg['epochs'], total_loss_sum.numpy(), acc_sum.numpy()),
              flush=True,
              end='')
        print(', reg={:.4f}, pred={:.4f}, add={:.4f}'.format(
            reg_loss_sum.numpy(), pred_loss_sum.numpy(),
            additional_loss_sum.numpy()),
              flush=True)

        with summary_writer.as_default():
            tf.summary.scalar('loss/reg loss', reg_loss_sum, step=steps)
            tf.summary.scalar('loss/pred loss', pred_loss_sum, step=steps)
            tf.summary.scalar('loss/additional_loss',
                              additional_loss_sum,
                              step=steps)
            tf.summary.scalar('loss/total loss', total_loss_sum, step=steps)
            tf.summary.scalar('acc', acc_sum, step=steps)

        epochs += 1
Example #7
def main():
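    # Train the custom MNET model on TFRecord data with Keras model.fit,
    # logging metrics to TensorBoard.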
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.compat.v1.Session(config=config)

    # K.set_epsilon(1e-4)
    # K.backend.set_floatx('float16')

    model = mnet.MNET_complete(416, training=True)
    anchors = mnet.mnet_anchors
    anchor_masks = mnet.mnet_anchor_masks

    batch_size = 8

    # Get the training set
    train_dataset = dataset.load_tfrecord_dataset(train_path)

    # Allows you to filter out one class; doesn't seem to work though
    @tf.function
    def filter_person(x, y):
        if tf.reduce_any(tf.math.equal(tf.constant([1]), tf.cast(y, dtype='int32'))):
            return False
        else:
            return True

    # train_dataset = train_dataset.filter(filter_person)

    train_dataset = train_dataset.shuffle(buffer_size=1024)
    train_dataset = train_dataset.batch(batch_size)

    train_dataset = train_dataset.map(lambda x, y: (
        dataset.transform_images(x, 416),
        dataset.transform_targets(y, anchors, anchor_masks, 80)))

    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    val_dataset = dataset.load_tfrecord_dataset(val_path)

    val_dataset = val_dataset.batch(batch_size)
    val_dataset = val_dataset.map(lambda x, y: (
        dataset.transform_images(x, 416),
        dataset.transform_targets(y, anchors, anchor_masks, 80)))

    optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
    loss = [mnet.Loss(anchors[mask], classes=80) for mask in anchor_masks]
    mAP = [mnet.map(anchors[mask]) for mask in anchor_masks]
    
    avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
    avg_val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)

    eager = False

    logdir = "logs/" + datetime.now().strftime("%Y-%m-%d-%H:%M")
    tensorboard_callback = K.callbacks.TensorBoard(log_dir=logdir)

    # model.compile(optimizer=optimizer, loss=loss, run_eagerly=(False))
    model.compile(optimizer=optimizer, loss=loss, run_eagerly=(False), metrics=[*mAP])

    callbacks = [
        # ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=3, verbose=1),
        EarlyStopping(monitor='val_loss', patience=3, verbose=1),
        ModelCheckpoint('checkpoints/mnet_train_{epoch}_' +
                        datetime.now().strftime("%m-%d") + '.tf',
                        verbose=1, save_weights_only=True),
        tensorboard_callback]

    history = model.fit(train_dataset,
                        validation_data=val_dataset,
                        epochs=100,
                        callbacks=callbacks,
                        validation_steps=int(val_size / batch_size))
Example #8
def main():
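    # Train yolov3_tiny from pre-trained weights, using either a manual eager
    # loop or model.fit depending on args.mode.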
    args = parse_arguments()
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    model = yolov3_tiny(training=True)
    anchors = yolo_tiny_anchors
    anchor_masks = yolo_tiny_anchor_masks

    train_dataset = dataset.load_tfrecord_dataset(args.train_dataset,
                                                  args.classes, args.size)
    train_dataset = train_dataset.shuffle(buffer_size=512)
    train_dataset = train_dataset.batch(args.batch_size)
    train_dataset = train_dataset.map(lambda x, y: (
        dataset.transform_images(x, args.size),
        dataset.transform_targets(y, anchors, anchor_masks, args.size)))
    train_dataset = train_dataset.prefetch(
        buffer_size=tf.data.experimental.AUTOTUNE)

    val_dataset = dataset.load_tfrecord_dataset(args.val_dataset, args.classes,
                                                args.size)
    val_dataset = val_dataset.batch(args.batch_size)
    val_dataset = val_dataset.map(lambda x, y: (
        dataset.transform_images(x, args.size),
        dataset.transform_targets(y, anchors, anchor_masks, args.size)))
    # Load pre-trained weights (the class count must match)
    model.load_weights(args.weights)

    optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate)
    loss = [get_yolo_loss(anchors[mask]) for mask in anchor_masks]

    if args.mode == 'eager_tf':
        # Eager mode is great for debugging
        # Non eager graph mode is recommended for real training
        avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
        avg_val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)

        for epoch in range(1, args.epochs + 1):
            for batch, (images, labels) in enumerate(train_dataset):
                with tf.GradientTape() as tape:
                    outputs = model(images, training=True)
                    regularization_loss = tf.reduce_sum(model.losses)
                    pred_loss = []
                    for output, label, loss_fn in zip(outputs, labels, loss):
                        pred_loss.append(loss_fn(label, output))
                    total_loss = tf.reduce_sum(pred_loss) + regularization_loss

                grads = tape.gradient(total_loss, model.trainable_variables)
                optimizer.apply_gradients(zip(grads,
                                              model.trainable_variables))

                print(
                    f"{epoch}_train_{batch}, {total_loss.numpy()}, {list(map(lambda x: np.sum(x.numpy()), pred_loss))}"
                )
                avg_loss.update_state(total_loss)

            for batch, (images, labels) in enumerate(val_dataset):
                outputs = model(images)
                regularization_loss = tf.reduce_sum(model.losses)
                pred_loss = []
                for output, label, loss_fn in zip(outputs, labels, loss):
                    pred_loss.append(loss_fn(label, output))
                total_loss = tf.reduce_sum(pred_loss) + regularization_loss

                print(
                    f"{epoch}_val_{batch}, {total_loss.numpy()}, {list(map(lambda x: np.sum(x.numpy()), pred_loss))}"
                )
                avg_val_loss.update_state(total_loss)

            print(
                f"{epoch}, train: {avg_loss.result().numpy()}, val: {avg_val_loss.result().numpy()}"
            )
            avg_loss.reset_states()
            avg_val_loss.reset_states()
            model.save_weights('checkpoints/yolov3_train_{}.tf'.format(epoch))
    else:
        model.compile(optimizer=optimizer,
                      loss=loss,
                      run_eagerly=(args.mode == 'eager_fit'))

        if not os.path.exists(f"./logs"):
            os.mkdir(f"logs")

        callbacks = [
            tf.keras.callbacks.ReduceLROnPlateau(verbose=1),
            tf.keras.callbacks.EarlyStopping(patience=3, verbose=1),
            tf.keras.callbacks.ModelCheckpoint(
                'checkpoints/yolov3_train_{epoch}.tf',
                verbose=1,
                save_weights_only=True),
            tf.keras.callbacks.TensorBoard(log_dir='logs')
        ]

        history = model.fit(train_dataset,
                            epochs=args.epochs,
                            callbacks=callbacks,
                            validation_data=val_dataset)
        _ = history