Example 1
def test_dataset():
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/tiny_yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)
    input_shape = (416, 416)  # multiple of 32, hw
    batch_size = 1
    annotation_path = 'train.txt'
    with open(annotation_path) as f:
        lines = f.readlines()

    tset = create_dataset(lines[:2000], batch_size, input_shape, anchors,
                          num_classes, False)
    ter = iter(tset)

    for i in range(3):
        a, b = next(ter)
        img, lb1, lb2 = a[0][0], a[1][0], a[2][0]
        plt.imshow(img.numpy())
        plt.show()

    true_confidence = lb1[..., 4:5]

    obj_mask = true_confidence[..., 0] > .7

    tf.boolean_mask(lb1, obj_mask)

    # NOTE: it simply scales the image down proportionally.
    np.min(a[0][0])
    np.max(a[0][0])
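
The masking step above in isolation: a minimal, self-contained sketch (shapes are illustrative, not taken from the example) of filtering a YOLO label tensor by its objectness channel with tf.boolean_mask.

import numpy as np
import tensorflow as tf

# (grid_h, grid_w, anchors_per_scale, 5 + num_classes) label tensor with random values.
y_true = tf.constant(np.random.rand(13, 13, 3, 25), tf.float32)
obj_mask = y_true[..., 4] > 0.7                 # channel 4 is the objectness/confidence score
positives = tf.boolean_mask(y_true, obj_mask)   # -> (num_positive_cells, 5 + num_classes)
print(positives.shape)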
Example 2
def test_get_random_data():
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/tiny_yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)
    input_shape = (416, 416)  # multiple of 32, hw
    batch_size = 1
    annotation_path = 'train.txt'
    with open(annotation_path) as f:
        lines = f.readlines()

    for i in range(10):
        img, box = get_random_data(lines[i], input_shape, False)
        box = box[np.newaxis, :2, :]
        # print(box,input_shape,anchors,num_classes)
        y_true = preprocess_true_boxes(box,
                                       input_shape,
                                       anchors,
                                       num_classes,
                                       is_print=True)

        for a in y_true:
            true_box = a[np.where(a[..., 4] > 0)]
            true_box[:, :2] *= input_shape[::-1]
            true_box[:, 2:4] *= input_shape[::-1]

            xyxy_box = center_to_corner(true_box)
            # print(xyxy_box)
            for b in xyxy_box:
                cv2.rectangle(img, tuple(b[:2].astype(int)),
                              tuple(b[2:4].astype(int)), (255, 0, 0))
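
The center_to_corner helper used above comes from the surrounding project; as an illustration only, a plausible stand-in assuming the usual (cx, cy, w, h) to (x1, y1, x2, y2) convention (the project's actual implementation may differ):

import numpy as np

def center_to_corner(boxes: np.ndarray) -> np.ndarray:
    """Convert (cx, cy, w, h, ...) boxes to (x1, y1, x2, y2, ...); extra columns pass through."""
    out = boxes.copy()
    out[:, 0:2] = boxes[:, 0:2] - boxes[:, 2:4] / 2  # top-left corner
    out[:, 2:4] = boxes[:, 0:2] + boxes[:, 2:4] / 2  # bottom-right corner
    return out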
Example 3
def test_zip_dataset():
    """ 尝试zip dataset,但还是失败了 """
    annotation_path = 'train.txt'
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    val_split = 0.1
    with open(annotation_path) as f:
        annotation_lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(annotation_lines)
    np.random.seed(None)
    num_val = int(len(annotation_lines) * val_split)
    num_train = len(annotation_lines) - num_val

    batch_size = 32
    input_shape = (416, 416)

    num = len(annotation_lines)
    if num == 0 or batch_size <= 0:
        raise ValueError

    def parser(lines):
        image_data = []
        box_data = []
        for line in lines:
            image, box = get_random_data(line.numpy().decode(),
                                         input_shape,
                                         random=True)
            image_data.append(image)
            box_data.append(box)

        image_data = np.array(image_data)
        box_data = np.array(box_data)

        y_true = [
            tf.convert_to_tensor(y, tf.float32) for y in preprocess_true_boxes(
                box_data, input_shape, anchors, num_classes)
        ]
        image_data = tf.convert_to_tensor(image_data, tf.float32)
        return (image_data, *y_true)

    x_set = (tf.data.Dataset.from_tensor_slices(annotation_lines).apply(
        tf.data.experimental.shuffle_and_repeat(
            batch_size * 300, seed=66)).batch(
                batch_size, drop_remainder=True).map(lambda lines: py_function(
                    parser, [lines], [tf.float32] * (1 + len(anchors) // 3))))
    y_set = tf.data.Dataset.from_tensors(tf.zeros(batch_size,
                                                  tf.float32)).repeat()
    dataset = tf.data.Dataset.zip((x_set, y_set))

    sample = next(iter(dataset))
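
The zip trick in isolation: a minimal runnable sketch (toy shapes, not the YOLO pipeline) of pairing a real input dataset with a repeated dummy target so that keras' (x, y) training signature is satisfied.

import tensorflow as tf

xs = tf.data.Dataset.from_tensor_slices(tf.random.uniform((8, 4))).batch(2)
ys = tf.data.Dataset.from_tensors(tf.zeros((2,), tf.float32)).repeat()  # dummy targets
for x, y in tf.data.Dataset.zip((xs, ys)).take(1):
    print(x.shape, y.shape)  # (2, 4) (2,)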
Example 4
def test_resize_img():
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/tiny_yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)
    input_shape = (224, 320)  # multiple of 32, hw
    batch_size = 1
    annotation_path = 'train.txt'
    with open(annotation_path) as f:
        lines = f.readlines()

    img, box = get_random_data(lines[3], input_shape, False)
    plt.imshow(img)
    plt.imsave('')
Example 5
def test_model_graph():
    """ tensorflow.keras 中load weights不支持那个跳过不匹配的层,所以必须手动控制权重 """
    yolo = keras.models.load_model(
        'model_data/yolo_weights.h5')  # type:keras.models.Model
    tbcback = TensorBoard()
    tbcback.set_model(yolo)

    annotation_path = 'train.txt'
    log_dir = 'logs/000/'
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416, 416)  # multiple of 32, hw

    h, w = input_shape
    image_input = keras.Input(shape=(h, w, 3))
    num_anchors = len(anchors)

    y_true = [
        keras.Input(shape=(h // {
            0: 32,
            1: 16,
            2: 8
        }[l], w // {
            0: 32,
            1: 16,
            2: 8
        }[l], num_anchors // 3, num_classes + 5)) for l in range(3)
    ]

    model_body = yolo_body(image_input, num_anchors // 3, num_classes)
    print('Create YOLOv3 model with {} anchors and {} classes.'.format(
        num_anchors, num_classes))

    yolo_weight = yolo.get_weights()
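    # The pretrained COCO head has 255 output channels, i.e. 3 anchors * (5 + 80 classes);
    # each head kernel/bias is sliced down to (num_anchors // 3) * (num_classes + 5)
    # channels below so it matches the new class count.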
    for i, w in enumerate(yolo_weight):
        if w.shape == (1, 1, 1024, 255):
            yolo_weight[i] = w[..., :(num_anchors // 3) * (num_classes + 5)]
        if w.shape == (1, 1, 512, 255):
            yolo_weight[i] = w[..., :(num_anchors // 3) * (num_classes + 5)]
        if w.shape == (1, 1, 256, 255):
            yolo_weight[i] = w[..., :(num_anchors // 3) * (num_classes + 5)]
        if w.shape == (255, ):
            yolo_weight[i] = w[:(num_anchors // 3) * (num_classes + 5)]
    model_body.set_weights(yolo_weight)
Example 6
def test_parser():
    """ 测试parser函数以支持eager tensor """
    annotation_path = 'train.txt'
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    val_split = 0.1
    with open(annotation_path) as f:
        annotation_lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(annotation_lines)
    np.random.seed(None)
    num_val = int(len(annotation_lines) * val_split)
    num_train = len(annotation_lines) - num_val

    batch_size = 32
    input_shape = (416, 416)

    num = len(annotation_lines)
    if num == 0 or batch_size <= 0:
        raise ValueError

    lines = tf.convert_to_tensor(annotation_lines[:10], tf.string)
    """ start parser """
    image_data = []
    box_data = []
    for line in lines:
        image, box = get_random_data(line.numpy().decode(),
                                     input_shape,
                                     random=True)
        image_data.append(image)
        box_data.append(box)

    image_data = np.array(image_data)
    box_data = np.array(box_data)

    y_true = [
        tf.convert_to_tensor(y, tf.float32) for y in preprocess_true_boxes(
            box_data, input_shape, anchors, num_classes)
    ]
    image_data = tf.convert_to_tensor(image_data, tf.float32)
    return (image_data, *y_true)
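
The eager-string pattern exercised above, reduced to a runnable sketch: inside a tf.py_function, each tf.string element arrives as an eager tensor and is unwrapped with .numpy().decode().

import tensorflow as tf

def parse_line(line):
    text = line.numpy().decode()  # EagerTensor -> bytes -> str
    return tf.constant(len(text), tf.int32)

ds = tf.data.Dataset.from_tensor_slices(tf.constant(['img.jpg 10,20,30,40,0']))
ds = ds.map(lambda l: tf.py_function(parse_line, [l], tf.int32))
print(next(iter(ds)))  # length of the decoded annotation line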
Example 7
def test_dict_dataset():
    """ 尝试输出字典形式的dataset """
    annotation_path = 'train.txt'
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    val_split = 0.1
    with open(annotation_path) as f:
        annotation_lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(annotation_lines)
    np.random.seed(None)
    num_val = int(len(annotation_lines) * val_split)
    num_train = len(annotation_lines) - num_val

    batch_size = 32
    input_shape = (416, 416)

    num = len(annotation_lines)
    if num == 0 or batch_size <= 0:
        raise ValueError

    def parser(lines):
        image_data = []
        box_data = []
        for line in lines:
            image, box = get_random_data(line, input_shape, random=True)
            image_data.append(image)
            box_data.append(box)
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors,
                                       num_classes)
        return {
            'input_1': image_data,
            'input_2': y_true[0],
            'input_3': y_true[1],
            'input_4': y_true[2]
        }
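
Why the dictionary keys matter, shown on a toy model (not the YOLO graph): tf.keras routes dict entries to the Input layers whose names match the keys.

import tensorflow as tf
from tensorflow import keras

a = keras.Input(shape=(4,), name='input_1')
b = keras.Input(shape=(2,), name='input_2')
model = keras.Model([a, b], keras.layers.Concatenate()([a, b]))

# Dict keys are matched against the Input names above.
out = model({'input_1': tf.zeros((1, 4)), 'input_2': tf.zeros((1, 2))})
print(out.shape)  # (1, 6)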
Example 8
from train import get_classes, get_anchors, create_model, create_tiny_model, data_generator, data_generator_wrapper
"""把 YOLO weights 轉換為能夠提供給 keras 作為訓練新模型的初始權重,注意這部分多了一個 `-w` 的參數,可以參考 https://github.com/qqwweee/keras-yolo3/blob/master/convert.py#L242 以及 https://stackoverflow.com/questions/42621864/difference-between-keras-model-save-and-model-save-weights 理解其中差別"""
'''
if not os.path.exists("model_data/yolo_weights.h5"):
    print("Converting pretrained YOLOv3 weights for training")
    os.system("python convert.py -w yolov3.cfg yolov3.weights model_data/yolo_weights.h5") 
else:
    print("Pretrained weights exists")

'''
annotation_path = '2007_train.txt'  # annotation file already converted to the expected format
log_dir = 'logs/000/'  # directory where the trained model is saved
classes_path = 'model_data/voc_classes.txt'
anchors_path = 'model_data/yolo_anchors.txt'
class_names = get_classes(classes_path)
num_classes = len(class_names)
anchors = get_anchors(anchors_path)

input_shape = (416, 416)  # multiple of 32, hw

is_tiny_version = len(anchors) == 6  # default setting
if is_tiny_version:
    model = create_tiny_model(input_shape,
                              anchors,
                              num_classes,
                              freeze_body=2,
                              weights_path='model_data/tiny_yolo_weights.h5')
else:
    model = create_model(input_shape,
                         anchors,
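
To make the save()/save_weights() distinction referenced in the note above concrete, a minimal sketch on a toy model (file names are illustrative):

from tensorflow import keras

toy = keras.Sequential([keras.layers.Dense(1, input_shape=(3,))])
toy.compile(optimizer='adam', loss='mse')

toy.save('full_model.h5')             # architecture + weights (+ optimizer state)
toy.save_weights('weights_only.h5')   # weights only

restored = keras.models.load_model('full_model.h5')  # rebuilds the model from the file alone
toy.load_weights('weights_only.h5')                   # requires an existing, matching model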
Example 9
## Derived constants
MODEL_DIR = './models/' + VERSION_NAME
LOG_DIR = './logs/' + VERSION_NAME + '/'
TRAINED_BASE_FILE = os.path.join(MODEL_DIR, 'trained_weights_base.h5')
TRAINED_FINAL_FILE = os.path.join(MODEL_DIR, 'trained_weights_final.h5')

# Create directories
if not os.path.exists(MODEL_DIR):
    os.makedirs(MODEL_DIR)

if not os.path.exists(LOG_DIR):
    os.makedirs(LOG_DIR)

# Setup Model
class_names = train.get_classes(CLASSES_FILE)
num_classes = len(class_names)
anchors = train.get_anchors(ANCHORS_FILE)

logging = TensorBoard(log_dir=LOG_DIR)

checkpoint = ModelCheckpoint(os.path.join(MODEL_DIR, TEMP_MODEL_FORMAT),
                             monitor='val_loss',
                             save_weights_only=True,
                             save_best_only=True,
                             period=3)

reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                              factor=0.1,
                              patience=3,
                              verbose=1)
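
A hedged usage sketch (not part of the original fragment) of how these three callbacks are typically attached to training; a toy model stands in for the YOLO model and data generators:

import numpy as np
from tensorflow import keras

toy = keras.Sequential([keras.layers.Dense(1, input_shape=(4,))])
toy.compile(optimizer='adam', loss='mse')
# checkpoint's save path and monitor come from the constants defined above.
toy.fit(np.zeros((32, 4)), np.zeros((32, 1)),
        validation_split=0.25, epochs=3,
        callbacks=[logging, checkpoint, reduce_lr])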
Example 10
def make_model(model_file,
               weights_file,
               anchor_file,
               end_step,
               initial_sparsity,
               end_sparsity,
               frequency,
               **kwargs):
    annotation_path = 'model_data/combined1.txt'
    log_dir = 'logs/000/'
    classes_path = 'model_data/classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = np.load(anchor_file, allow_pickle=True)
    model_path = 'model_data/'
    init_model = model_path + '/pelee3'
    new_pruned_keras_file = model_path + 'pruned_' + init_model
    epochs = 100
    batch_size = 16
    init_epoch = 50
    input_shape = (384, 288)  # multiple of 32, hw
    config_path = model_file
    weights_path = weights_file
    output_path = model_file + '.tf'
    output_root = os.path.splitext(output_path)[0]
    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines)*val_split)
    num_train = len(lines) - num_val
    # Load weights and config.
    print('Loading weights.')
    weights_file = open(weights_path, 'rb')
    major, minor, revision = np.ndarray(
        shape=(3, ), dtype='int32', buffer=weights_file.read(12))
    if (major*10+minor)>=2 and major<1000 and minor<1000:
        seen = np.ndarray(shape=(1,), dtype='int64', buffer=weights_file.read(8))
    else:
        seen = np.ndarray(shape=(1,), dtype='int32', buffer=weights_file.read(4))
    print('Weights Header: ', major, minor, revision, seen)

    print('Parsing Darknet config.')
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)
    first_layer = True
    print('Creating Keras model.')
    all_layers = []
    weight_decay = float(cfg_parser['net_0']['decay']
                         ) if 'net_0' in cfg_parser.sections() else 5e-4
    count = 0
    out_index = []
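    # PolynomialDecay ramps the sparsity target from initial_sparsity to end_sparsity
    # between step 0 and end_step, updating the pruning masks every `frequency` steps.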
    pruning_params = {
        'pruning_schedule': tfmot.sparsity.keras.PolynomialDecay(
            initial_sparsity=initial_sparsity,
            final_sparsity=end_sparsity,
            begin_step=0,
            end_step=end_step,
            frequency=frequency)
    }
    for section in cfg_parser.sections():
        print('Parsing section {}'.format(section))
        if section.startswith('convolutional'):
            filters = int(cfg_parser[section]['filters'])
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            pad = int(cfg_parser[section]['pad'])
            activation = cfg_parser[section]['activation']
            batch_normalize = 'batch_normalize' in cfg_parser[section]

            padding = 'same' if pad == 1 and stride == 1 else 'valid'

            # Setting weights.
            # Darknet serializes convolutional weights as:
            # [bias/beta, [gamma, mean, variance], conv_weights]
            prev_layer_shape = K.int_shape(prev_layer)

            weights_shape = (size, size, prev_layer_shape[-1], filters)
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.product(weights_shape)

            print('conv2d', 'bn'
                  if batch_normalize else '  ', activation, weights_shape)

            conv_bias = np.ndarray(
                shape=(filters, ),
                dtype='float32',
                buffer=weights_file.read(filters * 4))
            count += filters

            if batch_normalize:
                bn_weights = np.ndarray(
                    shape=(3, filters),
                    dtype='float32',
                    buffer=weights_file.read(filters * 12))
                count += 3 * filters

                bn_weight_list = [
                    bn_weights[0],  # scale gamma
                    conv_bias,  # shift beta
                    bn_weights[1],  # running mean
                    bn_weights[2]  # running var
                ]

            conv_weights = np.ndarray(
                shape=darknet_w_shape,
                dtype='float32',
                buffer=weights_file.read(weights_size * 4))
            count += weights_size

            # DarkNet conv_weights are serialized Caffe-style:
            # (out_dim, in_dim, height, width)
            # We would like to set these to Tensorflow order:
            # (height, width, in_dim, out_dim)
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            conv_weights = [conv_weights] if batch_normalize else [
                conv_weights, conv_bias
            ]

            # Handle activation.
            act_fn = None
            if activation in ('leaky', 'swish'):
                pass  # Advanced activations are added after the conv/BN layers below.
            elif activation != 'linear':
                raise ValueError(
                    'Unknown activation function `{}` in section {}'.format(
                        activation, section))

            # Create Conv2D layer
            if stride > 1:
                # Darknet uses left and top padding instead of 'same' mode
                prev_layer = ZeroPadding2D(((1, 0), (1, 0)))(prev_layer)
            if first_layer:
                conv_layer = Conv2D(
                    filters, (size, size),
                    strides=(stride, stride),
                    kernel_regularizer=l2(weight_decay),
                    use_bias=not batch_normalize,
                    weights=conv_weights,
                    activation=act_fn,
                    padding=padding)(prev_layer)
            else:
                conv_layer = prune.prune_low_magnitude(
                    Conv2D(
                        filters, (size, size),
                        strides=(stride, stride),
                        kernel_regularizer=l2(weight_decay),
                        use_bias=not batch_normalize,
                        weights=conv_weights,
                        activation=act_fn,
                        padding=padding),
                    **pruning_params)(prev_layer)
            if batch_normalize:
                conv_layer = BatchNormalization(
                    weights=bn_weight_list)(conv_layer)
            prev_layer = conv_layer
            first_layer=False
            if activation == 'linear':
                all_layers.append(prev_layer)
            elif activation == 'leaky':
                act_layer = LeakyReLU(alpha=0.1)(prev_layer)
                prev_layer = act_layer
                all_layers.append(act_layer)
            elif activation == 'swish':
                act_layer = sigmoid(prev_layer)
                prev_layer = act_layer
                all_layers.append(act_layer)

        elif section.startswith('route'):
            ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
            layers = [all_layers[i] for i in ids]
            if len(layers) > 1:
                print('Concatenating route layers:', layers)
                concatenate_layer = Concatenate()(layers)
                all_layers.append(concatenate_layer)
                prev_layer = concatenate_layer
            else:
                skip_layer = layers[0]  # only one layer to route
                all_layers.append(skip_layer)
                prev_layer = skip_layer
			
        elif section.startswith('maxpool'):
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            all_layers.append(
                MaxPooling2D(
                    pool_size=(size, size),
                    strides=(stride, stride),
                    padding='same')(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('shortcut'):
            index = int(cfg_parser[section]['from'])
            activation = cfg_parser[section]['activation']
            all_layers.append(Add()([all_layers[index], prev_layer]))
            prev_layer = all_layers[-1]
            all_layers.append(LeakyReLU(alpha=0.1)(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('upsample'):
            stride = int(cfg_parser[section]['stride'])
            assert stride == 2, 'Only stride=2 supported.'
            all_layers.append(UpSampling2D(stride)(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('yolo'):
            out_index.append(len(all_layers)-1)
            all_layers.append(None)
            prev_layer = all_layers[-1]

        elif section.startswith('net'):
            height = int(cfg_parser[section]['height'])
            width = int(cfg_parser[section]['width'])
            input_layer = Input(shape=(height, width, 3))
            prev_layer = input_layer
            output_size = (width, height)

        else:
            raise ValueError(
                'Unsupported section header type: {}'.format(section))

    # Create and save model.
    if len(out_index) == 0:
        out_index.append(len(all_layers) - 1)
    num_anchors = len(anchors[0])
    num_layers = len(out_index)
    if num_layers > 0:
        shape = K.int_shape(all_layers[out_index[0]])
        y1_reshape = KLayer.Reshape(
            (shape[1], shape[2], num_anchors, 5 + num_classes),
            name='l1')(all_layers[out_index[0]])
    if num_layers > 1:
        shape = K.int_shape(all_layers[out_index[1]])
        y2_reshape = KLayer.Reshape(
            (shape[1], shape[2], num_anchors, 5 + num_classes),
            name='l2')(all_layers[out_index[1]])
    yolo_model = Model(inputs=input_layer,
                       outputs=[all_layers[i] for i in out_index])
    if num_layers > 1:
        yolo_model_wrapper = Model(input_layer, [y1_reshape, y2_reshape])
    else:
        yolo_model_wrapper = Model(input_layer, [y1_reshape])
    print(yolo_model.summary())
    return yolo_model, yolo_model_wrapper, output_size

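    # NOTE: nothing below this point is reachable because of the return statement above.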
    if False:
        if args.weights_only:
            model.save_weights('{}'.format(output_path))
            print('Saved Keras weights to {}'.format(output_path))
        else:
            model.save('{}'.format(output_path),save_format='tf')
            print('Saved Keras model to {}'.format(output_path))

        # Check to see if all weights have been read.
        remaining_weights = len(weights_file.read()) / 4
        weights_file.close()
        print('Read {} of {} from Darknet weights.'.format(count, count +
                                                           remaining_weights))
        if remaining_weights > 0:
            print('Warning: {} unused weights'.format(remaining_weights))

    if True:
        model = create_model(model, anchors, num_classes, input_shape, input_layer, layers, out_index)
        # Model.compile() does not accept callbacks; keep the pruning callbacks in a
        # list and pass them to the training call instead.
        yolo_model_wrapper.compile(
            loss=tf.keras.losses.categorical_crossentropy,
            optimizer='adam',
            metrics=['accuracy'])
        pruning_callbacks = [
            sparsity.keras.pruning_callbacks.UpdatePruningStep(),
            sparsity.keras.pruning_callbacks.PruningSummaries(log_dir=log_dir,
                                                              profile_batch=0)
        ]
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        # Recompile to apply the change.
        model.compile(optimizer=Adam(lr=1e-3),
                      loss={'yolo_loss': lambda y_true, y_pred: y_pred})
        print('Unfreeze all of the layers.')
        print(model.summary())

        batch_size = 16  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.format(
            num_train, num_val, batch_size))
        model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:], batch_size,
                                                   input_shape, anchors,
                                                   num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=5,
            initial_epoch=0,
            callbacks=pruning_callbacks)


        # m2train.m2train(args, model)
        #score = model.evaluate(data_generator_wrapper(lines[:num_train], batch_size, input_shape, anchors, num_classes),
        #                       class_names, verbose=0)
        #print('Test loss:', score[0])
        #print('Test accuracy:', score[1])
    final_model = sparsity.keras.prune.strip_pruning(model)
    final_model.summary()
    print('Saving pruned model to: ', new_pruned_keras_file)
    final_model.save('{}'.format(output_path), save_format='tf')
    tflite_model_file = model_path + "sparse.tf"
    converter = tf.lite.TFLiteConverter.from_keras_model(final_model)
    converter.optimizations = [tf.lite.Optimize.OPTIMIZE_FOR_SIZE]
    tflite_model = converter.convert()
    with open(tflite_model_file, 'wb') as f:
        f.write(tflite_model)
Example 11
    def test_get_classes(self):
        self.assertEqual(len(get_classes(self.classes_path)), 52)
Example 12
def _main():
    from train import get_classes, get_anchors
    annotation_path = 'data/input.csv'
    log_dir = 'logs/000/'
    classes_path = 'model_data/stdogs_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    val_split = 0.5

    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (416, 416)  # multiple of 32, hw

    is_tiny_version = len(anchors) == 6  # default setting

    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val

    save_interval = 3
    num_val = int(num_train * 0.2)
    batch_size = 4

    train_data = data_generator_wrapper(lines[:num_train], batch_size,
                                        input_shape, anchors, num_classes)
    eval_data = data_generator_wrapper(lines[num_train:num_train + num_val],
                                       batch_size, input_shape, anchors,
                                       num_classes)

    body = model_body(input_shape,
                      anchors,
                      num_classes,
                      None,
                      freeze_body=2,
                      init_weights='model_data/darknet53.weights.h5',
                      last_save='model_data/tfmodel')

    num_epoch = 100
    optimizer = tf.keras.optimizers.Adam()
    tf.summary.experimental.set_step(0)

    step = 0
    test_summary_writer = tf.summary.create_file_writer(
        os.path.join("logs/test/",
                     datetime.now().strftime("%Y%m%d-%H%M%S")), )
    train_summary_writer = tf.summary.create_file_writer(
        os.path.join("logs/train/",
                     datetime.now().strftime("%Y%m%d-%H%M%S")), )

    def write_loss(writer, **kwargs):
        with writer.as_default():
            for k, v in kwargs.items():
                tf.summary.scalar(k, v, step=step)

    def train_step(image, y1, y2, y3):
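        # Record the forward pass on the tape so gradients of the total loss with
        # respect to the body's trainable weights can be computed and applied.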
        with tf.GradientTape() as tape:
            outputs = body(image)
            loss, losses = loss_wrapper(outputs, [y1, y2, y3], anchors,
                                        num_classes)
            write_loss(train_summary_writer, **losses)
            grads = tape.gradient(loss, body.trainable_weights)
            optimizer.apply_gradients(zip(grads, body.trainable_variables))
            return loss

    def evaluate_step(image, y1, y2, y3):
        # Evaluation needs no gradients, so no GradientTape here.
        outputs = body(image)
        loss, losses = loss_wrapper(outputs, [y1, y2, y3], anchors, num_classes)
        write_loss(test_summary_writer, **losses)
        return loss

    for epoch in range(num_epoch):
        print("epoch:", epoch)
        with tqdm(train_data, total=num_train // batch_size) as tbar:
            for x in train_data.take(num_train // batch_size):
                image, y1, y2, y3 = x
                loss = train_step(image, y1, y2, y3)
                tbar.update(1)
                tbar.set_description("loss={:.3f}".format(loss))
                step = step + 1
        val_loss = 0
        for x in eval_data.take(num_val // batch_size):
            image, y1, y2, y3 = x
            val_loss += evaluate_step(image, y1, y2, y3)

        val_loss = val_loss / num_val * batch_size
        print("val_loss:", val_loss)

        if (epoch + 1) % save_interval == 0:
            tf.saved_model.save(body, "model_data/tfmodel")