Example #1
def run_eval(file_root, annotations_file, weights_file, batch_size, steps):

    model = YOLOv4Model()
    model.load_weights(weights_file)

    seed = int.from_bytes(os.urandom(4), "little")

    pipeline = YOLOv4Pipeline(
        file_root, annotations_file, batch_size, (608, 608), 1, 0, seed,
        dali_use_gpu=True,
        is_training=False
    )
    dataset = pipeline.dataset()

    model.compile(run_eagerly=True)
    model.evaluate(dataset, steps=steps)
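A hedged usage sketch, not part of the original listing: the dataset paths below are placeholders, and os, YOLOv4Model, and YOLOv4Pipeline are assumed to be imported at module level.

# Hypothetical invocation; every path here is illustrative only.
run_eval(
    file_root="coco/images/val2017",
    annotations_file="coco/annotations/instances_val2017.json",
    weights_file="yolov4.weights",
    batch_size=8,
    steps=100,
)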
Example #2
def run_infer(weights_file, labels_file, image_path, out_filename):

    model = YOLOv4Model()
    model.load_weights(weights_file)

    img, input = read_img(image_path, 608)

    # Read class names; splitlines() avoids a trailing empty entry and the
    # with-block closes the file handle.
    with open(labels_file, "r") as f:
        cls_names = f.read().splitlines()

    boxes, scores, labels = inference.infer(model, cls_names, input)

    pixels = add_bboxes(img, boxes, scores, labels)
    if out_filename:
        save_img(out_filename, pixels)
    else:
        draw_img(pixels)
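A hedged usage sketch; the file names are placeholders. Passing a falsy out_filename makes the function display the annotated image via draw_img instead of saving it.

# Hypothetical call; paths are illustrative only.
run_infer(
    weights_file="yolov4.weights",
    labels_file="coco.names",            # hypothetical labels file
    image_path="test_img/doggos.jpg",    # sample image also used in Example #4
    out_filename="doggos_boxes.jpg",     # pass None to display instead of save
)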
Example #3
def run_infer(weights_file, labels_file, image_path, out_filename):

    # Read class names; splitlines() avoids a trailing empty entry and the
    # with-block closes the file handle.
    with open(labels_file, "r") as f:
        cls_names = f.read().splitlines()

    model = YOLOv4Model()
    model.load_weights(weights_file)

    img, input = read_img(image_path, 608)

    prediction = model.predict(input)
    boxes, scores, labels = inference.decode_prediction(prediction, len(cls_names))

    labels = [cls_names[cls] for cls in labels]

    pixels = add_bboxes(img, boxes, scores, labels)
    if out_filename:
        save_img(out_filename, pixels)
    else:
        draw_img(pixels)
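The two run_infer variants differ only in where decoding happens: Example #2 delegates both the forward pass and box decoding to inference.infer, while this version calls model.predict directly, decodes with inference.decode_prediction, and maps numeric class indices to names itself. The explicit form is handier when the raw prediction tensor is needed.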
Example #4
import numpy as np

from img import read_img, draw_img

import math


def sigmoid(x):
    return 1 / (1 + math.exp(-x))


img, input = read_img("test_img/doggos.jpg", 608)

print(input.shape)
# input = np.random.random([1, 608, 608, 3])
model = YOLOv4Model()
model.summary()  # summary() prints directly and returns None
model.load_weights("yolov4.weights")

anchor_sizes = [
    [(12, 16), (19, 36), (40, 28)],
    [(36, 75), (76, 55), (72, 146)],
    [(142, 110), (192, 243), (459, 401)],
]
scales = [1.2, 1.1, 1.05]
pred_boxes = []
for iscale, preds in enumerate(model(input)):
    s = preds.shape
    gw, gh = s[1:3]
    d = s[3]
    for i in range(gw):
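        # --- hedged continuation sketch: the original snippet is cut off
        # here. What follows is an assumption about how a YOLO-style decode
        # of one grid cell typically proceeds, reusing sigmoid, anchor_sizes,
        # scales, and pred_boxes from above; it is not the original code.
        # Assumes eager execution and preds shaped
        # [1, gw, gh, 3 * (5 + num_classes)].
        for j in range(gh):
            for b in range(3):  # three anchor boxes per scale
                o = b * (d // 3)
                # (tx, ty, tw, th, objectness) head for this anchor
                tx, ty, tw, th, conf = preds[0, i, j, o:o + 5]
                scale = scales[iscale]
                # scaled-sigmoid offsets inside the cell, normalized to [0, 1]
                bx = (sigmoid(tx) * scale - (scale - 1) / 2 + i) / gw
                by = (sigmoid(ty) * scale - (scale - 1) / 2 + j) / gh
                # anchor-relative width/height, normalized by the 608 input
                aw, ah = anchor_sizes[iscale][b]
                bw = aw * math.exp(tw) / 608
                bh = ah * math.exp(th) / 608
                if sigmoid(conf) > 0.5:  # arbitrary objectness cutoff
                    pred_boxes.append((bx, by, bw, bh))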
Example #5
def train(file_root, annotations, batch_size, epochs, steps_per_epoch,
          **kwargs):

    seed = kwargs.get("seed")
    if seed is None:
        seed = int.from_bytes(os.urandom(4), "little")
    else:
        os.environ['PYTHONHASHSEED'] = str(seed)
        tf.random.set_seed(seed)
        np.random.seed(seed)
        random.seed(seed)

        os.environ['TF_DETERMINISTIC_OPS'] = '1'
        os.environ['TF_CUDNN_DETERMINISTIC'] = '1'

    if SET_MEMORY_GROWTH:
        pds = tf.config.list_physical_devices('GPU')
        for pd in pds:
            tf.config.experimental.set_memory_growth(pd, True)

    pipeline = kwargs.get("pipeline")
    use_mosaic = kwargs.get("use_mosaic")
    log_dir = kwargs.get("log_dir")
    ckpt_dir = kwargs.get("ckpt_dir")
    start_weights = kwargs.get("start_weights")

    def get_dataset_fn(file_root, annotations, batch_size, pipeline,
                       is_training):
        def dataset_fn(input_context):
            image_size = (608, 608)
            device_id = input_context.input_pipeline_id
            num_threads = input_context.num_input_pipelines

            if pipeline in ('dali-gpu', 'dali-cpu'):
                with tf.device("/gpu:{}".format(
                        input_context.input_pipeline_id)):
                    yolo = YOLOv4Pipeline(file_root,
                                          annotations,
                                          batch_size,
                                          image_size,
                                          num_threads,
                                          device_id,
                                          seed,
                                          use_gpu=pipeline == 'dali-gpu',
                                          is_training=is_training,
                                          use_mosaic=use_mosaic)
                    return yolo.dataset()

            if pipeline == 'numpy':
                yolo = YOLOv4PipelineNumpy(file_root,
                                           annotations,
                                           batch_size,
                                           image_size,
                                           num_threads,
                                           device_id,
                                           seed,
                                           is_training=is_training,
                                           use_mosaic=use_mosaic)
                return yolo.dataset()

        return dataset_fn

    total_steps = epochs * steps_per_epoch
    initial_lr = kwargs.get("lr")
    lr_fn = YOLOLearningRateSchedule(initial_lr)

    initial_epoch = 0

    multigpu = kwargs.get("multigpu")
    strategy = (tf.distribute.MirroredStrategy()
                if multigpu else tf.distribute.get_strategy())

    with strategy.scope():
        model = YOLOv4Model()
        model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=lr_fn))

    if start_weights:
        model.load_weights(start_weights)
        fn = start_weights.split('/')[-1]
        if fn.endswith('.h5') and fn.startswith('epoch_'):
            initial_epoch = int(fn[6:-3])

    input_options = tf.distribute.InputOptions(
        experimental_place_dataset_on_device=True,
        experimental_fetch_to_device=False,
        experimental_replication_mode=tf.distribute.InputReplicationMode.PER_REPLICA)

    dataset = strategy.distribute_datasets_from_function(
        get_dataset_fn(file_root, annotations, batch_size, pipeline, True),
        input_options)

    eval_file_root = kwargs.get('eval_file_root')
    eval_annotations = kwargs.get('eval_annotations')
    eval_dataset = None
    if eval_file_root is not None and eval_annotations is not None:
        eval_dataset = strategy.distribute_datasets_from_function(
            get_dataset_fn(eval_file_root, eval_annotations, 1, 'dali-cpu',
                           False), tf.distribute.InputOptions())

    callbacks = []
    if log_dir:
        callbacks.append(
            tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                           update_freq='epoch'))
    if ckpt_dir:
        callbacks.append(SaveWeightsCallback(ckpt_dir))

    model.fit(
        dataset,
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        initial_epoch=initial_epoch,
        callbacks=callbacks,
        validation_data=eval_dataset,
        validation_steps=kwargs.get('eval_steps'),
        validation_freq=kwargs.get('eval_frequency'),
    )

    return model
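A hedged sketch of how this entry point might be invoked; every path and hyperparameter below is a placeholder, and the keyword names mirror the kwargs.get calls in the function body.

# Hypothetical invocation; all values are illustrative only.
model = train(
    "coco/images/train2017",
    "coco/annotations/instances_train2017.json",
    batch_size=8,
    epochs=10,
    steps_per_epoch=1000,
    pipeline="dali-gpu",     # one of 'dali-gpu', 'dali-cpu', 'numpy'
    use_mosaic=True,
    lr=1e-3,
    multigpu=True,
    seed=42,                 # a fixed seed also switches on deterministic ops
    log_dir="logs",
    ckpt_dir="checkpoints",
    eval_file_root="coco/images/val2017",
    eval_annotations="coco/annotations/instances_val2017.json",
    eval_steps=100,
    eval_frequency=1,
)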
Example #6
def train(file_root, annotations_file, batch_size, epochs, steps_per_epoch,
          **kwargs):

    if SET_MEMORY_GROWTH:
        # Enable memory growth on every visible GPU, not just the first one,
        # since MirroredStrategy below may replicate across all of them.
        physical_devices = tf.config.list_physical_devices('GPU')
        for pd in physical_devices:
            tf.config.experimental.set_memory_growth(pd, True)

    use_gpu = kwargs.get("use_gpu", True)
    log_dir = kwargs.get("log_dir")
    ckpt_dir = kwargs.get("ckpt_dir")
    start_weights = kwargs.get("start_weights")
    initial_epoch = 0

    strategy = tf.distribute.MirroredStrategy()

    with strategy.scope():
        model = YOLOv4Model()
        model.compile(optimizer=tf.keras.optimizers.Adam())

    if start_weights:
        model.load_weights(start_weights)
        fn = start_weights.split('/')[-1]
        if fn.endswith('.h5') and fn.startswith('epoch_'):
            initial_epoch = int(fn[6:-3])

    def dataset_fn(input_context):
        with tf.device("/gpu:{}".format(input_context.input_pipeline_id)):
            device_id = input_context.input_pipeline_id
            num_threads = input_context.num_input_pipelines
            image_size = (608, 608)
            seed = int.from_bytes(os.urandom(4), "little")

            pipeline = YOLOv4Pipeline(file_root, annotations_file, batch_size,
                                      image_size, num_threads, device_id, seed,
                                      use_gpu)
            return pipeline.dataset()

    input_options = tf.distribute.InputOptions(
        experimental_place_dataset_on_device=True,
        experimental_prefetch_to_device=False,
        experimental_replication_mode=tf.distribute.InputReplicationMode.PER_REPLICA)

    dataset = strategy.distribute_datasets_from_function(
        dataset_fn, input_options)

    callbacks = []
    if log_dir:
        callbacks.append(
            tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                           update_freq='epoch'))
    if ckpt_dir:
        callbacks.append(SaveWeightsCallback(ckpt_dir))

    model.fit(dataset,
              epochs=epochs,
              steps_per_epoch=steps_per_epoch,
              initial_epoch=initial_epoch,
              callbacks=callbacks)

    return model
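SaveWeightsCallback is referenced by both train variants but not defined anywhere in this listing. A minimal sketch of a compatible callback, assuming it writes epoch_<N>.h5 files so that the epoch_-prefix parsing in the resume logic above can recover initial_epoch:

import os
import tensorflow as tf

class SaveWeightsCallback(tf.keras.callbacks.Callback):
    """Hypothetical reconstruction: saves model weights after every epoch."""

    def __init__(self, ckpt_dir):
        super().__init__()
        self.ckpt_dir = ckpt_dir

    def on_epoch_end(self, epoch, logs=None):
        os.makedirs(self.ckpt_dir, exist_ok=True)
        # The file name matches the 'epoch_<N>.h5' pattern parsed by train()
        # when resuming from start_weights; Keras epochs are 0-based, so the
        # file records epoch + 1 completed epochs.
        path = os.path.join(self.ckpt_dir, "epoch_{}.h5".format(epoch + 1))
        self.model.save_weights(path)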