예제 #1
0
def logo_predict(img_path, output_folder_path, input_size, logo_classes, yolo_model):
    """Run YOLOv3 logo detection on one image and save the annotated result.

    Args:
        img_path: Path to the input image.
        output_folder_path: Directory where the annotated image is written.
        input_size: YOLO network input resolution (e.g. 416).
        logo_classes: Class-names definition passed through to the model.
        yolo_model: Path to the trained weights file to load.

    Returns:
        Tuple of (predicted_label, LogoFileName). ``predicted_label`` is
        ``None`` when no logo is detected (the original crashed with
        IndexError on ``label[0]`` in that case).
    """
    yolo = Create_Yolov3(input_size=input_size, CLASSES=logo_classes)
    yolo.load_weights(yolo_model)

    # Use only filesystem-safe characters: the original pattern
    # "%d_%m_%y|%H:%M:%S" contains '|' and ':' which are invalid in
    # Windows file names, so saving the prediction image would fail there.
    date_time = datetime.now().strftime("%d_%m_%y_%H-%M-%S")
    LogoFileName = os.path.join(output_folder_path, "logo_pred_" + date_time + ".jpg")

    # detect_image returns a tuple of (label list, annotated image).
    label, _ = detect_image(yolo, img_path, LogoFileName, input_size=input_size, show=True, CLASSES=logo_classes, rectangle_colors=(255,0,0))

    # Guard against an empty detection list instead of raising IndexError.
    predicted_label = label[0] if label else None

    return (predicted_label, LogoFileName)
예제 #2
0
def main():
    """Parse command-line options and run tiny-YOLOv3 face detection on a video."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('--video',
                            type=str,
                            default='IMAGES/test1.mp4',
                            help='video path')
    arg_parser.add_argument('--out', type=str, default='./', help='output path')
    options = arg_parser.parse_args()

    net_input_size = YOLO_INPUT_SIZE

    # Build the detector and restore the fine-tuned tiny face-detection weights.
    detector = Create_Yolov3(input_size=net_input_size, CLASSES=TRAIN_CLASSES)
    detector.load_weights("./checkpoints/yolov3_face_Tiny.h5")

    # The annotated video is written as 'output.mp4' under the requested
    # output prefix.
    detect_video(detector,
                 options.video,
                 options.out + 'output.mp4',
                 show=True,
                 CLASSES=TRAIN_CLASSES,
                 iou_threshold=0.25)
#   GitHub      : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
#   Description : object detection image and video example
#
#================================================================
# Demo script: run COCO-pretrained YOLOv3 on a single street image.
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # hide all GPUs so TensorFlow runs on CPU; set before importing TF
import cv2
import numpy as np
import tensorflow as tf
from yolov3.yolov3 import Create_Yolov3
from yolov3.utils import load_yolo_weights, detect_image, detect_video
from yolov3.configs import *

# Network input resolution and pretrained Darknet weight file from configs.
input_size = YOLO_INPUT_SIZE
Darknet_weights = YOLO_DARKNET_WEIGHTS

image_path = "./IMAGES/street.jpg"
video_path = "./IMAGES/city_drive.mp4"

# Build the inference model (default COCO classes) and load Darknet weights.
yolo = Create_Yolov3(input_size=input_size)
load_yolo_weights(yolo, Darknet_weights)  # use Darknet weights

# NOTE(review): output path "" appears to mean the annotated image is only
# shown, not written to disk — confirm against detect_image.
detect_image(yolo,
             image_path,
             "",
             input_size=input_size,
             show=True,
             rectangle_colors=(255, 0, 0))
#detect_video(yolo, video_path, '', input_size=input_size, show=True, rectangle_colors=(255,0,0))
#detect_realtime(yolo, input_size=input_size, rectangle_colors=(255, 0, 0))
예제 #4
0
# Demo script: run a custom-trained YOLOv3 on one random MNIST test sample.
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # hide all GPUs so TensorFlow runs on CPU; set before importing TF
import cv2
import numpy as np
import random
import time
import tensorflow as tf
from yolov3.yolov3 import Create_Yolov3
from yolov3.utils import detect_image
from yolov3.configs import *

input_size = YOLO_INPUT_SIZE

label_txt = "mnist/mnist_test.txt"
# Close the annotation file deterministically (the original leaked the
# handle) and draw the random index from the actual line count instead of
# a hard-coded 0..200 range, which could run past the end of the file.
with open(label_txt) as f:
    annotations = f.readlines()
ID = random.randint(0, len(annotations) - 1)
image_info = annotations[ID].split()

# First whitespace-separated field of an annotation line is the image path.
image_path = image_info[0]

yolo = Create_Yolov3(input_size=input_size, CLASSES=TRAIN_CLASSES)
yolo.load_weights("./checkpoints/yolov3_custom")  # use keras weights

detect_image(yolo,
             image_path,
             "",
             input_size=input_size,
             show=True,
             CLASSES=TRAIN_CLASSES,
             rectangle_colors=(255, 0, 0))
time.sleep(10)  # keep the result window around briefly before the script exits
예제 #5
0
def main():
    """Train YOLOv3 on the configured dataset.

    Builds the model (optionally transfer-initialized from Darknet weights
    or resumed from a checkpoint), trains with a warmup + cosine learning
    rate schedule, logs losses to TensorBoard, validates once per epoch and
    saves checkpoints according to the TRAIN_SAVE_* flags.
    """
    # May be flipped off below when the checkpoint shapes are incompatible.
    global TRAIN_FROM_CHECKPOINT

    # Let TF grow GPU memory on demand instead of reserving it all upfront.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if len(gpus) > 0:
        try:
            tf.config.experimental.set_memory_growth(gpus[0], True)
        except RuntimeError:
            pass

    # Start every run with a clean TensorBoard log directory.
    if os.path.exists(TRAIN_LOGDIR): shutil.rmtree(TRAIN_LOGDIR)
    writer = tf.summary.create_file_writer(TRAIN_LOGDIR)

    trainset = Dataset('train')
    testset = Dataset('test')

    # Step bookkeeping driving the warmup/cosine LR schedule in train_step.
    steps_per_epoch = len(trainset)
    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = TRAIN_WARMUP_EPOCHS * steps_per_epoch
    total_steps = TRAIN_EPOCHS * steps_per_epoch

    if TRAIN_TRANSFER:
        # COCO-pretrained model used only as a source of transferable weights.
        Darknet = Create_Yolov3(input_size=YOLO_INPUT_SIZE)
        load_yolo_weights(Darknet, Darknet_weights)  # use darknet weights
        #load_tiny_yolo_weights(Darknet, Darknet_weights) # use darknet weights

    yolo = Create_Yolov3(input_size=YOLO_INPUT_SIZE,
                         training=True,
                         CLASSES=TRAIN_CLASSES)
    if TRAIN_FROM_CHECKPOINT:
        try:
            checkpoint_file = TRAIN_CHECKPOINTS_FOLDER + '/' + TRAIN_MODEL_NAME + '.h5'
            yolo.load_weights(checkpoint_file)
        except ValueError:
            # Shape mismatch (e.g. different class count): fall back to
            # transfer-initializing from the Darknet weights instead.
            print("Shapes are incompatible, transfering Darknet weights")
            TRAIN_FROM_CHECKPOINT = False

    if TRAIN_TRANSFER and not TRAIN_FROM_CHECKPOINT:
        # Copy weights layer-by-layer; layers whose shapes differ (the
        # class-dependent detection heads) are skipped.
        for i, l in enumerate(Darknet.layers):
            layer_weights = l.get_weights()
            if layer_weights != []:
                try:
                    yolo.layers[i].set_weights(layer_weights)
                except:
                    print("skipping", yolo.layers[i].name)

    optimizer = tf.keras.optimizers.Adam()

    def train_step(image_data, target):
        """Run one optimization step; returns (step, lr, giou_loss,
        conf_loss, prob_loss, total_loss) as numpy values."""
        with tf.GradientTape() as tape:
            pred_result = yolo(image_data, training=True)
            giou_loss = conf_loss = prob_loss = 0

            # optimizing process
            # Three output scales for full YOLOv3, two for the tiny variant.
            grid = 3 if not TRAIN_YOLO_TINY else 2
            for i in range(grid):
                # pred_result alternates (raw conv output, decoded pred) per scale.
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = compute_loss(pred,
                                          conv,
                                          *target[i],
                                          i,
                                          CLASSES=TRAIN_CLASSES)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = giou_loss + conf_loss + prob_loss

            gradients = tape.gradient(total_loss, yolo.trainable_variables)
            optimizer.apply_gradients(zip(gradients, yolo.trainable_variables))

            # update learning rate
            # about warmup: https://arxiv.org/pdf/1812.01187.pdf&usg=ALkJrhglKOPDjNt6SHGbphTHyMcT0cuMJg
            global_steps.assign_add(1)
            if global_steps < warmup_steps:  # and not TRAIN_TRANSFER:
                # Linear warmup from 0 up to TRAIN_LR_INIT.
                lr = global_steps / warmup_steps * TRAIN_LR_INIT
            else:
                # Cosine decay from TRAIN_LR_INIT down to TRAIN_LR_END.
                lr = TRAIN_LR_END + 0.5 * (TRAIN_LR_INIT - TRAIN_LR_END) * (
                    (1 + tf.cos((global_steps - warmup_steps) /
                                (total_steps - warmup_steps) * np.pi)))
            optimizer.lr.assign(lr.numpy())

            # writing summary data
            with writer.as_default():
                tf.summary.scalar("lr", optimizer.lr, step=global_steps)
                tf.summary.scalar("loss/total_loss",
                                  total_loss,
                                  step=global_steps)
                tf.summary.scalar("loss/giou_loss",
                                  giou_loss,
                                  step=global_steps)
                tf.summary.scalar("loss/conf_loss",
                                  conf_loss,
                                  step=global_steps)
                tf.summary.scalar("loss/prob_loss",
                                  prob_loss,
                                  step=global_steps)
            writer.flush()

        return global_steps.numpy(), optimizer.lr.numpy(), giou_loss.numpy(
        ), conf_loss.numpy(), prob_loss.numpy(), total_loss.numpy()

    # NOTE(review): writes to the same TRAIN_LOGDIR as `writer` above —
    # confirm this is intended rather than a separate validation log dir.
    validate_writer = tf.summary.create_file_writer(TRAIN_LOGDIR)

    def validate_step(image_data, target):
        """Compute validation losses (no gradient update); returns
        (giou_loss, conf_loss, prob_loss, total_loss) as numpy values."""
        with tf.GradientTape() as tape:
            pred_result = yolo(image_data, training=False)
            giou_loss = conf_loss = prob_loss = 0

            # optimizing process
            grid = 3 if not TRAIN_YOLO_TINY else 2
            for i in range(grid):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = compute_loss(pred,
                                          conv,
                                          *target[i],
                                          i,
                                          CLASSES=TRAIN_CLASSES)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = giou_loss + conf_loss + prob_loss

        return giou_loss.numpy(), conf_loss.numpy(), prob_loss.numpy(
        ), total_loss.numpy()

    best_val_loss = 1000  # should be large at start
    for epoch in range(TRAIN_EPOCHS):
        for image_data, target in trainset:
            results = train_step(image_data, target)
            # Step number within the current epoch, for progress display.
            cur_step = results[0] % steps_per_epoch
            print(
                "epoch:{:2.0f} step:{:5.0f}/{}, lr:{:.6f}, giou_loss:{:7.2f}, conf_loss:{:7.2f}, prob_loss:{:7.2f}, total_loss:{:7.2f}"
                .format(epoch, cur_step, steps_per_epoch, results[1],
                        results[2], results[3], results[4], results[5]))

        if len(testset) == 0:
            # No validation data: save unconditionally and skip validation.
            print("configure TEST options to validate model")
            yolo.save_weights(
                os.path.join(TRAIN_CHECKPOINTS_FOLDER, TRAIN_MODEL_NAME))
            continue

        # Accumulate per-batch validation losses over the whole test set.
        count, giou_val, conf_val, prob_val, total_val = 0., 0, 0, 0, 0
        for image_data, target in testset:
            results = validate_step(image_data, target)
            count += 1
            giou_val += results[0]
            conf_val += results[1]
            prob_val += results[2]
            total_val += results[3]
        # writing validate summary data
        with validate_writer.as_default():
            tf.summary.scalar("validate_loss/total_val",
                              total_val / count,
                              step=epoch)
            tf.summary.scalar("validate_loss/giou_val",
                              giou_val / count,
                              step=epoch)
            tf.summary.scalar("validate_loss/conf_val",
                              conf_val / count,
                              step=epoch)
            tf.summary.scalar("validate_loss/prob_val",
                              prob_val / count,
                              step=epoch)
        validate_writer.flush()

        print(
            "\n\ngiou_val_loss:{:7.2f}, conf_val_loss:{:7.2f}, prob_val_loss:{:7.2f}, total_val_loss:{:7.2f}\n\n"
            .format(giou_val / count, conf_val / count, prob_val / count,
                    total_val / count))

        # Checkpoint policy: save every epoch with the loss in the name,
        # only the best model so far, or always overwrite — per the flags.
        if TRAIN_SAVE_CHECKPOINT and not TRAIN_SAVE_BEST_ONLY:
            yolo.save_weights(
                os.path.join(
                    TRAIN_CHECKPOINTS_FOLDER, TRAIN_MODEL_NAME +
                    "_val_loss_{:7.2f}".format(total_val / count)))
        if TRAIN_SAVE_BEST_ONLY and best_val_loss > total_val / count:
            yolo.save_weights(
                os.path.join(TRAIN_CHECKPOINTS_FOLDER, TRAIN_MODEL_NAME))
            best_val_loss = total_val / count
        if not TRAIN_SAVE_BEST_ONLY and not TRAIN_SAVE_CHECKPOINT:
            yolo.save_weights(
                os.path.join(TRAIN_CHECKPOINTS_FOLDER, TRAIN_MODEL_NAME))
예제 #6
0
    "GPU ", "사용 가능"
    if tf.config.experimental.list_physical_devices("GPU") else "사용 불가능")

print(f'cv version {cv2.__version__}')

# tf.debugging.set_log_device_placement(True)

# %%
# Resolve input size and pick the pretrained weight file matching the
# configured model variant (tiny vs full).
input_size = YOLO_INPUT_SIZE
Darknet_weights = YOLO_DARKNET_WEIGHTS
if TRAIN_YOLO_TINY:
    Darknet_weights = YOLO_DARKNET_TINY_WEIGHTS

# video_path = "./IMAGES/street_drive.mp4"

# NOTE(review): the '../' prefixes suggest this script runs one directory
# below the project root — confirm the working directory.
yolo = Create_Yolov3(input_size=input_size, CLASSES='../' + YOLO_COCO_CLASSES)
load_yolo_weights(yolo, '../' + Darknet_weights)  # use Darknet weights

print(f'weight data load ok {Darknet_weights}')

# %%
image_path = "../IMAGES/kite.jpg"

# Load the image and convert OpenCV's BGR channel order to RGB.
original_image = cv2.imread(image_path)
original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2RGB)

# Scale pixel values from the 0-255 range down to 0-1 and fit the image
# into the square 416-size network input.
image_data = image_preprocess(np.copy(original_image),
                              [input_size, input_size])
#   Website     : https://pylessons.com/
#   GitHub      : https://github.com/pythonlessons/TensorFlow-2.x-YOLOv3
#   Description : object detection image and video example
#
#================================================================
# Demo script: run a custom two-class YOLOv3 model on a single image.
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # hide all GPUs so TensorFlow runs on CPU; set before importing TF
import cv2
import numpy as np
import tensorflow as tf
from yolov3.yolov3 import Create_Yolov3
from yolov3.utils import load_yolo_weights, detect_image, detect_video, detect_realtime
from yolov3.configs import *

# Pick the pretrained weight file matching the configured model variant.
input_size = YOLO_INPUT_SIZE
Darknet_weights = YOLO_DARKNET_WEIGHTS
if TRAIN_YOLO_TINY:
    Darknet_weights = YOLO_DARKNET_TINY_WEIGHTS

image_path   = "./IMAGES/a.jpg"
video_path   = "./IMAGES/city.mp4"

# Custom model with classes from "2_names.txt" and Keras-format weights.
yolo = Create_Yolov3(input_size=input_size, CLASSES="2_names.txt")
yolo.load_weights("./checkpoints/yolov3_custom") # use keras weights

# This detect_image variant returns (original image, annotated image,
# detected box locations); the annotated image is saved to "x.jpg".
orig, img, locs = detect_image(yolo, image_path, "x.jpg", input_size=input_size, show=True, CLASSES="2_names.txt", rectangle_colors=(255,0,0))
print("Pokemon(s) Found in Image at:")
print("[Start X, Start Y, End X, End Y]:", locs)
#detect_video(yolo, video_path, './IMAGES/detected.mp4', input_size=input_size, show=False, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
#detect_realtime(yolo, '', input_size=input_size, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
예제 #8
0
# Demo script: COCO-pretrained YOLOv3 on the kite image, saved prediction.
import os

os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # run on the first GPU
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.python.saved_model import tag_constants
from yolov3.yolov3 import Create_Yolov3
from yolov3.utils import load_yolo_weights, detect_image
from yolov3.config import *

image_path = "./data/images/kite.jpg"

# Pretrained full-size YOLOv3 Darknet weights from the config module.
Darknet_weights = YOLO_V3_WEIGHTS

# Build a COCO-class model and load the Darknet weights into it.
yolo = Create_Yolov3(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
load_yolo_weights(yolo, Darknet_weights)  # use Darknet weights

# Run detection; the annotated image is written next to the input.
detect_image(yolo,
             image_path,
             "./data/images/pred_kite.jpg",
             input_size=YOLO_INPUT_SIZE,
             show=True,
             rectangle_colors=(255, 0, 0))
예제 #9
0
# Checkpoint-saving policy flags.
save_best_only = True # saves only the best model according to validation loss
save_checkpoints = False # saves every improved checkpoint during training (may require a lot of disk space)

# Start with a clean TensorBoard log directory for this run.
if os.path.exists(logdir): shutil.rmtree(logdir)
writer = tf.summary.create_file_writer(logdir)

trainset = Dataset('train')
testset = Dataset('test')
# Step counts driving the warmup/total learning-rate schedule.
steps_per_epoch = len(trainset)
global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
warmup_steps = TRAIN_WARMUP_EPOCHS * steps_per_epoch
total_steps = TRAIN_EPOCHS * steps_per_epoch

if TRAIN_TRANSFER:
    # COCO-pretrained model used only as a source of transferable weights.
    Darknet = Create_Yolov3(input_size=input_size)
    load_yolo_weights(Darknet, Darknet_weights) # use darknet weights

yolo = Create_Yolov3(input_size=input_size, training=True, CLASSES=TRAIN_CLASSES)

if TRAIN_TRANSFER:
    # Copy weights layer-by-layer from the pretrained model; layers whose
    # shapes differ (the class-dependent heads) are skipped.
    for i, l in enumerate(Darknet.layers):
        layer_weights = l.get_weights()
        if layer_weights != []:
            try:
                yolo.layers[i].set_weights(layer_weights)
            except:
                print("skipping", yolo.layers[i].name)

optimizer = tf.keras.optimizers.Adam()