Example #1
def plate_number(image_path):
    yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
    yolo.load_weights("./checkpoints/yolov3_custom")  # use keras weights
    image, bb = detect_image(yolo,
                             image_path,
                             '',
                             input_size=YOLO_INPUT_SIZE,
                             show=False,
                             rectangle_colors=(255, 0, 0))
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    c = np.array(bb[0][:4], dtype=np.int32)
    org_image = cv2.imread(image_path)
    cropped = org_image[c[1]:c[3], c[0]:c[2]]
    cropped = cv2.cvtColor(cropped, cv2.COLOR_BGR2RGB)
    cropped = cv2.resize(cropped, (224, 224))
    gray = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (7, 7), 0)
    binary = cv2.threshold(blur, 180, 255,
                           cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
    kernel3 = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    thre_mor = cv2.morphologyEx(binary, cv2.MORPH_DILATE, kernel3)
    cont, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)

    # Create a copy "test_roi" of the cropped plate image to draw bounding boxes on
    test_roi = cropped.copy()

    # Initialize a list which will be used to append character images
    crop_characters = []

    # define standard width and height of character
    digit_w, digit_h = 30, 60

    for c in sort_contours(cont):
        (x, y, w, h) = cv2.boundingRect(c)
        ratio = h / w
        if 3 <= ratio < 5:  # Only select contour with defined ratio
            #if h/plate_image.shape[0]>=0.5: # Select contour which has the height larger than 50% of the plate
            # Draw a bounding box around each digit
            cv2.rectangle(test_roi, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # Separate each character and prepare it for prediction
            curr_num = thre_mor[y:y + h, x:x + w]
            curr_num = cv2.resize(curr_num, dsize=(digit_w, digit_h))
            _, curr_num = cv2.threshold(curr_num, 220, 255,
                                        cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            crop_characters.append(curr_num)

    #print("Detect {} letters...".format(len(crop_characters)))
    final_string = ''
    for i, character in enumerate(crop_characters):
        title = np.array2string(predict_from_model(character))
        final_string += title.strip("'[]")

    return final_string
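
Example #1 relies on two helpers that are not shown: sort_contours and predict_from_model. Below is a minimal sketch of sort_contours, assuming it simply orders contours left-to-right by the x-coordinate of their bounding boxes (as imutils.contours.sort_contours does) so the cropped characters are concatenated in reading order; predict_from_model is likewise assumed to wrap a separate character-classifier model and return the decoded label.

# Hypothetical helper assumed by Example #1: order contours left-to-right
# so the recognised characters are concatenated in reading order.
import cv2

def sort_contours(contours, reverse=False):
    bounding_boxes = [cv2.boundingRect(c) for c in contours]
    ordered = sorted(zip(contours, bounding_boxes),
                     key=lambda pair: pair[1][0],  # x of the bounding box
                     reverse=reverse)
    return [c for c, _ in ordered]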
Example #2
def main():
    webapi = WebAPI(IP_ADDR, PORT, ACCOUNT, PASSWORD)

    cameras = webapi.list_cameras()
    camera_ids = [camera['id'] for camera in cameras]

    # the last added camera in the Surveillance Station
    camera_id = camera_ids[-1]
    rtsp = webapi.get_liveview_rtsp(camera_id)

    fall_label = webapi.create_recording_label('fall_event')

    # Initialize fall detection model
    yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE)
    load_yolo_weights(yolo, YOLO_V3_WEIGHTS)  # use Darknet weights
    classes = read_class_names(YOLO_COCO_CLASSES)

    q = queue.Queue()

    p1 = threading.Thread(target=read_frame, args=([q, rtsp]))
    p2 = threading.Thread(
        target=process_frame,
        args=([q, webapi, camera_id, yolo, classes, fall_label]))
    p1.start()
    p2.start()

    p1.join()
    p2.join()

    webapi.logout()
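
read_frame and process_frame are not defined in Example #2. A minimal sketch of the assumed producer/consumer pair follows: one thread pushes frames from the RTSP stream onto the queue, the other pops them and runs the fall-detection model. The detection and recording-trigger logic is project-specific and only indicated by comments.

# Hypothetical helpers assumed by Example #2 (names and argument order
# inferred from the threading.Thread calls above).
import cv2

def read_frame(q, rtsp):
    # Producer: push frames from the RTSP live view onto the queue.
    cap = cv2.VideoCapture(rtsp)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        q.put(frame)
    cap.release()
    q.put(None)  # sentinel so the consumer can stop

def process_frame(q, webapi, camera_id, yolo, classes, fall_label):
    # Consumer: run the YOLO model on each frame and react to falls.
    while True:
        frame = q.get()
        if frame is None:
            break
        # ... run `yolo` on `frame`, decide whether a fall occurred, and
        # e.g. start a recording on `camera_id` tagged with `fall_label`
        # through `webapi` ...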
Example #3
def main():
    # Initialization

    # Load the YOLO neural network
    time_start = datetime.now()
    checkpoints_path = "checkpoints/yolov3_C920-13all-50epochs_Tiny"
    yolo = Create_Yolo(input_size=416, CLASSES="model_data/classes.txt")
    yolo.load_weights(checkpoints_path)

    # cam = cv.VideoCapture(0)
    # cam.set(cv.CAP_PROP_FRAME_WIDTH, 1920)   #FULL_HD
    # cam.set(cv.CAP_PROP_FRAME_HEIGHT, 1080)
    cv.namedWindow("Camera", cv.WINDOW_NORMAL)
    cv.namedWindow("PCB Left", cv.WINDOW_NORMAL)
    cv.namedWindow("PCB Right", cv.WINDOW_NORMAL)
    frame = cv.imread("/home/jacq/Documentos/datasets/C920HD-3-renamed/opencv_frame_80.png")  # This is just for testing
    #print(frame.shape)
    # Infinite loop
    time_end = datetime.now()
    print("Model loading time: " + str(time_end - time_start))
    while True:
        # ret, frame = cam.read()
        # if not ret:
        #     print("failed to grab frame")
        #     break
        cv.imshow("Camera", frame)
        k = cv.waitKey(1)
        if k%256 == 27:             # ESC pressed
            #print("Escape hit, closing...")
            break
        elif k%256 == 32:           # SPACE pressed
            # Call the function that processes the frame
            time_start = datetime.now()
            frame = cv.imread("/home/jacq/Documentos/datasets/C920HD-3-renamed/opencv_frame_80.png")  # This is just for testing
            pcbR, pcbL = makeDetection(frame, yolo)  # we can pass the model to this function
            cv.imshow("PCB Left", pcbL)
            cv.imshow("PCB Right", pcbR)
            time_end = datetime.now()
            print("Tempo de processamento: "+str(time_end-time_start))

    cv.destroyAllWindows()
Example #4
def main():
    yolo = Create_Yolo(input_size=416, CLASSES="tools/labels.txt")

    checkpoints_path = "checkpoints/yolov3_custom_Tiny"
    yolo.load_weights(checkpoints_path)

    # yolo.save('save/yolov3')
    yolo.save('save/yolov3.h5')
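
As a quick sanity check, the exported HDF5 file can be reloaded. The sketch below assumes the model reloads as a standard Keras model; depending on what Create_Yolo builds, compile=False (and possibly custom_objects) may be required.

# Hypothetical usage: reload the exported .h5 model and run a dummy input.
import numpy as np
import tensorflow as tf

model = tf.keras.models.load_model('save/yolov3.h5', compile=False)
dummy = np.zeros((1, 416, 416, 3), dtype=np.float32)   # matches input_size=416
outputs = model(dummy)
print(tf.nest.map_structure(lambda t: t.shape, outputs))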
Example #5
def process_recordings(q, webapi):
    # Initialize clothing category classifier
    cfg = Config.fromfile(CONFIG_FILE)

    landmark_tensor = torch.zeros(8)

    model = build_predictor(cfg.model)
    load_checkpoint(model, CHECKPOINT_FILE, map_location='cpu')
    print('model loaded from {}'.format(CHECKPOINT_FILE))
    if USE_CUDA:
        model.cuda()
        landmark_tensor = landmark_tensor.cuda()

    model.eval()
    cate_predictor = CatePredictor(cfg.data.test, tops_type=[1])

    # Initialize tracker model
    yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE)
    load_yolo_weights(yolo, Darknet_weights)  # use Darknet weights

    while True:
        if q.empty():
            time.sleep(1)
            continue

        recording_id, recording_filename = q.get()

        if recording_id == -1:
            break

        recording_filepath = os.path.join(RECORDINGS_DIRECTORY,
                                          recording_filename)
        Object_tracking(yolo,
                        webapi,
                        recording_id,
                        recording_filepath,
                        model,
                        cate_predictor,
                        landmark_tensor,
                        iou_threshold=0.1,
                        rectangle_colors=(255, 0, 0),
                        Track_only=["person"])
Example #6
            ap_dictionary[class_name] = ap

        results_file.write("\n# mAP of all classes\n")
        mAP = sum_AP / n_classes

        text = "mAP = {:.3f}%, {:.2f} FPS".format(mAP * 100, fps)
        results_file.write(text + "\n")
        print(text)

        return mAP * 100


if __name__ == '__main__':
    if YOLO_CUSTOM_WEIGHTS != False:
        weights = YOLO_CUSTOM_WEIGHTS
        yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
        yolo.load_weights(weights)
    else:
        if YOLO_TYPE == "yolov4":
            Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
        if YOLO_TYPE == "yolov3":
            Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS

        yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE)
        load_yolo_weights(yolo, Darknet_weights)  # use Darknet weights

    testset = Dataset('test', TEST_INPUT_SIZE=YOLO_INPUT_SIZE)
    get_mAP(yolo,
            testset,
            score_threshold=0.05,
            iou_threshold=0.50,
            TEST_INPUT_SIZE=YOLO_INPUT_SIZE)
Example #7
def main():
    global TRAIN_FROM_CHECKPOINT

    gpus = tf.config.experimental.list_physical_devices('GPU')
    print(f'GPUs {gpus}')
    if len(gpus) > 0:
        try:
            tf.config.experimental.set_memory_growth(gpus[0], True)
        except RuntimeError:
            pass

    if os.path.exists(TRAIN_LOGDIR): shutil.rmtree(TRAIN_LOGDIR)
    writer = tf.summary.create_file_writer(TRAIN_LOGDIR)

    trainset = Dataset('train')
    testset = Dataset('test')

    steps_per_epoch = len(trainset)
    global_steps = tf.Variable(1, trainable=False, dtype=tf.int64)
    warmup_steps = TRAIN_WARMUP_EPOCHS * steps_per_epoch
    total_steps = TRAIN_EPOCHS * steps_per_epoch

    if TRAIN_TRANSFER:
        Darknet = Create_Yolo(input_size=YOLO_INPUT_SIZE,
                              CLASSES=YOLO_COCO_CLASSES)
        load_yolo_weights(Darknet, Darknet_weights)  # use darknet weights

    yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE,
                       training=True,
                       CLASSES=TRAIN_CLASSES)
    if TRAIN_FROM_CHECKPOINT:
        try:
            yolo.load_weights(TRAIN_FROM_CHECKPOINT)
        except ValueError:
            print("Shapes are incompatible, transfering Darknet weights")
            TRAIN_FROM_CHECKPOINT = False

    if TRAIN_TRANSFER and not TRAIN_FROM_CHECKPOINT:
        for i, l in enumerate(Darknet.layers):
            layer_weights = l.get_weights()
            if layer_weights != []:
                try:
                    yolo.layers[i].set_weights(layer_weights)
                except Exception:
                    print("skipping", yolo.layers[i].name)

    optimizer = tf.keras.optimizers.Adam()

    def train_step(image_data, target):
        with tf.GradientTape() as tape:
            pred_result = yolo(image_data, training=True)
            giou_loss = conf_loss = prob_loss = 0

            # optimizing process
            grid = 3 if not TRAIN_YOLO_TINY else 2
            for i in range(grid):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = compute_loss(pred,
                                          conv,
                                          *target[i],
                                          i,
                                          CLASSES=TRAIN_CLASSES)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = giou_loss + conf_loss + prob_loss

            gradients = tape.gradient(total_loss, yolo.trainable_variables)
            optimizer.apply_gradients(zip(gradients, yolo.trainable_variables))

            # update learning rate
            # about warmup: https://arxiv.org/pdf/1812.01187.pdf
            global_steps.assign_add(1)
            if global_steps < warmup_steps:  # and not TRAIN_TRANSFER:
                lr = global_steps / warmup_steps * TRAIN_LR_INIT
            else:
                lr = TRAIN_LR_END + 0.5 * (TRAIN_LR_INIT - TRAIN_LR_END) * (
                    (1 + tf.cos((global_steps - warmup_steps) /
                                (total_steps - warmup_steps) * np.pi)))
            optimizer.lr.assign(lr.numpy())

            # writing summary data
            with writer.as_default():
                tf.summary.scalar("lr", optimizer.lr, step=global_steps)
                tf.summary.scalar("loss/total_loss",
                                  total_loss,
                                  step=global_steps)
                tf.summary.scalar("loss/giou_loss",
                                  giou_loss,
                                  step=global_steps)
                tf.summary.scalar("loss/conf_loss",
                                  conf_loss,
                                  step=global_steps)
                tf.summary.scalar("loss/prob_loss",
                                  prob_loss,
                                  step=global_steps)
            writer.flush()

        return global_steps.numpy(), optimizer.lr.numpy(), giou_loss.numpy(
        ), conf_loss.numpy(), prob_loss.numpy(), total_loss.numpy()

    validate_writer = tf.summary.create_file_writer(TRAIN_LOGDIR)

    def validate_step(image_data, target):
        with tf.GradientTape() as tape:
            pred_result = yolo(image_data, training=False)
            giou_loss = conf_loss = prob_loss = 0

            # optimizing process
            grid = 3 if not TRAIN_YOLO_TINY else 2
            for i in range(grid):
                conv, pred = pred_result[i * 2], pred_result[i * 2 + 1]
                loss_items = compute_loss(pred,
                                          conv,
                                          *target[i],
                                          i,
                                          CLASSES=TRAIN_CLASSES)
                giou_loss += loss_items[0]
                conf_loss += loss_items[1]
                prob_loss += loss_items[2]

            total_loss = giou_loss + conf_loss + prob_loss

        return giou_loss.numpy(), conf_loss.numpy(), prob_loss.numpy(
        ), total_loss.numpy()

    mAP_model = Create_Yolo(
        input_size=YOLO_INPUT_SIZE,
        CLASSES=TRAIN_CLASSES)  # create second model to measure mAP

    best_val_loss = 1000  # should be large at start
    for epoch in range(TRAIN_EPOCHS):
        for image_data, target in trainset:
            results = train_step(image_data, target)
            cur_step = results[0] % steps_per_epoch
            print(
                "epoch:{:2.0f} step:{:5.0f}/{}, lr:{:.6f}, giou_loss:{:7.2f}, conf_loss:{:7.2f}, prob_loss:{:7.2f}, total_loss:{:7.2f}"
                .format(epoch, cur_step, steps_per_epoch, results[1],
                        results[2], results[3], results[4], results[5]))

        if len(testset) == 0:
            print("configure TEST options to validate model")
            yolo.save_weights(
                os.path.join(TRAIN_CHECKPOINTS_FOLDER, TRAIN_MODEL_NAME))
            continue

        count, giou_val, conf_val, prob_val, total_val = 0., 0, 0, 0, 0
        for image_data, target in testset:
            results = validate_step(image_data, target)
            count += 1
            giou_val += results[0]
            conf_val += results[1]
            prob_val += results[2]
            total_val += results[3]
        # writing validate summary data
        with validate_writer.as_default():
            tf.summary.scalar("validate_loss/total_val",
                              total_val / count,
                              step=epoch)
            tf.summary.scalar("validate_loss/giou_val",
                              giou_val / count,
                              step=epoch)
            tf.summary.scalar("validate_loss/conf_val",
                              conf_val / count,
                              step=epoch)
            tf.summary.scalar("validate_loss/prob_val",
                              prob_val / count,
                              step=epoch)
        validate_writer.flush()

        print(
            "\n\ngiou_val_loss:{:7.2f}, conf_val_loss:{:7.2f}, prob_val_loss:{:7.2f}, total_val_loss:{:7.2f}\n\n"
            .format(giou_val / count, conf_val / count, prob_val / count,
                    total_val / count))

        if TRAIN_SAVE_CHECKPOINT and not TRAIN_SAVE_BEST_ONLY:
            save_directory = os.path.join(
                TRAIN_CHECKPOINTS_FOLDER, TRAIN_MODEL_NAME +
                "_val_loss_{:7.2f}".format(total_val / count))
            print("+++++++++++++++++++++++++++++++++++++++++++++++++++")
            yolo.save_weights(save_directory)
        if TRAIN_SAVE_BEST_ONLY and best_val_loss > total_val / count:
            save_directory = os.path.join(TRAIN_CHECKPOINTS_FOLDER,
                                          TRAIN_MODEL_NAME)
            print("====================================================")
            yolo.save_weights(save_directory)
            best_val_loss = total_val / count
        if not TRAIN_SAVE_BEST_ONLY and not TRAIN_SAVE_CHECKPOINT:
            save_directory = os.path.join(TRAIN_CHECKPOINTS_FOLDER,
                                          TRAIN_MODEL_NAME)
            print("?????????????????????????????????????????????????????????")
            yolo.save_weights(save_directory)
        print(
            "###################################################################################"
        )

    print(".................................................................")
    # measure mAP of trained custom model
    mAP_model.load_weights(save_directory)  # use keras weights
    get_mAP(mAP_model,
            testset,
            score_threshold=TEST_SCORE_THRESHOLD,
            iou_threshold=TEST_IOU_THRESHOLD)
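
The learning-rate schedule inside train_step (linear warmup followed by cosine decay, per the paper linked in the comment) can be inspected in isolation. Below is a standalone sketch using illustrative values for TRAIN_LR_INIT, TRAIN_LR_END, the epoch count, and steps per epoch; these are assumptions, not the project's actual settings.

# Standalone sketch of the schedule used in train_step: linear warmup to
# TRAIN_LR_INIT, then cosine decay down to TRAIN_LR_END.
import numpy as np

TRAIN_LR_INIT, TRAIN_LR_END = 1e-4, 1e-6            # illustrative values only
steps_per_epoch, warmup_epochs, total_epochs = 1000, 2, 100
warmup_steps = warmup_epochs * steps_per_epoch
total_steps = total_epochs * steps_per_epoch

def lr_at(step):
    if step < warmup_steps:
        return step / warmup_steps * TRAIN_LR_INIT
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return TRAIN_LR_END + 0.5 * (TRAIN_LR_INIT - TRAIN_LR_END) * (1 + np.cos(progress * np.pi))

# Ramps up during warmup, peaks at TRAIN_LR_INIT, decays to ~TRAIN_LR_END.
print(lr_at(1), lr_at(warmup_steps), lr_at(total_steps))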
Example #8
        results_file.write("\n# mAP of all classes\n")
        mAP = sum_AP / n_classes

        text = "mAP = {:.3f}%, {:.2f} FPS".format(mAP*100, fps)
        results_file.write(text + "\n")
        print(text)
        
        return mAP*100

if __name__ == '__main__':       
    if YOLO_FRAMEWORK == "tf": # TensorFlow detection
        if YOLO_TYPE == "yolov4":
            Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
        if YOLO_TYPE == "yolov3":
            Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS

        if YOLO_CUSTOM_WEIGHTS == False:
            yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
            load_yolo_weights(yolo, Darknet_weights) # use Darknet weights
        else:
            yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
            yolo.load_weights(YOLO_CUSTOM_WEIGHTS) # use custom weights
        
    elif YOLO_FRAMEWORK == "trt": # TensorRT detection
        saved_model_loaded = tf.saved_model.load(YOLO_CUSTOM_WEIGHTS, tags=[tag_constants.SERVING])
        signature_keys = list(saved_model_loaded.signatures.keys())
        yolo = saved_model_loaded.signatures['serving_default']

    testset = Dataset('test', TEST_INPUT_SIZE=YOLO_INPUT_SIZE)
    get_mAP(yolo, testset, score_threshold=0.05, iou_threshold=0.50, TEST_INPUT_SIZE=YOLO_INPUT_SIZE)
Example #9
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import random
import time
import tensorflow as tf
from yolov3.yolov4 import Create_Yolo
from yolov3.utils import detect_image
from yolov3.configs import *

yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
yolo.load_weights(f"./checkpoints/{TRAIN_MODEL_NAME}")  # load the model once, outside the loop

while True:
    # Pick a random image from the MNIST test label file
    ID = random.randint(0, 200)
    label_txt = "mnist/mnist_test.txt"
    image_info = open(label_txt).readlines()[ID].split()

    image_path = image_info[0]

    detect_image(yolo,
                 image_path,
                 "mnist_test.jpg",
                 input_size=YOLO_INPUT_SIZE,
                 show=True,
                 CLASSES=TRAIN_CLASSES,
                 rectangle_colors=(255, 0, 0))
Example #10
import tensorflow as tf
from tensorflow.python.saved_model import tag_constants
from yolov3.yolov4 import Create_Yolo
from yolov3.utils import load_yolo_weights
from yolov3.configs import *
import time

from deep_sort import nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from deep_sort import generate_detections as gdet

video_path = "./IMAGES/test.mp4"

if YOLO_FRAMEWORK == "tf":  # TensorFlow detection
    if YOLO_TYPE == "yolov4":
        Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
    if YOLO_TYPE == "yolov3":
        Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS
    yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE)
    if YOLO_CUSTOM_WEIGHTS != False:
        yolo.load_weights(YOLO_CUSTOM_WEIGHTS)  # use custom weights
    else:
        load_yolo_weights(yolo, Darknet_weights)  # use MS COCO weights

elif YOLO_FRAMEWORK == "trt":  # TensorRT detection
    saved_model_loaded = tf.saved_model.load(YOLO_CUSTOM_WEIGHTS,
                                             tags=[tag_constants.SERVING])
    signature_keys = list(saved_model_loaded.signatures.keys())
    yolo = saved_model_loaded.signatures['serving_default']


def Object_tracking(Yolo,
                    video_path,
                    output_path,
Example #11
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import sys

foldername = os.path.basename(os.getcwd())
if foldername == "tools":
    os.chdir("..")
sys.path.insert(1, os.getcwd())

import tensorflow as tf
from yolov3.yolov4 import Create_Yolo
from yolov3.utils import load_yolo_weights, Load_Yolo_model
from yolov3.configs import *

if YOLO_TYPE == "yolov4":
    Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
    Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS

if YOLO_CUSTOM_WEIGHTS == False:
    yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
    load_yolo_weights(yolo, Darknet_weights)  # use Darknet weights
else:
    #yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
    #yolo.load_weights(YOLO_CUSTOM_WEIGHTS) # use custom weights
    yolo = Load_Yolo_model()

yolo.summary()
yolo.save(f'./checkpoints/{YOLO_TYPE}-{YOLO_INPUT_SIZE}')

print(f"model saves to /checkpoints/{YOLO_TYPE}-{YOLO_INPUT_SIZE}")
Example #12
from yolov3.yolov4 import Create_Yolo
from yolov3.utils import load_yolo_weights
from yolov3.configs import *
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from deep_sort import generate_detections as gdet
import argparse
import imutils

ap = argparse.ArgumentParser()
ap.add_argument('-i', '--input', required=True)
ap.add_argument('-o', "--output", required=True)
args = vars(ap.parse_args())

video_path = args["input"]
output_path = args["output"]

Darknet_weights = YOLO_V3_WEIGHTS
yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE)

if YOLO_CUSTOM_WEIGHTS != False:
    yolo.load_weights(YOLO_CUSTOM_WEIGHTS)
else:
    load_yolo_weights(yolo, Darknet_weights)

max_cosine_distance = 0.5
nn_budget = None
input_size = YOLO_INPUT_SIZE
score_threshold = 0.5
iou_threshold = 0.45
track_obj = 'person'

model_filename = 'model_data/mars-small128.pb'
encoder = gdet.create_box_encoder(model_filename, batch_size=1)
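
Example #12 stops right after creating the appearance encoder. A sketch of how the remaining deep_sort pieces are typically wired together per frame follows, continuing from the setup above; the Detection constructor differs between deep_sort forks (some also take a class name), so treat the exact signatures as assumptions.

# Hypothetical continuation: associate per-frame YOLO boxes with tracks.
from deep_sort import nn_matching

metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
tracker = Tracker(metric)

def update_tracks(frame, boxes_tlwh, scores):
    # boxes_tlwh: [x, y, w, h] boxes for the tracked class in this frame
    features = encoder(frame, boxes_tlwh)
    detections = [Detection(bbox, score, feature)
                  for bbox, score, feature in zip(boxes_tlwh, scores, features)]
    tracker.predict()
    tracker.update(detections)
    return [(t.track_id, t.to_tlbr()) for t in tracker.tracks if t.is_confirmed()]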
Example #13
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import cv2
import numpy as np
import tensorflow as tf
#from yolov3.yolov3 import Create_Yolov3
from yolov3.yolov4 import Create_Yolo
from yolov3.utils import load_yolo_weights, detect_image, detect_video, detect_realtime
from yolov3.configs import *

if YOLO_TYPE == "yolov4":
    Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
    Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS

image_path = "./IMAGES/plate_2.jpg"
video_path = "./IMAGES/test.mp4"

yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
yolo.load_weights("./checkpoints/yolov3_custom")  # use keras weights

detect_image(yolo,
             image_path,
             "./IMAGES/plate_1_detect.jpg",
             input_size=YOLO_INPUT_SIZE,
             show=True,
             CLASSES=TRAIN_CLASSES,
             rectangle_colors=(255, 0, 0))
#detect_video(yolo, video_path, './IMAGES/detected.mp4', input_size=YOLO_INPUT_SIZE, show=False, CLASSES=TRAIN_CLASSES, rectangle_colors=(255,0,0))
#detect_realtime(yolo, '', input_size=YOLO_INPUT_SIZE, show=True, CLASSES=TRAIN_CLASSES, rectangle_colors=(255, 0, 0))
Example #14
        text = "mAP = {:.3f}%, {:.2f} FPS".format(mAP * 100, fps)
        results_file.write(text + "\n")
        print(text)

        return mAP * 100


if __name__ == '__main__':
    if YOLO_FRAMEWORK == "tf":  # TensorFlow detection
        if YOLO_TYPE == "yolov4":
            Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
        if YOLO_TYPE == "yolov3":
            Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS

        if YOLO_CUSTOM_WEIGHTS == False:
            yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE,
                               CLASSES=YOLO_COCO_CLASSES)
            load_yolo_weights(yolo, Darknet_weights)  # use Darknet weights
        else:
            yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE,
                               CLASSES=TRAIN_CLASSES)
            yolo.load_weights("./checkpoints/{}").format(
                TRAIN_MODEL_NAME)  # use custom weights

    elif YOLO_FRAMEWORK == "trt":  # TensorRT detection
        saved_model_loaded = tf.saved_model.load(
            "./checkpoints/{}".format(TRAIN_MODEL_NAME),
            tags=[tag_constants.SERVING])
        signature_keys = list(saved_model_loaded.signatures.keys())
        yolo = saved_model_loaded.signatures['serving_default']

    testset = Dataset('test', TEST_INPUT_SIZE=YOLO_INPUT_SIZE)
Example #15
def main():

    NUM_CLASS = CLASSES

    yolo = Create_Yolo(input_size=416, CLASSES=CLASSES)
    yolo.load_weights(f"yolov3_custom_Tiny")

    cap = cv2.VideoCapture(0)
    if cap.isOpened():
        current_digit = None
        video_thread = None
        video_names = os.listdir(VIDEOS_DIR)
        while True:

            _, frame = cap.read()
            image_h, image_w, _ = frame.shape
            # lt = time()
            bboxes = detect_digit(yolo, frame, CLASSES)

            bbox_thick = int(0.6 * (image_h + image_w) / 1000)
            if bbox_thick < 1:
                bbox_thick = 1
            # fontScale = 0.75 * bbox_thick

            digits = []
            for bbox in bboxes:
                coor = np.array(bbox[:4], dtype=np.int32)
                score = bbox[4]
                if score > 0.8:
                    class_ind = int(bbox[5])
                    digit = NUM_CLASS[class_ind]

                    if current_digit != digit:

                        digits.append(digit)

                        frame = cap.read()[1]

                        (x1, y1), (x2, y2) = (coor[0], coor[1]), (coor[2],
                                                                  coor[3])

                        cv2.rectangle(frame, (x1, y1), (x2, y2), (255, 0, 0),
                                      bbox_thick * 2)

                        for video_name in video_names:
                            if digit == video_name.split('.')[0]:

                                if video_thread and video_thread.is_alive():
                                    video_thread.do_run = False
                                    video_thread.join()
                                    current_digit = None
                                else:
                                    video_thread = Thread(target=show_video,
                                                          args=(video_name,
                                                                digit, frame,
                                                                coor))
                                    video_thread.start()
                                    current_digit = digit

                                break
                    if video_thread and not video_thread.is_alive():
                        current_digit = None
            k = cv2.waitKey(33)

            if k == 27:
                break

            cv2.imshow('frame', frame)

    else:
        print("Камера не найдена")

    cap.release()
    cv2.destroyAllWindows()
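
detect_digit is not defined in Example #15. Below is a sketch of what it presumably does, following the same pre/post-processing pipeline the repository's detect_image uses; the image_preprocess, postprocess_boxes and nms helpers from yolov3.utils and their exact signatures are assumptions here.

# Hypothetical helper assumed by Example #15: run YOLO on one frame and
# return post-processed boxes as [x1, y1, x2, y2, score, class_index].
import numpy as np
import tensorflow as tf
from yolov3.utils import image_preprocess, postprocess_boxes, nms

def detect_digit(yolo, frame, classes, input_size=416,
                 score_threshold=0.3, iou_threshold=0.45):
    image_data = image_preprocess(np.copy(frame), [input_size, input_size])
    image_data = image_data[np.newaxis, ...].astype(np.float32)

    pred_bbox = yolo.predict(image_data)
    pred_bbox = [tf.reshape(x, (-1, tf.shape(x)[-1])) for x in pred_bbox]
    pred_bbox = tf.concat(pred_bbox, axis=0)

    bboxes = postprocess_boxes(pred_bbox, frame, input_size, score_threshold)
    return nms(bboxes, iou_threshold, method='nms')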
Example #16
refID = []
try:
    file = open("sudoku/sudoku.names", "rt")
    lines = file.readlines()
    file.close()
    print(lines)

    for line in lines:
        line = line.strip()
        if not line == '':
            refID.append(line)
except FileNotFoundError:
    print("Reference ID not found!")
    quit()

yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)

fname = f"./checkpoints/{TRAIN_MODEL_NAME}"
print("weight = {}".format(fname))
yolo.load_weights(fname)  # use keras weights
print("weights loaded")

webcam = cv2.VideoCapture("/dev/video0")
webcam.set(cv2.CAP_PROP_FRAME_WIDTH, camWidth)
webcam.set(cv2.CAP_PROP_FRAME_HEIGHT, camHeight)

cv2.namedWindow("img", cv2.WINDOW_NORMAL)
cv2.resizeWindow("img", camWidth * 3, camHeight)

# --------  main loop -------
Example #17
import numpy as np
import tensorflow as tf

from utils import setup_tf_conf
setup_tf_conf()

#from yolov3.yolov3 import Create_Yolov3
from yolov3.yolov4 import Create_Yolo
from yolov3.utils import load_yolo_weights, detect_image, detect_video, detect_realtime
from yolov3.configs import *

if YOLO_TYPE == "yolov4":
    Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
    Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS

image_path = "./IMAGES/kite.jpg"
video_path = "./IMAGES/test.mp4"

yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE)
load_yolo_weights(yolo, Darknet_weights)  # use Darknet weights

#detect_image(yolo, image_path, '', input_size=YOLO_INPUT_SIZE, show=True, rectangle_colors=(255,0,0))
detect_video(yolo,
             video_path,
             './IMAGES/test_pred.mp4',
             input_size=YOLO_INPUT_SIZE,
             show=False,
             rectangle_colors=(255, 0, 0))
#detect_realtime(yolo, '', input_size=YOLO_INPUT_SIZE, show=True, rectangle_colors=(255, 0, 0))
Example #18
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
import sys

foldername = os.path.basename(os.getcwd())
if foldername == "tools":
    os.chdir("..")
sys.path.insert(1, os.getcwd())

import tensorflow as tf
from yolov3.yolov4 import Create_Yolo
from yolov3.utils import load_yolo_weights
from yolov3.configs import *

if YOLO_TYPE == "yolov4":
    Darknet_weights = YOLO_V4_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V4_WEIGHTS
if YOLO_TYPE == "yolov3":
    Darknet_weights = YOLO_V3_TINY_WEIGHTS if TRAIN_YOLO_TINY else YOLO_V3_WEIGHTS

if YOLO_CUSTOM_WEIGHTS == False:
    yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=YOLO_COCO_CLASSES)
    load_yolo_weights(yolo, Darknet_weights)  # use Darknet weights
else:
    yolo = Create_Yolo(input_size=YOLO_INPUT_SIZE, CLASSES=TRAIN_CLASSES)
    yolo.load_weights(YOLO_CUSTOM_WEIGHTS)  # use custom weights

yolo.summary()
yolo.save(f'./checkpoints/{YOLO_TYPE}-{YOLO_INPUT_SIZE}')

print(f"model saves to /checkpoints/{YOLO_TYPE}-{YOLO_INPUT_SIZE}")