Example No. 1: building a YOLOv4 model from a training configuration object
from yolov4.tf import YOLOv4


def build_model(cfg, classes='classes'):
    yolo = YOLOv4(tiny=cfg.model.tiny)
    yolo.classes = classes
    yolo.input_size = (cfg.model.input_size, cfg.model.input_size)
    yolo.batch_size = cfg.train.batch_size
    yolo.make_model()
    return yolo
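A minimal usage sketch: the field names (cfg.model.tiny, cfg.model.input_size, cfg.train.batch_size) come from the function body above, while the SimpleNamespace stand-in and the classes file path are placeholders of my own rather than the original project's config.

from types import SimpleNamespace

# Stand-in config mirroring the fields build_model reads; a real project would
# typically load this from a YAML/Hydra config file instead.
cfg = SimpleNamespace(
    model=SimpleNamespace(tiny=True, input_size=416),
    train=SimpleNamespace(batch_size=32),
)

yolo = build_model(cfg, classes="coco.names")  # classes path is a placeholder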
Example No. 2: loading pretrained COCO weights for inference
def get_yolo(img_size):

    yolo = YOLOv4()
    yolo.classes = base_path + "coco.names"
    yolo.input_size = img_size
    yolo.make_model()
    yolo.load_weights(base_path + "yolov4.weights", weights_type="yolo")

    return yolo
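A hedged usage sketch continuing the snippet above: base_path must already point at the directory holding coco.names and yolov4.weights, and the image filename is a placeholder.

base_path = "./yolofile/"              # placeholder
yolo = get_yolo(608)                   # 608x608 input resolution
yolo.inference(media_path="kite.jpg")  # placeholder image; displays the detections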
Example No. 3: loading the model once inside a wrapper class
def load_model():
    """Load the model from disk. This should be called before the predict API."""
    from yolov4.tf import YOLOv4

    start_time = time.time()
    # LOGGER.info("Start loading the YoloV4 model")
    YoloV4Wrapper.yolo = YOLOv4()
    YoloV4Wrapper.yolo.classes = YOLOV4_CLASSES
    YoloV4Wrapper.yolo.make_model()
    YoloV4Wrapper.yolo.load_weights(YOLOV4_WEIGHTS, weights_type="yolo")
    YoloV4Wrapper.is_model_loaded = True
    exec_time = time.time() - start_time
Example No. 4: pairing YOLOv4 detection with a vietocr text recognizer
    def __init__(self):
        self.yolo = YOLOv4()
        self.yolo.classes = './coco.names'
        self.yolo.make_model()
        self.yolo.load_weights("./model/yolov4-custom_last.weights",
                               weights_type="yolo")
        self.config = Cfg.load_config()
        self.config['weights'] = './model/transformerocr.pth'
        self.config['predictor']['beamsearch'] = False
        self.config['device'] = 'cpu'
        self.detector = Predictor(self.config)
        self.classes = ['id', 'name', 'dmy', 'add1', 'add2']
        self.res = dict.fromkeys(self.classes, '')
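The constructor above is excerpted from a class, so its module-level imports are not shown. A sketch of what they presumably are, based on the identifiers used: YOLOv4 from yolov4.tf as in the other examples, and Cfg and Predictor from the vietocr package that provides the transformerocr model (the vietocr import paths are an assumption).

from yolov4.tf import YOLOv4
from vietocr.tool.config import Cfg           # assumed vietocr import path
from vietocr.tool.predictor import Predictor  # assumed vietocr import path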
Example No. 5: running inference on a live camera stream (V4L2)

import cv2

from yolov4.tf import YOLOv4

yolo = YOLOv4()

yolo.config.parse_names("coco.names")
yolo.config.parse_cfg("yolov4-tiny.cfg")  # remember to change the weight source for both the .cfg and the .weights files

yolo.make_model()
yolo.load_weights("yolov4-tiny.weights", weights_type="yolo")
yolo.summary(summary_type="yolo")
yolo.summary()

# yolo.inference(media_path="download.jpeg")

# yolo.inference(media_path="road.mp4", is_image=False)

# To list the available camera sources, run: ls -ltrh /dev/video*

yolo.inference(
    "/dev/video6",              # video0 is the webcam, video6 is the RealSense RGB stream
    is_image=False,
    cv_apiPreference=cv2.CAP_V4L2,
    cv_frame_size=(640, 480),
    cv_fourcc="YUYV",
)
Example No. 6: training YOLOv4-tiny on a COCO-style dataset
from tensorflow.keras import callbacks, optimizers
from yolov4.tf import SaveWeightsCallback, YOLOv4
import time

yolo = YOLOv4(tiny=True)
yolo.classes = "./yolofile/coco.names"
yolo.input_size = 608
yolo.batch_size = 32

yolo.make_model()
yolo.load_weights("./yolofile/yolov4-tiny.conv.29", weights_type="yolo")

train_data_set = yolo.load_dataset(
    "./yolofile/train2017.txt",
    image_path_prefix="/Users/yu.duan/Documents/data/coco_dataset/train2017",
    label_smoothing=0.05)
val_data_set = yolo.load_dataset(
    "./yolofile/val2017.txt",
    image_path_prefix=" /Users/yu.duan/Documents/data/coco_dataset/val2017",
    training=False)

epochs = 400
lr = 1e-4

optimizer = optimizers.Adam(learning_rate=lr)
yolo.compile(optimizer=optimizer, loss_iou_type="ciou")


def lr_scheduler(epoch):
    if epoch < int(epochs * 0.5):
        return lr
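The snippet is cut off inside lr_scheduler. For orientation, a hedged sketch of how training is typically launched with this package's fit wrapper (assumed to forward to Keras Model.fit), wiring in the scheduler above and the imported SaveWeightsCallback; the callback arguments, output directory, step counts and save interval are my assumptions, not the original author's values.

# Continues the snippet above: yolo, train_data_set, val_data_set and a fully
# defined lr_scheduler are assumed to exist at this point.
yolo.fit(
    train_data_set,
    epochs=epochs,
    callbacks=[
        callbacks.LearningRateScheduler(lr_scheduler),
        callbacks.TerminateOnNaN(),
        SaveWeightsCallback(
            yolo=yolo,
            dir_path="./trained_weights",  # placeholder output directory
            weights_type="yolo",
            epoch_per_save=10,             # placeholder save interval
        ),
    ],
    validation_data=val_data_set,
    validation_steps=50,   # placeholder
    validation_freq=5,     # placeholder
    steps_per_epoch=100,   # placeholder
)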
Example No. 7: exporting the model to Keras, ONNX and Edge TPU formats
""" 
export model as keras, onnx and edge tpu model

onnx export requires tensorflow-onnx package
"""

import tensorflow as tf
import subprocess
from yolov4.tf import YOLOv4

MODEL_PATH = "tiny_yolov4_relu/"
WEIGHT_PATH = "yolov4-tiny-relu.weights"
DATASET_PATH = "path/to/dataset"

# create model
yolov4 = YOLOv4(tiny=True, tpu=True)
yolov4.classes = "dataset/coco.names"
yolov4.make_model(activation1="relu")
yolov4.load_weights(WEIGHT_PATH, weights_type="yolo")

# save as keras model
yolov4.model.save(MODEL_PATH)

# save as onnx model
try:
    subprocess.run(
        [
            "python",
            "-m",
            "tf2onnx.convert",
            "--opset",
Example No. 8: running detection and centroid tracking on a video file
    parser.add_argument('--save-video',
                        action='store_true',
                        help='Create a video file with the analysis result.')

    return parser.parse_args()


if __name__ == '__main__':
    args = parse_args()

    # Init the object tracker
    tracker = CentroidTracker()

    # Init the model
    yolo = YOLOv4(isTiny(bytes(args.weight, encoding="utf-8")))
    yolo.classes = "../darknet_files/dataset/classes.names"
    yolo.make_model()
    yolo.load_weights(bytes(args.weight, encoding="utf-8"),
                      weights_type="yolo")

    cap = cv.VideoCapture(args.video)
    if not cap.isOpened():
        print('Error opening video stream!')
        exit()

    bar = Bar('Processing Frames', max=int(cap.get(cv.CAP_PROP_FRAME_COUNT)))
    width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))

    print(f'\nAnalysing {args.video}...')
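The snippet ends before the frame loop. A hedged sketch of what the per-frame processing might look like, continuing the names above; the exact format returned by the library's predict() and the CentroidTracker.update() signature are assumptions based on common implementations, not taken from this script.

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Detection: predict() is assumed to return rows of
        # (x_center, y_center, w, h, class_id, probability) in normalized coords.
        bboxes = yolo.predict(frame)

        # Convert detections to pixel rectangles for the centroid tracker
        # (update() taking a list of (x1, y1, x2, y2) boxes is an assumption).
        rects = []
        for x, y, w, h, *_ in bboxes:
            rects.append((int((x - w / 2) * width), int((y - h / 2) * height),
                          int((x + w / 2) * width), int((y + h / 2) * height)))
        objects = tracker.update(rects)  # drawing / counting would use `objects` here

        bar.next()

    bar.finish()
    cap.release()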