Example #1
def process():
    file = request.files['file']
    if file and allowed_file(file.filename):
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
        adaptive_resize(os.path.join(app.config['UPLOAD_FOLDER'], filename))

        sys.path.append(os.path.join(os.path.dirname(__file__), 'darknet'))
        import darknet.darknet as dn
        #net = dn.load_net(bytes("darknet/cfg/tiny-yolo.cfg", 'ascii'), bytes("darknet/tiny-yolo.weights", 'ascii'), 0)
        net = dn.load_net(bytes("darknet/cfg/yolo.cfg", 'ascii'),
                          bytes("darknet/yolo.weights", 'ascii'), 0)
        meta = dn.load_meta(bytes("darknet/cfg/coco.data", 'ascii'))
        r = dn.detect(net,
                      meta,
                      bytes(app.config['UPLOAD_FOLDER'] + filename, 'ascii'),
                      thresh=.5)
        res = ''
        for obj in r:
            name = obj[0].decode('utf-8')
            prob = float(obj[1])
            rct = obj[2]
            res += 'Detected <b>' + name + '</b> with probability of ' + str(
                round(prob, 2))
            #res += ' ' + str(rct)
            res += '<br />'
            draw_object(app.config['UPLOAD_FOLDER'] + filename, rct)
        return render_template('result.html',
                               filename='uploads/' + filename + '?' +
                               str(randint(0, 999)),
                               result_text=res)
    else:
        return 'invalid data'
Example #2
def init_models(basic_path):
    print("\n [*]Loading object detection model!\n")

    det_net = darknet.load_net(
        str.encode(basic_path) + b"darknet/cfg/yolov3-food.cfg",
        str.encode(basic_path) +
        b"darknet/backup/food/yolov3-food_final.weights", 0)
    det_meta = darknet.load_meta(
        str.encode(basic_path) + b"darknet/cfg/food.data")

    print("\n [*]Loading object classficiation model!\n")

    classes = 231

    base_model = Xception(include_top=True, input_shape=(299, 299, 3))
    base_model.layers.pop()
    predictions = Dense(classes,
                        activation='softmax')(base_model.layers[-1].output)
    clf_model = Model(inputs=base_model.input, outputs=[predictions])
    clf_model.load_weights(basic_path +
                           "classification/models/xception-0-15-0.82.h5")

    # the .npy file stores a pickled dict, so allow_pickle is required on current numpy
    class_dict = {
        v: k
        for k, v in np.load(basic_path + "classification/class_index/food231.npy",
                            allow_pickle=True)[()].items()
    }

    return det_net, det_meta, clf_model, class_dict
Example #3
def model_init():
    net = darknet.load_net(
        "/root/htsc_detect/darknet/htsc_test.cfg".encode('utf-8'),
        "/root/htsc_detect/darknet/htsc.weights".encode('utf-8'), 0)  #模型载入
    meta = darknet.load_meta(
        "/root/htsc_detect/darknet/htsc.data".encode('utf-8'))  #Processor载入
    return net, meta
Example #4
def load_system():

    try:

        loaded_models = []
        vehicle_threshold = .5

        vehicle_weights = 'data/vehicle-detector/vehicle-detection.weights'
        vehicle_netcfg = 'data/vehicle-detector/vehicle-detection.cfg'
        vehicle_dataset = 'data/vehicle-detector/vehicle-detection.data'

        lp_threshold = .25

        lp_weights = 'data/lp-detector/lpd.weights'
        lp_netcfg = 'data/lp-detector/lpd.cfg'
        lp_dataset = 'data/lp-detector/lpd.data'

        ocr_threshold = .4

        ocr_weights = 'data/cr/cr.weights'
        ocr_netcfg = 'data/cr/cr.cfg'
        ocr_dataset = 'data/cr/cr.data'

        vehicle_net = dn.load_net(vehicle_netcfg.encode('utf-8'),
                                  vehicle_weights.encode('utf-8'), 0)
        vehicle_meta = dn.load_meta(vehicle_dataset.encode('utf-8'))
        loaded_models.append([vehicle_net, vehicle_meta, vehicle_threshold])

        lp_net = dn.load_net(lp_netcfg.encode('utf-8'),
                             lp_weights.encode('utf-8'), 0)
        lp_meta = dn.load_meta(lp_dataset.encode('utf-8'))
        loaded_models.append([lp_net, lp_meta, lp_threshold])

        ocr_net = dn.load_net(ocr_netcfg.encode('utf-8'),
                              ocr_weights.encode('utf-8'), 0)
        ocr_meta = dn.load_meta(ocr_dataset.encode('utf-8'))
        loaded_models.append([ocr_net, ocr_meta, ocr_threshold])

        return loaded_models

    except Exception:
        traceback.print_exc()
        sys.exit(1)
Example #5
def patch_predictor(gpu, i_queue, o_queue):

    os.environ["CUDA_VISIBLE_DEVICES"] = gpu

    # initialize yolov3 model
    thresh = 0.1
    hier_thresh = 0.5
    nms = 0.45

    config_file = "/home/ssd_array0/Develop/liyu/darknet/cfg/gnet2.net".encode('utf-8')
    weights_file = "/home/ssd_array0/Develop/liyu/darknet/backup/gnet2/gnet2_200000.weights".encode('utf-8')
    datacfg_file = "/home/ssd_array0/Develop/liyu/darknet/cfg/gnet2.data".encode('utf-8')

    try:
        net = load_net(config_file, weights_file, 0)
        meta = load_meta(datacfg_file)
    except Exception:
        print("[ERROR] failed to load yolov3 model")
        return

    while True:
        item = i_queue.get()

        patch = item[1]
        predictions = detect_numpy(net, meta, patch, thresh, hier_thresh, nms)

        predictions_ = []
        for pred in predictions:
            label = pred[0]
            probability = pred[1]
            cx, cy, w, h = pred[2]
            x, y = int(cx - w / 2), int(cy - h / 2)
            w, h = int(w), int(h)
            w, h = min(w, w + x), min(h, h + y)
            x, y = max(0, x), max(0, y)
            predictions_.append([label, probability, (x, y, w, h)])

        o_queue.put(
            (item[0], predictions_, item[2]))  # (txt_name, predictions, N)

        del item
Example #6
from darknet import darknet
import cv2, numpy as np
# module that loads a DLL so its exported functions can be called from Python
from ctypes import *

#1. Declare the variables to use
config_path = "C:/Users/bitcamp/anaconda3/Lib/site-packages/darknet/train/my_yolov3.cfg"
weigh_path = "C:/Users/bitcamp/anaconda3/Lib/site-packages/darknet/train/my_yolov3_final.weights"
meta_path = "C:/Users/bitcamp/anaconda3/Lib/site-packages/darknet/train/my_data.data"
video_path = "C:/Users/bitcamp/anaconda3/Lib/site-packages/darknet/data/video/16-3_cam01_assault01_place02_night_spring.mp4"
threshold = 0.25

#2. Load the related files
# load the cfg (model definition) and weights files as ASCII-encoded byte strings
net = darknet.load_net(bytes(config_path, "ascii"), bytes(weigh_path, "ascii"),
                       0)
# load the .data file (a text file listing the classes, train, valid, names, and backup paths) as ASCII-encoded bytes
meta = darknet.load_meta(bytes(meta_path, "ascii"))
# VideoCapture extracts frames from the video
cap = cv2.VideoCapture(video_path)

print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

#3. Run darknet through OpenCV
# loop while the video capture object is open
while (cap.isOpened()):
    # read the video one frame at a time
    # ret=True on a successful read; the frame is stored in image
    ret, image = cap.read()
    # resizing requires choosing how to interpolate between pixels; INTER_AREA is the interpolation method for shrinking
Example #7
from src.utils import nms

if __name__ == '__main__':

    try:

        input_dir = sys.argv[1]
        output_dir = input_dir

        ocr_threshold = .4

        ocr_weights = 'data/cr/cr.weights'
        ocr_netcfg = 'data/cr/cr.cfg'
        ocr_dataset = 'data/cr/cr.data'

        ocr_net = dn.load_net(ocr_netcfg.encode('utf-8'),
                              ocr_weights.encode('utf-8'), 0)
        ocr_meta = dn.load_meta(ocr_dataset.encode('utf-8'))

        imgs_paths = sorted(glob('%s/*lp.png' % output_dir))

        print('Performing Character Recognition...')

        for i, img_path in enumerate(imgs_paths):

            print('\tScanning %s' % img_path)

            bname = basename(splitext(img_path)[0])

            R, (width, height) = detect(ocr_net,
                                        ocr_meta,
                                        img_path.encode('utf-8'),
Example #8
            i.time = 0
            i.index = -1
        elif i.state == 1:
            if findList(indices, i.index) == -1:
                i.state = 2
                i.time = time.time()


if __name__ == '__main__':
    tracker = Sort()
    colors = [(random.randrange(0, 255), random.randrange(0, 255),
               random.randrange(0, 255)) for i in range(10)]

    dn.set_gpu(0)
    net = dn.load_net(
        "/home/bardoe/sources/sceneLight/model/yolov3.cfg".encode("utf-8"),
        "/home/bardoe/sources/sceneLight/model/yolov3.weights".encode("utf-8"),
        0)
    meta = dn.load_meta(
        "/home/bardoe/sources/sceneLight/model/coco.data".encode("utf-8"))
    cap = cv2.VideoCapture(0)

    frames = 0
    start = time.time()
    sender.start()
    sender.activate_output(1)
    sender[1].multicast = True

    while cap.isOpened():
        if frames > 100:
            frames = 0
            start = time.time()
Example #9
from darknet import darknet
import cv2, numpy as np, sys
from ctypes import *

net = darknet.load_net(
    b"C:/Users/bitcamp/anaconda3/Lib/site-packages/darknet/cfg/myyolov3.cfg",
    b"C:/Users/bitcamp/anaconda3/Lib/site-packages/darknet/weight/myyolov3_final.weights",
    0)
meta = darknet.load_meta(
    b"C:/Users/bitcamp/anaconda3/Lib/site-packages/darknet/weight/my.data")
cap = cv2.VideoCapture(
    0
)  #"C:/Users/bitcamp/anaconda3/Lib/site-packages/darknet/26-4_cam01_assault01_place01_night_spring.mp4")
print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
i = 0
while (cap.isOpened()):
    i += 1
    ret, image = cap.read()
    # check ret before resizing: cap.read() returns ret=False and image=None at end of stream
    if not ret:
        break
    image = cv2.resize(image, dsize=(640, 480), interpolation=cv2.INTER_AREA)
    print(i)
    frame = darknet.nparray_to_image(image)
    r = darknet.detect_image(net,
                             meta,
                             frame,
                             thresh=.5,
                             hier_thresh=.5,
                             nms=.45,
                             debug=False)
Example #10
from darknet import darknet
import cv2, numpy as np, sys
from ctypes import *

net = darknet.load_net(
    b"D:/python_module/darknet-master/build/darknet/x64/project/myyolov3.cfg",
    b"D:/python_module/darknet-master/build/darknet/x64/project/backup/myyolov3_final.weights",
    0)
meta = darknet.load_meta(
    b"D:/python_module/darknet-master/build/darknet/x64/project/my.data")
cap = cv2.VideoCapture(
    "D:/python_module/darknet-master/build/darknet/x64/project/22-2.mp4")
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
fps = cap.get(cv2.CAP_PROP_FPS)
out = cv2.VideoWriter('otter_out26-4_test.avi', fourcc, fps, (640, 480))
print(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
print(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

i = 0
while (cap.isOpened()):
    i += 1
    ret, image = cap.read()
    # check ret before resizing: cap.read() returns ret=False and image=None at end of stream
    if not ret:
        break
    image = cv2.resize(image, dsize=(640, 480), interpolation=cv2.INTER_AREA)
    print(i)
    frame = darknet.nparray_to_image(image)
    r = darknet.detect_image(net,
                             meta,
                             frame,
                             thresh=.5,
Example #11
from glob import glob

if __name__ == '__main__':

    try:

        input_dir = sys.argv[1]
        output_dir = input_dir

        lp_threshold = .25

        lp_weights = 'data/lp-detector/lpd.weights'
        lp_netcfg = 'data/lp-detector/lpd.cfg'
        lp_dataset = 'data/lp-detector/lpd.data'

        lp_net = dn.load_net(lp_netcfg.encode('utf-8'),
                             lp_weights.encode('utf-8'), 0)
        lp_meta = dn.load_meta(lp_dataset.encode('utf-8'))

        imgs_paths = glob('%s/*car.png' % input_dir)

        print('Searching for license plates...')

        for i, img_path in enumerate(imgs_paths):

            print('\t Processing %s' % img_path)

            bname = splitext(basename(img_path))[0]

            R, _ = detect(lp_net,
                          lp_meta,
                          img_path.encode('utf-8'),
Example #12
from darknet.darknet import detect

if __name__ == '__main__':

    try:

        input_dir = sys.argv[1]
        output_dir = sys.argv[2]

        vehicle_threshold = .5

        vehicle_weights = 'data/vehicle-detector/vehicle-detection.weights'
        vehicle_netcfg = 'data/vehicle-detector/vehicle-detection.cfg'
        vehicle_dataset = 'data/vehicle-detector/vehicle-detection.data'

        vehicle_net = dn.load_net(vehicle_netcfg.encode('utf-8'),
                                  vehicle_weights.encode('utf-8'), 0)
        vehicle_meta = dn.load_meta(vehicle_dataset.encode('utf-8'))

        imgs_paths = image_files_from_folder(input_dir)
        imgs_paths.sort()

        if not isdir(output_dir):
            makedirs(output_dir)

        print('Searching for vehicles...')

        for i, img_path in enumerate(imgs_paths):

            print('\tScanning %s' % img_path)

            bname = basename(splitext(img_path)[0])
Example #13
    def __init__(self, gpu, cfg, weights, data):
        darknet.set_gpu(gpu)
        self.net = darknet.load_net(str.encode(cfg), str.encode(weights), 0)
        self.meta = darknet.load_meta(str.encode(data))
Example #14
def to_box(r):
    boxes = []
    scores = []
    for rc in r:
        if rc[0] == b'text':
            cx, cy, w, h = rc[-1]
            scores.append(rc[1])
            xmin, ymin, xmax, ymax = cx - w / 2, cy - h / 2, cx + w / 2, cy + h / 2
            boxes.append([int(xmin), int(ymin), int(xmax), int(ymax)])
    return boxes, scores


if GPU:
    try:
        dn.set_gpu(GPUID)
    except Exception:
        pass
net = dn.load_net(yoloCfg.encode('utf-8'), yoloWeights.encode('utf-8'), 0)
meta = dn.load_meta(yoloData.encode('utf-8'))
os.chdir(pwd)


def text_detect(img):

    r = detect_np(net, meta, img, thresh=0, hier_thresh=0.5,
                  nms=None)  # output all boxes, consistent with the OpenCV dnn interface
    bboxes = to_box(r)
    return bboxes
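
All of the examples above follow the same darknet binding pattern: load_net with byte-string cfg and weights paths, load_meta with a byte-string .data path, then a detect call (detect, detect_image, or a numpy variant) that yields (label, probability, (cx, cy, w, h)) tuples. The sketch below condenses that pattern; the import path and the file names are placeholders rather than paths taken from any example above.

# Minimal sketch of the shared pattern (hypothetical paths; adjust to your darknet build)
import darknet.darknet as dn

net = dn.load_net(b"cfg/yolov3.cfg", b"yolov3.weights", 0)  # cfg and weights passed as bytes
meta = dn.load_meta(b"cfg/coco.data")                       # .data file passed as bytes
# detections come back as (name, probability, center-format box), as unpacked in the examples above
for label, prob, (cx, cy, w, h) in dn.detect(net, meta, b"data/dog.jpg", thresh=.5):
    print(label.decode('utf-8'), round(float(prob), 2), (int(cx), int(cy), int(w), int(h)))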