Example #1
from darkflow.net.build import TFNet
import cv2

options = {
    "pbLoad": "yolo-voc-afed.pb",
    "metaLoad": "yolo-voc-afed.meta",
    "threshold": 0.5,
    "gpu": 0.6
}

tfnet = TFNet(options)

imgcv = cv2.imread("r.jpg")
results = tfnet.return_predict(imgcv)
print(results)
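# return_predict gives a list of dicts, one per detection, e.g.:
# [{'label': 'person', 'confidence': 0.84,
#   'topleft': {'x': 184, 'y': 101}, 'bottomright': {'x': 274, 'y': 382}}]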
colorMap = tfnet.meta['colors']
for res in results:
    left = res['topleft']['x']
    top = res['topleft']['y']
    right = res['bottomright']['x']
    bottom = res['bottomright']['y']
    #colorIndex = res['coloridx']
    #color = colorMap[colorIndex]
    label = res['label']
    confidence = res['confidence']
    imgHeight, imgWidth, _ = imgcv.shape
    thick = int((imgHeight + imgWidth) // 300)

    cv2.rectangle(imgcv, (left, top), (right, bottom), (0, 0, 255), thick)
    cv2.putText(imgcv, label, (left, top - 12), 0, 1e-3 * imgHeight,
                (0, 0, 255), max(1, thick // 3))
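
# a quick way (hypothetical output filename) to persist the annotated frame
cv2.imwrite("r_annotated.jpg", imgcv)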
Example #2
# Dependencies for this snippet; NETS_Pretrained, CLASSES_SET, get_probs and
# get_probs_v2 are helpers defined elsewhere in the author's module.
import cv2
import numpy as np
import pandas as pd
from sklearn.metrics import average_precision_score
from darkflow.net.build import TFNet

def compute_YOLO_Perf_Paintings():
    classes_paintings = ['aeroplane','bird','boat','chair','cow','diningtable','dog','horse','sheep','train']
    path_to_img = '/media/gonthier/HDD/data/Painting_Dataset/'
    database = 'Paintings'
    databasetxt = database + '.txt'
    df_label = pd.read_csv(databasetxt, sep=",")
    df_test = df_label[df_label['set'] == 'test']
    sLength = len(df_test['name_img'])
    y_test = np.zeros((sLength, 10))
    path_model = '/media/gonthier/HDD/models/yolo/'

    for model in NETS_Pretrained:
        print(model)
        if model=='yolo-voc' or model=='yolo-full':
            CLASSES = CLASSES_SET['VOC']
        elif model=='yolo':
            CLASSES = CLASSES_SET['COCO']
        nbClasses = len(CLASSES)
        
        cfg = path_model + model + ".cfg"
        weights = path_model + model +".weights"
        options = {"model": cfg, "load": weights, "threshold": 0.1,
                   "gpu" : 1.0}
        tfnet = TFNet(options)
        
        scores_all_image = np.zeros((len(df_test),nbClasses))
        
        for i, name_img in enumerate(df_test['name_img']):
            if i%1000==0:
                print(i,name_img)
            complet_name = path_to_img + name_img + '.jpg'
            im = cv2.imread(complet_name)
            result = tfnet.return_predict_gonthier(im) # Arguments: im (ndarray): a color image in BGR order
            if model == 'yolo-full':
                C, B, S = 20, 2, 7
                probs = get_probs(result, C, B, S)
                probs_per_classe = np.max(probs, axis=0)
            elif model == 'yolo-voc':
                C, B = 20, 5
                H = 13
                W = 13
                probs = get_probs_v2(result, H, W, C, B)
                probs_per_classe = np.max(probs, axis=(0, 1, 2))
            elif model == 'yolo':
                C, B = 80, 5
                H = 19
                W = 19
                probs = get_probs_v2(result, H, W, C, B)
                probs_per_classe = np.max(probs, axis=(0, 1, 2))
            scores_all_image[i,:] = probs_per_classe
            for j in range(10):
                if classes_paintings[j] in list(df_test['classe'][df_test['name_img'] == name_img])[0]:
                    y_test[i,j] = 1
            
        AP_per_class = []
        for k, classe in enumerate(classes_paintings):
            index_classe = np.where(np.array(CLASSES)==classe)[0][0]
            scores_per_class = scores_all_image[:,index_classe]
            #print(scores_per_class)
            #print(y_test[:,k],np.sum(y_test[:,k]))
            AP = average_precision_score(y_test[:,k],scores_per_class,average=None)
            AP_per_class += [AP]
            print("Average Precision for",classe," = ",AP)
        print(model," mean Average Precision = {0:.3f}".format(np.mean(AP_per_class)))
Example #3
import os
import tkinter
from tkinter import font, Menu
from darkflow.net.build import TFNet


def Help():
    print("help tab")
    os.startfile('help.txt')  # opens help.txt with the system default app (Windows-only)


expression = None
predictions = None
components = None
path = ""
options = {
    "model": "cfg/tiny-yolo-voc-3c.cfg",
    "load": 8625,
    "gpu": 1.0,
    "threshold": 0.45
}
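# an integer "load" value tells darkflow to restore the checkpoint saved at
# that training step (here step 8625) from its ckpt/ directory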
tfnet2 = TFNet(options)

# optimisations for improved response time.

if __name__ == '__main__':

    root = tkinter.Tk()
    root.title("DLC Rebuilder")
    root.minsize(1060, 600)
    root.geometry("1060x550+50+20")
    root.config(background='white')

    fontText = font.Font(size=12, weight='bold')
    fontTitle = font.Font(size=24, weight='bold')

    menu = Menu(root)
Example #4
    # FLAGS is darkflow's CLI argument handler (darkflow.defaults.argHandler)
    FLAGS.parseArgs(sys.argv)

    def _get_dir(dirs):
        # create every required directory that does not already exist
        for d in dirs:
            this = os.path.abspath(os.path.join(os.path.curdir, d))
            if not os.path.exists(this):
                os.makedirs(this)

    requiredDirectories = [
        FLAGS.imgdir, FLAGS.binary, FLAGS.backup,
        os.path.join(FLAGS.imgdir, 'out')
    ]
    if FLAGS.summary:
        requiredDirectories.append(FLAGS.summary)

    _get_dir(requiredDirectories)
    tfnet = TFNet(FLAGS)

    model = masknet.create_model()
    model.summary()
    model.load_weights("weights.hdf5")

    file = FLAGS.demo
    SaveVideo = True

    if file == 'camera':
        file = 0
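        # 0 tells cv2.VideoCapture to read from the default webcam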
    else:
        pass
        #assert os.path.isfile(file), \
        #'file {} does not exist'.format(file)
Example #5
import imutils
from darkflow.net.build import TFNet
import random
import time
from flask import Flask, render_template, request, flash, redirect, url_for, jsonify
import json
import requests
import cv2
import tensorflow as tf
from statistics import mode

# used to detect plate using yolo model
options = {
    "pbLoad": "Plate_recognition_weights/yolo-plate.pb",
    "metaLoad": "Plate_recognition_weights/yolo-plate.meta",
    "gpu": 0.9
}
yoloPlate = TFNet(options)

# used to detect characters on number plate
options = {
    "pbLoad": "Character_recognition_weights/yolo-character.pb",
    "metaLoad": "Character_recognition_weights/yolo-character.meta",
    "gpu": 0.9
}
yoloCharacter = TFNet(options)

characterRecognition = tf.keras.models.load_model('character_recognition.h5')


# returns the crop of the highest-confidence detection (last in the
# confidence-sorted list) and draws a rectangle around that license-plate
# detection on the input image
def firstCrop(img, predictions):
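    # The body is not shown in the original; this is a minimal sketch that
    # matches the comment above: keep the highest-confidence box, crop it,
    # and draw it on the input image.
    xtop = ytop = xbottom = ybottom = 0
    for pred in sorted(predictions, key=lambda p: p['confidence']):
        xtop, ytop = pred['topleft']['x'], pred['topleft']['y']
        xbottom, ybottom = pred['bottomright']['x'], pred['bottomright']['y']
    firstCropImg = img[ytop:ybottom, xtop:xbottom]
    cv2.rectangle(img, (xtop, ytop), (xbottom, ybottom), (0, 255, 0), 3)
    return firstCropImg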
Example #6
#!/usr/bin/env python3
import io
import subprocess

import numpy as np
import cv2
import tensorflow as tf

from darkflow.net.build import TFNet

image = '../sample_img/sample_computer.jpg'
cnn_c_file = "../vivado/cnn/cnn.sdk/cnnGeneral/src/image.h"
cnn_software_c_file = "../vivado/cnn_software/cnn_software.sdk/cnn_software/src/image.h"

tfnet = TFNet({"model": "tiny-yolo-voc.cfg", "load": "tiny-yolo-voc.weights"})

last_layer_size = 11


def resize_input(im):
    # resize to the network input size (YOLO downsamples by 32, so the input
    # is 32 px per output grid cell) and flip BGR (OpenCV order) to RGB
    w = 32 * last_layer_size
    h = w
    imsz = cv2.resize(im, (w, h))
    imsz = imsz[:, :, ::-1]
    return imsz


def number_array(values):
    return "{" + ", ".join(map(str, values)) + "}"

Example #7
import os
import json
import base64

import cv2
import requests
from darkflow.net.build import TFNet


def detect_cars_per_frame(cfg, weights, im):
    # demo purpose
    init_url = 'https://api.belairdirect.com/quickquote-blr/initDataContext?company=BELAIR&province=QC&language=EN&distributor=BEL&platform=desktop&partnershipId='
    headers = {}
    headers['Origin'] = 'https://apps.belairdirect.com'
    headers['Referer'] = 'https://apps.belairdirect.com/quick-quote/desktop/index.html?prov=qc&lang=en&intcid=homepage'
    params = {}
    params['distributor'] = 'BEL'
    params['company'] = 'BELAIR'
    params['province'] = 'QC'
    params['language'] = 'EN'
    params['emailAddress'] = ''
    params['licenceNumber'] = ''
    params['firstNameLicence'] = ''
    params['year'] = '2012'
    #params['make'] = 'HONDA'
    #params['model'] = '021001'
    params['distanceWorkSchool'] = '10'
    params['annualKm'] = '14000'
    params['gender'] = 'M'
    params['firstName'] = 'test'
    params['lastName'] = 'test'
    params['dateOfbirthYear'] = '1996'
    params['dateOfbirthMonth'] = '12'
    params['dateOfbirthDay'] = '12'
    params['homePhoneNumber'] = '514-555-1212'
    params['postalCode'] = 'H2W 1X9'
    params['firstLicencedAt'] = '18'
    params['yearsWithCurrentInsurer'] = '1'
    params['marketingConsent'] = 'false'
    params['creditScore'] = '0'
    params['otherAntiTheftDeviceIndicator'] = 'false'
    valid_makes = [
        'VOLKSWAGEN', 'FORD', 'DODGE', 'HONDA', 'TOYOTA', 'TOYOTA', 'MAZDA',
        'BMW', 'NISSAN', 'HYUNDAI'
    ]
    valid_models = [
        'Golf', 'F150', 'Grand Caravan', 'CR-V', 'RAV-4', 'Camry', 'MAZDA3',
        '328i', 'Altima', 'Elantra'
    ]
    valid_code = [
        '968900', '355801', '266200', '027101', '755700', '045002', '758600',
        '903501', '091005', '052806'
    ]
    qq_price = {}
    #since we only use once
    mock_premium = [
        '2611.00', '3171.00', '2286.00', '2825.00', '2872.00', '3036.00',
        '2420.00', '3701.00', '2657.00', '3204.00'
    ]
    index = 0
    for model in valid_models:
        #resp = requests.get(url=init_url,headers=headers)
        #policyVersionId =json.loads(resp.content.decode("utf-8"))['body']['policyVersionId']
        #params['policyVersionId'] = policyVersionId
        params['make'] = valid_makes[index]
        params['model'] = valid_code[index]
        #response = requests.get(url='https://api.belairdirect.com/quickquote-blr/getPrice',
        #    params=params, headers=headers)
        #premium = json.loads(response.content.decode("utf-8"))['body']['vehicles'][0]['offers']['CUSTOM']['offerDetails']['priceYearly']
        #print(valid_makes[index] +valid_code[index] + ':' + premium)
        #qq_price[valid_models[index]] = premium

        qq_price[valid_models[index]] = mock_premium[index]
        index += 1

    options = {"model": cfg, "load": weights, "threshold": 0.6}

    #loading yolo model
    tfnet = TFNet(options)

    imgcv = im

    boxes = tfnet.return_predict(imgcv)
    print(boxes)

    h, w, _ = imgcv.shape
    car_list = []
    car_num = 1
    for b in boxes:
        left = b['topleft']['x']
        right = b['bottomright']['x']
        top = b['topleft']['y']
        bot = b['bottomright']['y']
        mess = b['label']
        confidence = b['confidence']
        thick = int((h + w) // 300)

        #output the image box
        response = 'Cannot recognize a car.'
        print_text = 'null'
        if mess == 'car':
            crop = imgcv[top:bot, left:right]
            car_path = 'test/out/tmp/tmp.jpg'
            cv2.imwrite(car_path, crop)
            car_num += 1
            #post, getting result from car model
            with open(car_path, "rb") as image_file:

                encoded_string = base64.b64encode(image_file.read())
                url = 'http://52.168.131.37:8080/predict/car_model2.0'
                data = encoded_string
                headers = {}
                headers['Content-Type'] = 'application/json'
                resp = requests.post(url=url,
                                     data=json.dumps(data.decode("utf-8")),
                                     headers=headers)
                response = json.loads(
                    resp.content.decode("utf-8"))['top5_results']['top1']
                #response = json.loads(resp.content.decode("utf-8"))['Make'] + ' ' + json.loads(resp.content.decode("utf-8"))['Model'] + ':'
                #year_list = json.loads(resp.content.decode("utf-8"))['Year']
                #response += year_list[-1]
            os.remove(car_path)
            #car_list.append(json.loads(resp.content.decode("utf-8"))['Make'] + ' ' + json.loads(resp.content.decode("utf-8"))['Model'])
            text = response['make'] + ' ' + response['model'] + ':' + response['prob']
            car_list.append(text)
            #demo purpose
            prob_of_car = float(response['prob'])
            #draw rec and text
            if response['model'] not in valid_models:
                cv2.rectangle(imgcv, (left, top), (right, bot), (128, 127, 77),
                              thick // 2)
                cv2.putText(imgcv, 'car', (left, top - 12), 0,
                            1e-3 * h * 2 / 3, (128, 127, 77), thick // 5)
                continue
            print_text = response['make'] + ' ' + response['model'] + ': $' + qq_price[response['model']]
            if prob_of_car > 0.95:
                cv2.rectangle(imgcv, (left, top), (right, bot), (56, 254, 0),
                              thick // 2)
                cv2.putText(imgcv, print_text, (left, top - 12), 0,
                            1e-3 * h * 2 / 3, (56, 254, 0), thick // 5)
            elif prob_of_car < 0.7:
                cv2.rectangle(imgcv, (left, top), (right, bot), (128, 127, 77),
                              thick // 2)
                cv2.putText(imgcv, 'car', (left, top - 12), 0,
                            1e-3 * h * 2 / 3, (128, 127, 77), thick // 5)

            else:
                cv2.rectangle(imgcv, (left, top), (right, bot),
                              (255, 255, 153), thick // 2)
                cv2.putText(imgcv, print_text, (left, top - 12), 0,
                            1e-3 * h * 2 / 3, (255, 255, 153), thick // 5)
        else:
            print("only support cars")
            continue  # skip non-car detections rather than abandoning the frame

    video_output = 'test/out/video.txt'
    with open(video_output, 'a') as f:
        f.write(json.dumps(car_list) + '\t\n')

    # return the annotated image when processing video frames
    return imgcv

    # NOTE: unreachable as written; move these lines above the return to save
    # the annotated image when testing a single image
    #img_name = os.path.join(outfolder, im.split('/')[-1])
    img_name = 'test/out/image.jpg'
    cv2.imwrite(img_name, imgcv)
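
# A minimal usage sketch (hypothetical video path) driving the function above
# frame by frame. Note that detect_cars_per_frame rebuilds TFNet on every
# call; for real video you would hoist TFNet(options) out of the loop.
cap = cv2.VideoCapture('test/video.mp4')
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break
    annotated = detect_cars_per_frame('cfg/yolo.cfg', 'bin/yolo.weights', frame)
cap.release()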
Example #8
import imutils
import cv2
import tensorflow as tf
from flask import Flask
from darkflow.net.build import TFNet

app = Flask(__name__)

# variables for AJAX communication
tem_message = "temporary"
final_message = "prediction result"

# hand-detection model
options_hand = {
    "model": "./cfg/yolo-hands.cfg",
    "load": "./bin/yolo-hand-detect.weights",
    "threshold": 0.6
}

tfnet_hand = TFNet(options_hand)

# sign-language translation model
options_signLanguage = {
    "model": "./cfg/handlang-small.cfg",
    "pbLoad": "./darkflow/built_graph/handlang-small.pb",
    "metaLoad": './darkflow/built_graph/handlang-small.meta',
    "threshold": 0.15
}
tfnet_detect = TFNet(options_signLanguage)


def gen(camera):
    sess = tf.Session()
    with sess.as_default():
        while True:
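            # The loop body is truncated in the original; a sketch of a
            # typical Flask MJPEG generator follows. camera.get_frame() is a
            # hypothetical helper returning one BGR frame as an ndarray.
            frame = camera.get_frame()
            detections = tfnet_hand.return_predict(frame)
            ok, jpeg = cv2.imencode('.jpg', frame)
            if not ok:
                continue
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + jpeg.tobytes() + b'\r\n')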
Example #9
    # method from a larger class; self.getOption builds the darkflow options dict
    def preconnect(self):
        # build the network once and run it on a dummy all-ones frame so the
        # first real prediction does not pay the graph warm-up cost
        self.tf = TFNet(self.getOption(self.option))
        ones_img = np.ones((600, 800, 3), np.uint8)
        self.result = self.tf.return_predict(ones_img)
Example #10
    def connectTf(self):
        self.tf = TFNet(self.getOption(self.option))
Example #11
import cv2
import numpy as np
from darkflow.net.build import TFNet


class traker:

    # "threshold" is the confidence cutoff for detections
    options = {"model": "cfg/yolo.cfg", "load": "bin/yolo.weights", "threshold": 0.5, "gpu": 0.3}
    tfnet = TFNet(options)  # predictions come back as JSON-like dicts

    # effectively unused; kept for reference
    def boxing(self, original_img, predictions):
        newImage = np.copy(original_img)

        for result in predictions:
            if result['label'] != 'car':
                continue
            top_x = result['topleft']['x']
            top_y = result['topleft']['y']

            btm_x = result['bottomright']['x']
            btm_y = result['bottomright']['y']

            confidence = result['confidence']
            label = result['label'] + " " + str(round(confidence, 3))

            if confidence > 0.25:
                newImage = cv2.rectangle(newImage, (top_x, top_y), (btm_x, btm_y), (255, 0, 0), 3)
                newImage = cv2.putText(newImage, label, (top_x, top_y - 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8,
                                       (0, 230, 0), 1, cv2.LINE_AA)

        return newImage


    # this is the method actually used
    def getPoint(self, predictions):
        bboxes = []
        for result in predictions:
            if result['label'] != 'car':
                continue
            top_x = result['topleft']['x']
            top_y = result['topleft']['y']

            btm_x = result['bottomright']['x']
            btm_y = result['bottomright']['y']

            score = result['confidence']  # confidence score

            bbox = [top_x, top_y, btm_x, btm_y, score]  # SORT reads only these 5 values
            bboxes.append(bbox)

        bboxes = np.array(bboxes)  # SORT expects a numpy array of [x1, y1, x2, y2, score] rows

        return bboxes

    def id_box(self, image, boxes):
        image = np.copy(image)

        for box in boxes:
            top_x = int(box[0])
            top_y = int(box[1])
            btm_x = int(box[2])
            btm_y = int(box[3])

            track_id = str(int(box[4]))

            image = cv2.rectangle(image, (top_x, top_y), (btm_x, btm_y), (255, 255, 255), 3)
            image = cv2.putText(image, track_id, (top_x, top_y - 5), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (255, 255, 255),1)

        return image
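
# A minimal usage sketch (assuming the abewley/sort package and a hypothetical
# frame.jpg): feed getPoint() detections into SORT and draw the tracked ids.
from sort import Sort

t = traker()
mot = Sort()
frame = cv2.imread('frame.jpg')
preds = t.tfnet.return_predict(frame)
tracks = mot.update(t.getPoint(preds))  # rows come back as [x1, y1, x2, y2, track_id]
annotated = t.id_box(frame, tracks)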
Example #12
import cv2
from darkflow.net.build import TFNet

# input_video_file, record, output_file_name, Variables and read_new_image
# are defined elsewhere in the original script
min_overlap = 0.3
length_of_car_in_meter = 4.8
bezel_boundary = 140
outlier_threshold = 0.5
options = {
    'model': 'cfg/yolo.cfg',
    'load': 'weights/yolo.weights',
    'threshold': 0.2,
    'gpu': 1.0
}
active_trackers_list = list()
all_trackers_list = list()

capture = cv2.VideoCapture(input_video_file)
fps = int(capture.get(cv2.CAP_PROP_FPS))
yolo_object_detection = TFNet(options)
print('--------------------------------------------------------')
while capture.isOpened():
    ok, image = read_new_image()
    if not ok:
        print("Could not read file")
        break
    Variables.frame_counter += 1
    if Variables.frame_counter == 1 and record:
        out = cv2.VideoWriter(output_file_name,
                              cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), fps,
                              (image.shape[1], image.shape[0]))
    # run yolo to detect objects in this frame
    try:
        result = yolo_object_detection.return_predict(image)
    except Exception:
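        # truncated in the original; a typical handler would treat the frame
        # as having no detections so the loop keeps running
        result = []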