# https://www.mygreatlearning.com/blog/face-recognition/

import cv2
import os
from keras.models import load_model
from imageai.Detection import ObjectDetection

h = 300
w = 150

execution_path = os.getcwd()
detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
detector.loadModel()

print("Streaming started")
# video_capture = cv2.VideoCapture(0)
model = load_model("version3.h5")
# while True:
# grab the frame from the threaded video stream
# ret, orig = video_capture.read()
# orig = cv2.resize(orig,(500,500))
orig = cv2.imread(
    "/home/uchiha/Desktop/Dress_code/Dress_code_classifier_model/testImage/Screenshot from 2021-03-15 19-51-36.png"
)
orig, detections = detector.detectObjectsFromImage(input_type="array",
                                                   input_image=orig,
                                                   output_type="array")
if detections:
    for eachObject in detections:
        # loop body truncated in the source; print each detection, as the other examples do
        print(eachObject["name"], " : ", eachObject["percentage_probability"])
Example no. 2
from imageai.Detection import ObjectDetection
detector = ObjectDetection()
model_path = "model/yolo-tiny.h5"
input_path = "data/1.jpg"
output_path = "Output/1.jpg"
detector.setModelTypeAsTinyYOLOv3()
detector.setModelPath(model_path)
detector.loadModel()
detection = detector.detectObjectsFromImage(input_image=input_path,
                                            output_image_path=output_path)
for eachItem in detection:
    print(eachItem["name"], " : ", eachItem["percentage_probability"])
Example no. 3
from imageai.Detection import ObjectDetection

threshold = 70

detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath("modules/image_classificator/yolo.h5")
detector.loadModel()


def on_image(update, context):
    file = context.bot.getFile(update.message.photo[1].file_id)
    file.download('image.jpg')
    detections = detector.detectObjectsFromImage(
        input_image="image.jpg",
        output_image_path="image_result.jpg",
        minimum_percentage_probability=threshold)
    text = ', '.join([image_object["name"] for image_object in detections])
    context.bot.send_message(chat_id=update.effective_chat.id, text=text)
    context.bot.send_photo(chat_id=update.effective_chat.id,
                           photo=open('image_result.jpg', 'rb'))


def set_threshold_handler(update, context):
    global threshold  # without this, the assignment below would only create a local variable
    threshold = 40
    context.bot.send_message(chat_id=update.effective_chat.id,
                             text="Установила")  # Russian for "Done"/"Set it"
Example no. 4
import json
import os
import statistics as s
import time
from os import listdir

import cv2
from scipy.spatial import distance

from imageai.Detection import ObjectDetection

execution_path = os.getcwd()

detectorY = ObjectDetection()
detectorY.setModelTypeAsYOLOv3()
detectorY.setModelPath(
    "/Users/stuartrobinson/repos/computervision/ImageAI/gitignore/yolo.h5")
detectorY.loadModel()


def translateCropCoordToOrig(xy, cropCoords):
    return [xy[0] + cropCoords[0], xy[1] + cropCoords[1]]


def distanceBetween(box_points, ballXY):
    print("in distanceBetween, ", box_points, ", ", ballXY)
    boxX = s.mean([box_points[0], box_points[2]])
    boxY = s.mean([box_points[1], box_points[3]])
    return distance.euclidean((boxX, boxY), (ballXY[0], ballXY[1]))


def translateCropBoxPointsToOrig(XYs, cropCoords):
    # body truncated in the source; a minimal reconstruction via the helper above
    return [translateCropCoordToOrig(xy, cropCoords) for xy in XYs]
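# A quick sanity check of the helpers above, using made-up coordinates:
# a crop whose top-left corner sits at (100, 50) maps crop-local (10, 20) back to (110, 70).
print(translateCropCoordToOrig([10, 20], [100, 50]))  # -> [110, 70]
print(distanceBetween([0, 0, 10, 10], [5, 5]))        # -> 0.0 (box center coincides with the ball)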
Example no. 5
# Assumed imports for this snippet (not shown in the original):
import time
from tkinter import messagebox

import cv2
import pandas as pd


def open_cam(selcam):
    cam = cv2.VideoCapture(selcam)

    ic = 0
    while True:
        test, frame = cam.read()
        if not test:
            break
        cv2.imshow("PRESS SPACE TO CAPTURE AND ESC TO CLOSE", frame)
        k = cv2.waitKey(1)

        if k % 256 == 27:
            # ESC pressed
            print("Escape hit, closing...")
            messagebox.showinfo(
                "Processing",
                "No pictures = %d\n Click to process further!\nPLEASE BE PATIENT TILL THE PROCESS COMPLETES"
                % ic)
            break
        elif k % 256 == 32:
            # SPACE pressed
            img_name = "img\\image_%d.jpg" % ic
            ic = ic + 1
            cv2.imwrite(img_name, frame)
            print("{} written!".format(img_name))

    cam.release()

    cv2.destroyAllWindows()

    ################################################################

    from imageai.Detection import ObjectDetection
    import os
    st = time.time()
    execution_path = os.getcwd()
    obj = {}
    detector = ObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(
        os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel()

    count = 0
    for i in range(0, ic):
        detections, extracted = detector.detectObjectsFromImage(
            input_image=os.path.join(execution_path, "img\\image_%d.jpg" % i),
            output_image_path=os.path.join(execution_path,
                                           "img\\imagenew_%d.jpg" % i),
            extract_detected_objects=True)
        df = pd.DataFrame(detections)
        #         df1=pd.DataFrame(extracted)
        df["Extracted Img"] = extracted
        print(df)

        #creating excel sheet

        print("img\\image_%d.jpg" % i)
        base = os.path.basename("img\\image_%d.jpg" % i)
        writer = pd.ExcelWriter(r'countexcel/%s.xlsx' % base,
                                engine='xlsxwriter')
        df.to_excel(writer, sheet_name='Sheet1')
        writer.save()
        print(base)

        for eachObject in detections:
            if eachObject["name"] == "person":
                count = count + 1
            print(eachObject["name"], " : ",
                  eachObject["percentage_probability"])
            obj[eachObject["name"]] = eachObject["percentage_probability"]
    print("No of person is %d" % count)
    en = time.time()
    total = en - st
    print(total)
    #print('\n'.join(obj))

    #     for i in range(0, ic):
    #os.remove(os.path.join(execution_path , r"img/image_%d.jpg" %i))
    #os.remove(os.path.join(execution_path , r"img/imagenew_%d.jpg" %i))
    return count, ic, obj
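# A hedged usage sketch: capture frames from the default webcam (device 0), then count
# the people in them. Assumes the img/ and countexcel/ folders and the RetinaNet
# weights file exist in the working directory.
person_count, num_images, objects = open_cam(0)
print("persons:", person_count, "images:", num_images, "objects:", objects)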
Example no. 6
from imageai.Detection import ObjectDetection
import numpy as np
import matplotlib.pyplot as plt


detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()

detector.setModelPath("/home/juliosilva/Desktop/Tese/Redes_Neurais/ObjectRecognition/retinaNet.h5")
detector.loadModel()

detections = detector.detectObjectsFromImage(input_image="/home/juliosilva/Desktop/Tese/u2/photo.jpg", output_image_path="/home/juliosilva/Desktop/Tese/u2/retinaNet.jpg", minimum_percentage_probability=30)


bar_width = 0.3

print("")
print(" ")
print(" ------------------------------ Results Retina --------------------------------")

a = plt.figure('RetinaNet Detection')
plt.title('RetinaNet', fontsize = 16)
plt.xlabel('Predictions',fontsize = 12)
plt.ylabel('Percentage of certainty', fontsize = 12)

names = []
probability = []


for eachObject in detections:
    print(eachObject["name"], " : ", eachObject["percentage_probability"], " : ", eachObject["box_points"])
    # continuation truncated in the source; collect values for the bar chart set up above
    names.append(eachObject["name"])
    probability.append(eachObject["percentage_probability"])

plt.bar(np.arange(len(names)), probability, bar_width, tick_label=names)
plt.show()
Example no. 7
File: w.py Project: TanishB/EESL
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")


from imageai.Detection import ObjectDetection
import os
import pickle
import numpy as np

model = pickle.load(open('modelGud.pkl', 'rb'))
detector = ObjectDetection()
detector.setModelTypeAsTinyYOLOv3()
detector.setModelPath('./yolo-tiny.h5')
detector.loadModel()
custom = detector.CustomObjects(car = True)
path = "./electricStations"
evList = []
for i in os.listdir(path):
    if i.endswith('jpg'):
        evList.append(i)

def customPrice(electricStation):
    evPath = './electricStations/' + electricStation
    detections = detector.detectCustomObjectsFromImage(custom_objects=custom , input_image = evPath ,output_image_path = './output/' + electricStation + '.jpg', minimum_percentage_probability=30)
    numberOfCars = 0
    for car in detections:
        numberOfCars += 1
    #print(numberOfCars)
    time = np.random.randint(1,13)
    dayNight = np.random.randint(0,2)
    chargingPoints = 6
Example no. 8
from imageai.Detection import ObjectDetection
import os

execution_path = os.getcwd()

detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(
    os.path.join(
        execution_path,
        "C:\\Users\\Admin_Robo\\Desktop\\project\\models\\resnet50_coco_best_v2.0.1.h5"
    ))
detector.loadModel()  ## NOTE: 1 second, 5 objects; the fastest option for recognition.
print('LOADED')
print('LOADED2')

detections = detector.detectObjectsFromImage(
    input_image=os.path.join(
        execution_path,
        "C:\\Users\\Admin_Robo\\Desktop\\project\\images\\kh.jpg"),
    output_image_path=os.path.join(
        execution_path, "C:\\Users\\Admin_Robo\\Desktop\\iimg1.jpg"))
print(
    '=================================IMG 1=================================')
for eachObject in detections:
    print(eachObject["name"], " : ", eachObject["percentage_probability"])
Example no. 9
    objectsToDetect = args.objects.split(',')
    directory = os.path.sep.join(["data", args.folder])

    folderA = directory + "_A"
    if not os.path.exists(folderA): os.makedirs(folderA)
    else: raise Exception("Folder A already exists")
    folderB = directory + "_B"
    if not os.path.exists(folderB): os.makedirs(folderB)
    else: raise Exception("Folder B already exists")

    # https://github.com/OlafenwaMoses/ImageAI/releases/tag/1.0/

    if args.strategy == "Detection":
        detector = ObjectDetection()
        detector.setModelTypeAsYOLOv3()
        detector.setModelPath("./models/yolo.h5")
        detector.loadModel()
    else:
        prediction = ImagePrediction()
        prediction.setModelTypeAsResNet()
        prediction.setModelPath(
            "./models/resnet50_weights_tf_dim_ordering_tf_kernels.h5")
        prediction.loadModel()

    for this in os.listdir(directory):

        filePath = os.path.join(directory, this)
        aPath = os.path.join(folderA, this)
        bPath = os.path.join(folderB, this)

        if args.strategy == "Detection":
            # truncated in the source; a minimal per-file step consistent with the setup above
            detections = detector.detectObjectsFromImage(input_image=filePath,
                                                         output_image_path=aPath)
Example no. 10

# Assumed imports for this snippet (not shown in the original; CentroidTracker is a
# project-local helper module):
import cv2
import numpy as np
from imageai.Detection import ObjectDetection
from centroidtracker import CentroidTracker


class LiveDetector:
    def __init__(self):
        # Instantiate detector
        self.detector = ObjectDetection()

        # Set and load model
        self.model_path = "D:/Final-Year-Project/Object-tracking/models/yolo.h5"
        self.detector.setModelTypeAsYOLOv3()
        self.detector.setModelPath(self.model_path)
        self.detector.loadModel()

        # Set custom objects
        self.custom_objects = self.detector.CustomObjects(car=True,
                                                          motorcycle=True,
                                                          person=True,
                                                          bicycle=True,
                                                          dog=True)
        self.tracker = CentroidTracker()

    def track_objects(self, frame):
        rects = []
        names = []
        data = {}
        frame = self.pixelate_frontyard((100, 100), frame)

        returned_image, detection = self.detector.detectCustomObjectsFromImage(
            custom_objects=self.custom_objects,
            input_image=frame,
            output_type="array",
            input_type="array")
        for eachObject in detection:
            rects.append(eachObject["box_points"])
            names.append(eachObject["name"])

            (startX, startY, endX, endY) = eachObject["box_points"]
            cv2.rectangle(frame, (startX, startY), (endX, endY), (0, 255, 0),
                          2)
            self.blur_object((startX, startY), (endX, endY), (11, 11), frame)

        objects = self.tracker.update(rects, names)

        if objects is not None:
            for objectID, objectDetails in objects.items():
                # draw both the ID of the object and the centroid of the
                # object on the output frame
                centroid = objectDetails[0]
                name = objectDetails[1]
                if self.tracker.disappeared[objectID] < 1:
                    text = name + " " + str(objectID)
                    cv2.putText(frame, text,
                                (centroid[0] - 30, centroid[1] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.circle(frame, (centroid[0], centroid[1]), 4,
                               (0, 255, 0), -1)

                    data[name] = objectID

        return frame, data

    def blur_object(self, topLeft, bottomRight, kSize, frame):
        x, y = topLeft[0], topLeft[1]
        w, h = bottomRight[0] - topLeft[0], bottomRight[1] - topLeft[1]

        ROI = frame[y:y + h, x:x + w]
        blur = cv2.GaussianBlur(ROI, kSize, 0)

        frame[y:y + h, x:x + w] = blur

    def blur_frontyard(self, kSize, frame):
        height, width, channel = frame.shape
        ROI_corners = np.array([[(320, 490), (895, 320), (895, height),
                                 (320, height)]],
                               dtype=np.int32)
        blurred_frame = cv2.GaussianBlur(frame, kSize, 0)
        mask = np.zeros(frame.shape, dtype=np.uint8)
        ignore_mask_color = (255, ) * channel
        cv2.fillPoly(mask, ROI_corners, ignore_mask_color)
        mask_inverse = np.ones(mask.shape).astype(np.uint8) * 255 - mask
        frame = cv2.bitwise_and(blurred_frame, mask) + cv2.bitwise_and(
            frame, mask_inverse)

        return frame

    def pixelate_frontyard(self, kSize, frame):
        height, width, channel = frame.shape
        w, h = kSize
        ROI_corners = np.array([[(320, 490), (895, 320), (895, height),
                                 (320, height)]],
                               dtype=np.int32)
        temp = cv2.resize(frame, (w, h), interpolation=cv2.INTER_LINEAR)
        pixelated_frame = cv2.resize(temp, (width, height),
                                     interpolation=cv2.INTER_NEAREST)
        mask = np.zeros(frame.shape, dtype=np.uint8)
        ignore_mask_color = (255, ) * channel
        cv2.fillPoly(mask, ROI_corners, ignore_mask_color)
        mask_inverse = np.ones(mask.shape).astype(np.uint8) * 255 - mask
        frame = cv2.bitwise_and(pixelated_frame, mask) + cv2.bitwise_and(
            frame, mask_inverse)

        return frame
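# A hedged usage sketch for LiveDetector (hypothetical webcam loop; assumes the model
# path hard-coded in __init__ and the project-local CentroidTracker are available):
cap = cv2.VideoCapture(0)
live = LiveDetector()
while True:
    ok, frame = cap.read()
    if not ok:
        break
    frame, data = live.track_objects(frame)
    cv2.imshow("tracking", frame)
    if cv2.waitKey(1) == 27:  # ESC
        break
cap.release()
cv2.destroyAllWindows()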
Example no. 11

import os
import sys
import math
import cv2
from imageai.Detection import ObjectDetection

VIDEOPATH = os.environ['AICITYVIDEOPATH'] + "/test-data/"
WEIGHTPATH = "./yolo.h5"
TXTOUTPATH = "Detections/"
os.mkdir(TXTOUTPATH)
detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(WEIGHTPATH)
detector.loadModel()
custom = detector.CustomObjects(car=True, bus=True, truck=True)

print("Using Input Video Path : " + VIDEOPATH)

for video_num in range(1, 101):
    cap = cv2.VideoCapture(VIDEOPATH + str(video_num) + '.mp4')
    if not cap.isOpened():
        raise IOError("Couldn't open webcam or video")
    framecount = 0
    writelist = []
    while (cap.isOpened()):
        ret, frame = cap.read()
        framecount += 1
        print(framecount)
        if (not ret):
            break
        # call truncated in the source; a minimal completion consistent with the setup above
        ret_img, detections = detector.detectCustomObjectsFromImage(
            custom_objects=custom,
            input_type="array",
            input_image=frame,
            output_type="array")
Example no. 12
from imageai.Detection import ObjectDetection
import os
from time import time

execution_path = os.getcwd()

detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath( os.path.join(execution_path , "resnet50_coco_best_v2.0.1.h5"))
detector.loadModel(detection_speed="flash")

our_time = time()
detections = detector.detectObjectsFromImage(input_image=os.path.join(execution_path , "6.jpg"), output_image_path=os.path.join(execution_path , "6flash.jpg"), minimum_percentage_probability=30)
print("IT TOOK : ", time() - our_time)
for eachObject in detections:
    print(eachObject["name"] + " : " + eachObject["percentage_probability"] )
    print("--------------------------------")
Example no. 13

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 16 17:02:26 2019

@author: pivotalit
"""

from imageai.Detection import ObjectDetection
import os

execution_path = os.getcwd()

detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(
    os.path.join(execution_path,
                 "/users/pivotalit/downloads/resnet50_coco_best_v2.0.1.h5"))
detector.loadModel()
detections = detector.detectObjectsFromImage(
    input_image="/users/pivotalit/downloads/Objects.jpeg",
    output_image_path="/users/pivotalit/downloads/imagenew.jpg")

for eachObject in detections:
    print(eachObject["name"], " : ", eachObject["percentage_probability"])
"""
Created on Sun Jun 24 08:14:28 2018

@author: hp
"""

from imageai.Detection import ObjectDetection
import os

execution_path = os.getcwd()

detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(
    os.path.join(
        execution_path,
        "C:\\Users\\hp\\Downloads\\imageAI objectDetection1\\resnet50_coco_best_v2.0.1.h5"
    ))
detector.loadModel()
detections = detector.detectObjectsFromImage(
    input_image=os.path.join(
        execution_path,
        "C:\\Users\\hp\\Downloads\\imageAI objectDetection1\\input images\\image2.jpg"
    ),
    output_image_path=os.path.join(
        execution_path,
        "C:\\Users\\hp\\Downloads\\imageAI objectDetection1\\output images\\image2new.jpg"
    ))

for eachObject in detections:
    print(eachObject["name"] + " : " + eachObject["percentage_probability"])
Example no. 15
# Assumed imports for this snippet (the GUI objects `canvas` and `text`, the helpers
# `carga_imagen`, `compilar1`, `compilar2`, `encode`, and the constants HOST and
# execution_path are defined elsewhere in the original project):
import base64
import os
import socket
import sys
from ast import literal_eval
from tkinter import NW, END, INSERT

from imageai.Detection import ObjectDetection


def main():

    detector = ObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(
        os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel("faster")

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    try:
        #s.bind((socket.gethostname(), 8000))
        s.bind((HOST, 8000))
    except socket.error as err:
        print("Error de bind")
        sys.exit()

    s.listen(5)

    img = carga_imagen("listo.png")
    canvas.create_image(0, 0, image=img, anchor=NW)

    text.config(state="normal")
    text.delete(1.0, END)
    text.insert(INSERT, "Esperando una conexion")
    text.config(state="disabled")

    while True:
        print("esperando una conexion")
        clientsocket, address = s.accept()
        print(f"Conexion desde {address} establecida ")
        f = open("recibido.png", "wb")

        img = carga_imagen("cargando1.png")
        canvas.create_image(0, 0, image=img, anchor=NW)

        full_msg = ''

        #msg = clientsocket.recv(1024)
        #print(msg)
        #temp = msg[22:]
        #full_msg += temp.decode("utf-8")
        while True:
            msg = clientsocket.recv(1024)
            #print(msg)
            if len(msg) < 1024:
                try:
                    #temp = msg[7:]
                    #full_msg += temp.decode("utf-8")
                    full_msg += msg.decode("utf-8")
                except:
                    print("No se pudo decodificar el mensaje")
                break
            try:
                full_msg += msg.decode("utf-8")
            except:
                print("No se pudo decodificar el mensaje")
        #msg = clientsocket.recv(100000)
        print(full_msg)
        try:
            js = literal_eval(str(full_msg))
        except:
            print("No se pudo convertir el mensaje a formato json")

        text.config(state="normal")
        text.delete(1.0, END)
        text.insert(
            INSERT, "Datos sensor ultrasonico: " + str(js["distancia"]) +
            "cm de distancia\n\nDatos camara: Fotografia con un peso de " +
            str(len(js["imagen"])) + " bytes\n\n")
        text.config(state="disabled")

        try:
            imagen = base64.b64decode(js["imagen"])
            f.write(imagen)
            detections = detector.detectObjectsFromImage(
                input_image=os.path.join(execution_path, "recibido.png"),
                output_image_path=os.path.join(execution_path, "imagenew.png"),
                minimum_percentage_probability=40)

            img = carga_imagen("imagenew.png")
            canvas.create_image(0, 0, image=img, anchor=NW)
            objetos_detectados = compilar1(detections)
            """if len(detections) != 0:
                for eachObject in detections:
                    objetos_detectados += ""+str(verificarGen(traducir(eachObject["name"])))+ ";" + str(eachObject["percentage_probability"] )+" "
            else:
                objetos_detectados += "una pared u objeto desconocido "
            """
            text.config(state="normal")
            text.insert(
                INSERT,
                "Objeto(s) identificado(s): " + objetos_detectados + "\n\n")

            msg_to_send = compilar2(detections, js)

            #msg_to_send = direccion(str(js["direccion"]))+"hay "+objetos_detectados+"a "+str(js["distancia"])+"cm de distancia de usted"
            print("Natural: " + msg_to_send)
            print("\nMorse: " + str(encode(msg_to_send)))

            text.insert(
                INSERT, "Mensaje en lenguaje natural: " + msg_to_send +
                "\n\nMensaje en codigo morse: " + encode(msg_to_send) + "\n\n")
            text.config(state="disabled")

            clientsocket.send(bytes(msg_to_send, "utf-8"))
            clientsocket.close()
        except:
            print("Error grave")
            clientsocket.close()
            img = carga_imagen("listo.png")
            canvas.create_image(0, 0, image=img, anchor=NW)
            text.config(state="normal")
            text.delete(1.0, END)
            text.insert(INSERT, "Esperando una conexion")
            text.config(state="disabled")
Example no. 16

from imageai.Detection import ObjectDetection
import cv2
import os
import time
import numpy as np
detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath("yolo.h5")
detector.loadModel()
cap = cv2.VideoCapture(0)
time.sleep(3)
while (cap.isOpened()):
    flag, img = cap.read()
    if not flag:
        break
    else:
        img = np.flip(img, axis=1)  # mirror the frame; flipping before the check could crash on a failed read
        cv2.imwrite("new_image.jpg", img)
        detection = detector.detectObjectsFromImage(
            input_image="new_image.jpg", output_image_path="new_image.jpg")
        filename = cv2.imread("new_image.jpg")
        cv2.imshow("window", filename)
        k = cv2.waitKey(1)
        if k == 27:
            break
        else:
            continue
cap.release()
os.remove("new_image.jpg")
cv2.destroyAllWindows()
Example no. 17
# -*- coding: utf-8 -*-
"""
Created on Fri Jun  5 23:09:20 2020

@author: oguzkaya
"""

from imageai.Detection import ObjectDetection
import os
from time import time
# NOTE: str.rstrip strips a trailing *set of characters*, not a suffix; this only works
# while the path ends in exactly "examples"
models_path = "".join((os.getcwd().rstrip("examples"), "models"))
image_path = "".join((os.getcwd().rstrip("examples"), "images"))
detector = ObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(models_path, "yolo.h5"))
detector.loadModel()
our_time = time()
detections = detector.detectObjectsFromImage(
    input_image=os.path.join(image_path, "image3.jpg"),
    output_image_path=os.path.join(image_path, "image3new.jpg"),
    minimum_percentage_probability=30)
print("IT TOOK : ", time() - our_time)
for eachObject in detections:
    print(eachObject["name"], " : ", eachObject["percentage_probability"],
          " : ", eachObject["box_points"])
    print("--------------------------------")
Example no. 18
from bokeh.models.tools import HoverTool
from datetime import datetime
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import tensorflow as tf
import keras as ks
from keras import metrics
from ortools.constraint_solver import pywrapcp
import glob
# Used below but missing from the original import block:
import os
import random
from imageai.Detection import ObjectDetection

execution_path = os.getcwd()
detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
#detector.setModelPath( os.path.join(execution_path , "resnet50_coco_best_v2.0.1.h5"))
detector.setModelPath( os.path.join(execution_path , "frozen_inference_graph.pb"))  # NOTE: ImageAI's RetinaNet loader expects the .h5 weights above; a frozen .pb graph will not load
detector.loadModel()
#for file in os.listdir(execution_path):
#if (file.endswith(".jpg") or file.endswith(".jpeg")):
      
list_of_files = glob.glob('./*.jpg')
input_image = max(list_of_files, key=os.path.getctime)
#print (latest_file) 
#input_image = os.path.join(execution_path , file)
ImageName = "./detection/Detection"+str(random.randint(1,10000))+".jpg"
detections = detector.detectObjectsFromImage(input_image, output_image_path = ImageName)
#output_image_path=os.path.join(execution_path , "imagenew.jpg"))
for eachObject in detections:
    print(eachObject["name"], " : ", eachObject["percentage_probability"])
#remove_uploaded_image()
#return 'Succesful detection'
Example no. 19
from imageai.Detection import ObjectDetection
import os
import cv2 
from PIL import Image

cap = cv2.VideoCapture(0) 

# Load Yolo
execution_path = os.getcwd()
detector = ObjectDetection()

#detector.setModelTypeAsTinyYOLOv3()  # YOLOv3
#detector.setModelPath( os.path.join(execution_path , "models/yolo-tiny.h5"))
detector.setModelTypeAsRetinaNet()  # Other types are TinyYOLOv3, YOLOv3
detector.setModelPath( os.path.join(execution_path , "models/resnet50_coco_best_v2.0.1.h5"))  #yolo-tiny.h5

detector.loadModel() #detection_speed="fastest"

process_this_frame = True

while 1:

    # read frames from the camera
    ret, img = cap.read()
    # img_str = cv2.imencode('.jpg', img)[1].tostring()

    if process_this_frame:

        img = cv2.resize(img, (0, 0), fx=0.5, fy=0.5)

        detections = detector.detectObjectsFromImage(input_type="array", input_image=img, output_type="file")
Example no. 20
from __future__ import division, print_function
# coding=utf-8
import sys
import os
import numpy as np
from werkzeug.utils import secure_filename
from flask import Flask, redirect, url_for, request, render_template
from imageai.Detection import ObjectDetection
import tensorflow as tf
# Define a flask app
app = Flask(__name__)

detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath('resnet50_coco_best_v2.0.1.h5')
detector.loadModel()
custom_objects = detector.CustomObjects(dog=True, cat=True)


def detect_the_pet(img, detector):
    detections = detector.detectCustomObjectsFromImage(
        custom_objects=custom_objects,
        input_image=img,
        output_image_path='result/result.png',
        minimum_percentage_probability=40)
    return detections


@app.route('/', methods=['GET'])
def index():
    # Main page (body truncated in the source; a minimal completion)
    return render_template('index.html')
Example no. 21
# ref: https://imageai.readthedocs.io/en/latest/detection/index.html

# This module will count the objects in an image and store them

import time
from imageai.Detection import ObjectDetection
import os
import rospy  # used in the except handler below; available inside a ROS environment

img_detector = ObjectDetection()
img_detector.setModelTypeAsYOLOv3()  #147 layers
img_detector.setModelPath(
    os.path.join("/catkin_ws/src/master_pkg/src/", "yolo.h5"))  #147 layers
img_detector.loadModel(
    detection_speed="fast")  #normal, fast, faster, fastest, flash
# Note: increases in speed should correspond to a lower 'min. % probability' value


def count(image_array):
    try:
        det_frame, detections = img_detector.detectObjectsFromImage(
            input_type="array",
            minimum_percentage_probability=60,
            input_image=image_array,
            output_type="array")

        print("-------------")
        for item in detections:
            print(item["name"])
        print("-------------")
    except:
        rospy.loginfo("Could not count objects.")
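# A hedged usage sketch for count() (hypothetical image path; the function expects a
# BGR numpy array such as cv2.imread returns):
import cv2
frame = cv2.imread("sample.jpg")
count(frame)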
Example no. 22
from imageai.Detection import ObjectDetection
import os

detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(
    os.path.join("C:/Users/HP/Downloads/resnet50_coco_best_v2.0.1.h5"))
detector.loadModel()
detections = detector.detectObjectsFromImage(
    input_image=os.path.join(
        "F:/Photos/Campus life(Tangail)/IMG_20180404_174403.jpg"),
    output_image_path=os.path.join(
        "D:/Pycharm/Program/AI_Stat_ANN/detection.jpg"))
for eachObject in detections:
    print(eachObject["name"], " : ", eachObject["percentage_probability"])
detections, extracted_images = detector.detectObjectsFromImage(
    input_image=os.path.join(
        "F:/Photos/Campus life(Tangail)/IMG_20180404_174403.jpg"),
    output_image_path=os.path.join(
        "D:/Pycharm/Program/AI_Stat_ANN/detection.jpg"),
    extract_detected_objects=True)
Example no. 23
# Assumed imports for this snippet (not shown in the original; PriusImage is a
# project-local helper):
import os
import time

import numpy as np
from imageai.Detection import ObjectDetection
from imageai.Prediction.Custom import CustomImagePrediction


class PriusPredictor(object):
	def __init__(self, image_path, model_path, output_path):
		self.avgColor = []
		self.pcaColors = []

		self.detector = ObjectDetection()
		self.detector.setModelTypeAsYOLOv3()
		self.detector.setModelPath(model_path + "yolo.h5")
		self.detector.loadModel(detection_speed="flash")

		self.prediction = CustomImagePrediction()
		self.prediction.setModelTypeAsResNet()
		#self.prediction.setModelPath(model_path + "model_ex-012_acc-0.988819.h5")
		self.prediction.setModelPath(model_path + "model_ex-043_acc-0.996787.h5")
		self.prediction.setJsonPath(model_path + "model_class.json")
		self.prediction.loadModel(num_objects=2)

		now = time.localtime()
		self.frame_folder = str(now.tm_year) + str(now.tm_mon) + str(now.tm_mday)
		self.image_path = image_path
		self.output_path = output_path + "detection/" + self.frame_folder + "/"

		if os.path.exists(image_path) is False:
			os.mkdir(image_path)

		if os.path.exists(output_path) is False:
			os.mkdir(output_path)

		if os.path.exists(os.path.join(output_path, 'detection')) is False:
			os.mkdir(os.path.join(output_path, 'detection'))

		if os.path.exists(os.path.join(output_path, 'processed')) is False:
			os.mkdir(os.path.join(output_path, 'processed'))

		self.create_output_folder()

	def create_output_folder(self):
		if os.path.exists(self.output_path) is False:
			os.mkdir(self.output_path)

	def predict_vehicle_method(self, prediction_meta):
		detected_img = os.path.join(prediction_meta['image_path'], prediction_meta['image_name'])
		if os.path.exists(detected_img) is not True:
			detected_img = prediction_meta['image_path']

		return self.prediction.predictImage(detected_img, result_count=2)

	def predict_vehicle(self, prediction_meta):
		detected_img = prediction_meta['image_path']

		return self.prediction.predictImage(detected_img, result_count=2)

	def detect_pca(self, image):
		priusImage = PriusImage.from_path(image)
		return priusImage.has_pca_match()

	def detect_vehicle(self, meta_data):
		try:

			image = os.path.join(meta_data["image_path"], meta_data['image_name'])
			output_image = self.output_path + meta_data['image_name']
			#print("Detecting vehicle for " + meta_data['image_name'] + " -> " + output_image)
			if os.path.exists(image) is not True:
				print("File doesnt exist. File: "  + image)
			custom_objects = self.detector.CustomObjects(car=True)
			detections, objects_path = self.detector.detectCustomObjectsFromImage(custom_objects=custom_objects,
				                                                                 input_image=image,
				                                                                 extract_detected_objects=True,
				                                                                 output_image_path=output_image,
				                                                                 minimum_percentage_probability=50)

			return zip(detections, objects_path)
		except Exception as e:
			print("While detecting vehicle: " + str(e))

	def detect_vehicle_from_array(self, decoded):
		#print("Detecting vehicle for " + meta_data['image_name'] + " -> " + output_image)
		custom_objects = self.detector.CustomObjects(car=True)
		result = self.detector.detectCustomObjectsFromImage(custom_objects=custom_objects,
		                                                    input_type="array",
		                                                    input_image=np.array(decoded),
		                                                    #output_type="array",
		                                                    minimum_percentage_probability=50)

		print("Detected: " + str(result))
		return result
Example no. 24
from imageai.Detection import ObjectDetection, VideoObjectDetection
from glob import glob
import os
'''
TODO
Read up on other models at https://github.com/OlafenwaMoses/ImageAI/releases/tag/1.0/
'''
detection_obj = ObjectDetection()
detection_obj.setModelTypeAsRetinaNet(
)  # you can choose between retinaNet, YOLOv3 and TinyYOLOv3
detection_obj.setModelPath("models/resnet50_coco_best_v2.0.1.h5")
detection_obj.loadModel()
for img in glob("inputs/*"):
    detections, extracted_images = detection_obj.detectObjectsFromImage(
        input_image=img,
        output_image_path=os.path.join("outputs/",
                                       f'detected_{str(img).split("/")[1]}'),
        extract_detected_objects=True,
        minimum_percentage_probability=80)
    # extract_detected_objects=True also saves each bounding-box crop as its own image
    # adjust minimum_percentage_probability to set the confidence threshold detections must meet
    for eachObject in detections:
        print(eachObject["name"], " : ", eachObject["percentage_probability"]
              )  # retrieve the object name and relevant probability
Example no. 25
            if each_image.startswith("2") or each_image.startswith(
                    "b") or each_image.startswith("B"):
                if (each_image.endswith(".jpg") or each_image.endswith(".png")
                        or each_image.endswith(".JPG")
                        or each_image.endswith(".PNG")):
                    images_paths.append(each_folder_path + "/" + each_image)
                    images_names.append(each_image)

    json_path = dir_path + "/json_result/image_data.json"
    image_data = open(json_path, 'w+')

    neural_path = dir_path + "/neural_network/yolo.h5"

    detector = ObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(neural_path)
    detector.loadModel()

    output_path = dir_path + "/results/"
    print("")
    print("PROCESSING IMAGES...")
    print("")
    for path in tqdm(images_paths):
        for name in images_names:
            if name in path:
                except_flag = False

                # use output_image = path to results, and delete in output_type in order to get the output images in the file results
                try:
                    returned_image, detections = detector.detectObjectsFromImage(
                        input_image=path,
                        # remaining arguments truncated in the source; output_type="array"
                        # matches the returned_image variable above
                        output_type="array")
Example no. 26
# Assumed imports for this snippet (not shown in the original):
import os
import time
from datetime import datetime

import cv2
import numpy as np
import pandas as pd
import streamlink
from imageai.Detection import ObjectDetection
from pytz import timezone
from selenium import webdriver


class CountingObject(object):
    """
    A class of counting objects
    """

    algos = {
        "resnet": "resnet50_coco_best_v2.0.1.h5",
        "yolov3": "yolo.h5",
        "yolo_tiny": "yolo-tiny.h5"
    }

    def __init__(self, stream_link):
        self.stream_link = stream_link
        self.streams = streamlink.streams(stream_link)
        if self.streams is None:
            raise ValueError("cannot open the stream link %s" % stream_link)

        q = list(self.streams.keys())[0]
        self.stream = self.streams['%s' % q]

        self.target_img_path = '/home/yyf/ego_parking/app/static/'

        self.detector = ObjectDetection()
        if self.detector is None:
            raise ValueError("Detector of objects is None")

    def detector_init(self, algo="resnet", speed="nomal"):
        """
        Must be invoked after instantiation to initialize the object detector.
        
        Args:
            algo (str): The algorithm of object detection tasks. "resnet"(default), "yolov3", "yolo_tiny".
            speed (str): The detection speed for object detection tasks. "normal"(default), "fast", "faster", "fastest" and "flash".
        
        Returns:
            void
        
        """

        if algo == "resnet":
            self.detector.setModelTypeAsRetinaNet()
            self.detector.setModelPath(
                os.path.join(self.target_img_path, self.algos["resnet"]))
        elif algo == "yolov3":
            self.detector.setModelTypeAsYOLOv3()
            self.detector.setModelPath(
                os.path.join(self.target_img_path, self.algos["yolov3"]))
        elif algo == "yolo_tiny":
            self.detector.setModelTypeAsTinyYOLOv3()
            self.detector.setModelPath(
                os.path.join(self.target_img_path, self.algos["yolo_tiny"]))
        else:
            print("Given algorithm of object detection is invalid.")
            return

        self.detector.loadModel(detection_speed=speed)
        self.custom_objects = self.detector.CustomObjects(car=True)

    def put_text_to_img(self, img, text, pos = (50,50), fontColor=(0,0,255), lineType=2):
        """
        Put text to an image.
        
        Args:
            img : An image represented by numpy array. You can use cv2.imread(path_to_iamge) to read an image in the filesystem by
                    giving the image path.
            text (str): The text what you want to put to the image.
            pos (tuple): x and y position relative to the origin (0,0) at the top left.
            fontColor (tuple): B, G, R color channels (OpenCV order).
            lineType (int): Type of line.
        
        Returns:
            void
        
        """
        if img is None:
            print("Cannot put text on a None image.")
            return

        font = cv2.FONT_HERSHEY_SIMPLEX
        fontScale = 1

        cv2.putText(img, text, pos, font, fontScale, fontColor, lineType)

    def capture_frame_by_stream_wrapper(self,
                                        image_prefix="stream",
                                        mprob=30,
                                        num_im=6,
                                        time_interval=10,
                                        tz=None):
        """
        A wrapper of the function capture_frame_by_stream.
        
        Args:
            image_prefix (str): Prefix of target images. The postfix is numerated by numbers.
            mprob (int): Minimum probability to be a person.
            num_im (int): How many images will be taken.
            time_interval (int): Time interval of taking next image, the unit is second.
			tz (str): Time zone from package pytz. Default is None, then apply utc time. Use function pytz.all_timezones to get the list of timezones.
        
        Returns:
            void
        
        """
        print("The current conuting function is based on capture frame by stream.")

        dir_path = os.path.join(self.target_img_path, image_prefix)
        if not os.path.isdir(dir_path):
            os.makedirs(dir_path)
        frames_res = []
        if num_im <= 0:
            try:
                i = 0
                while True:
                    i = i + 1
                    frame_res = self.capture_frame_by_stream(
                        image_prefix, i, mprob, tz)
                    frames_res.append(frame_res)
                    time.sleep(time_interval)
            except KeyboardInterrupt:
                print('Abort by key interrupt.')
                return frames_res
        else:
            for i in range(num_im):
                frame_res = self.capture_frame_by_stream(
                    image_prefix, i, mprob, tz)
                frames_res.append(frame_res)
                time.sleep(time_interval)

            return frames_res

    def capture_frame_by_stream(self,
                                image_prefix="stream",
                                image_index=0,
                                mprob=30,
                                tz=None) -> int:
        """
        Capture a frame from an online stream, e.g. a webcam.
        
        Args:
            image_prefix (str): Prefix of target images. The postfix is numerated by numbers.
            image_index (int): The postfix of target images. By default, numerated from 0.
            mprob (int): Minimum probability to be a person.
		    tz (str): Time zone from package pytz. Default is None, then apply utc time. Use function pytz.all_timezones to get the list of timezones.

		
        Returns:
            tuple: The name of target image, the number of persons in an image detected by the model and the current time.
        """
		
        video_cap = cv2.VideoCapture(self.stream.url)
        dir_path = os.path.join(self.target_img_path, image_prefix)

        if not video_cap.isOpened():  # VideoCapture never returns None; check isOpened() instead
            print("Open webcam [%s] failed." % self.stream.url)
            return None
        else:
            ret, frame = video_cap.read()

            if not ret:
                print("Captured frame is broken.")
                video_cap.release()
                return None
            else:
                print("-----------------------------------------------------")
                
                if tz is None:
                    current_time = datetime.utcnow().strftime(
                        "%a %Y-%m-%d %H:%M:%S")
                    print('### time zone is None, therefore use utc time ###')
                else:
                    current_time = datetime.now(
                        timezone(tz)).strftime("%a %Y-%m-%d %H:%M:%S")

                print("Capturing frame %d." % image_index)
                target_img_name = "{}{}.png".format(image_prefix, image_index)
                # frame = crop_frame(frame, target_img_name)  # comment to unuse the crop function.
                
                cv2.imwrite(os.path.join(dir_path, target_img_name), frame)

                detections = self.detector.detectCustomObjectsFromImage(
                    custom_objects=self.custom_objects,
                    input_image=os.path.join(dir_path, target_img_name),
                    output_image_path=os.path.join(dir_path, target_img_name),
                    minimum_percentage_probability=mprob)

                print(
                    "The number of cars in frame %d (%s):" %
                    (image_index, target_img_name), len(detections))
                print(
                    "The current time in frame %d (%s):" %
                    (image_index, target_img_name), current_time)

                img = cv2.imread(os.path.join(dir_path, target_img_name))
                # put the number of persons to the image and put timestamp to the image
                self.put_text_to_img(
                    img, "The number of cars:%s " % str(len(detections)))
                img_height, img_width = img.shape[0:2]
                self.put_text_to_img(
                    img, "The current time:%s " % current_time, pos=(int(img_width*0.1), int(img_height*0.9)))

                cv2.imwrite(os.path.join(dir_path, target_img_name), img)
                video_cap.release()

                return target_img_name, len(detections), current_time

    def capture_frame_by_screenshot_wrapper(self,
                                            image_prefix="screenshot",
                                            mprob=30,
                                            num_im=6,
                                            time_interval=10,
                                            tz=None):
        """
        A wrapper of the function capture_frame_by_screenshot.
        
        Args:
            image_prefix (str): Prefix of target images. The postfix is numerated by numbers.
            mprob (int): Minimum probability to be a person.
            num_im (int): How many images will be taken.
            time_interval (int): Time interval of taking next image, the unit is second.
			tz (str): Time zone from package pytz. Default is None, then apply utc time. Use function pytz.all_timezones to get the list of timezones.

        
        Returns:
            void
        
        """
        print(
            "The current counting function is based on capture frame by screenshot."
        )

        frames_res = []
        dir_path = os.path.join(self.target_img_path, image_prefix)
        if not os.path.isdir(dir_path):
            os.makedirs(dir_path)
        if num_im <= 0:
            try:
                i = 0
                while True:
                    i = i + 1
                    frame_res = self.capture_frame_by_screenshot(
                        image_prefix, i, mprob, tz)
                    frames_res.append(frame_res)
                    time.sleep(time_interval)
            except KeyboardInterrupt:
                if self.driver is not None:
                    self.driver.quit()
                print('Abort by key interrupt.')
                return frames_res
        else:
            for i in range(num_im):
                frame_res = self.capture_frame_by_screenshot(
                    image_prefix, i, mprob, tz)
                frames_res.append(frame_res)
                time.sleep(time_interval)

            if self.driver is not None:
                self.driver.quit()

            return frames_res

    def capture_frame_by_screenshot(self,
                                    image_prefix="screenshot",
                                    image_index=0,
                                    mprob=30,
                                    num_im=6,
                                    tz=None) -> int:
        """
        Capture an image by taking a screenshot of an opened website via the browser.
        
        Args:
            image_prefix (str): Prefix of target images. The postfix is numerated by numbers.
            image_index (int): The postfix of target images. By default, numerated from 0.
            mprob (int): Minimum probability to be a person.
			tz (str): Time zone from package pytz. Default is None, then apply utc time. Use function pytz.all_timezones to get the list of timezones.

        
        Returns:
            tuple: The name of target image, the number of persons in an image detected by the model and the current time.
        
        """
		
        dir_path = os.path.join(self.target_img_path, image_prefix)

        if self.driver is None:
            print("Web driver is none.")
            return None
        else:
            print("-----------------------------------------------------")

            if tz is None:
                current_time = datetime.utcnow().strftime(
                    "%a %Y-%m-%d %H:%M:%S")
                print('### time zone is None, therefore use utc time###')
            else:
                current_time = datetime.now(
                    timezone(tz)).strftime("%a %Y-%m-%d %H:%M:%S")

            target_img_name = "{}{}.png".format(image_prefix, image_index)
            print("Taking screenshot %d..." % image_index)
            self.driver.save_screenshot(
                os.path.join(dir_path, target_img_name))
            detections = self.detector.detectCustomObjectsFromImage(
                custom_objects=self.custom_objects,
                input_image=os.path.join(dir_path,
                                         target_img_name),
                output_image_path=os.path.join(dir_path, target_img_name),
                minimum_percentage_probability=mprob)

            print(
                "The number of cars in frame %d (%s):" % (image_index,
                                                            target_img_name),
                len(detections))
            print(
                "The current time in frame %d (%s):" %
                (image_index, target_img_name), current_time)

            img = cv2.imread(os.path.join(dir_path, target_img_name))
            # put the number of persons to the image
            self.put_text_to_img(
                img, "The number of cars is:%s" % str(len(detections)))
            img_height, img_width = img.shape[0:2]
            self.put_text_to_img(
                img, "The current time:%s " % current_time, pos=(int(img_width*0.1), int(img_height*0.9)))

            cv2.imwrite(os.path.join(dir_path, target_img_name), img)

            return target_img_name, len(detections), current_time

    def init_webdriver(self):
        """
       Initialize the webdriver of Chrome by using the python lib selenium.
        
        Args:
            Void
        
        Returns:
            Void
        """
		
        self.driver = webdriver.Chrome(
        )  # Optional argument, if not specified will search path.
        self.driver.get(self.stream_link)
        time.sleep(15)  # Jump over the ads
        
    def store_info_in_df_csv(self, infos, csv_filename="counting_person"):
        """
       Collect test dataset by storing the image name and the detected number of persons in a csv file.
        
        Args:
            infos (list): The infos of images contain the image name, the number of detected persons, current time of given time zone
                          and the empty ground-truth.
            csv_filename (str): The name of the csv file.
        
        Returns:
            df (DataFrame): Show the image name, the detected number of persons, current time of given time zone
        """
		
        df = pd.DataFrame(
            np.array(infos), columns=['image_name', 'detected_num', 'time'])
        # df["counted_num"] = ""  #only for baseline
        df.to_csv(
            path_or_buf=os.path.join(self.target_img_path, "%s.csv" %
                                     csv_filename))
        return df
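# A hedged usage sketch for CountingObject (placeholder stream URL; the weight files
# listed in `algos` are assumed under target_img_path):
counter = CountingObject("https://www.example.com/live")
counter.detector_init(algo="yolov3", speed="fast")
infos = counter.capture_frame_by_stream_wrapper(image_prefix="stream", mprob=30,
                                                num_im=3, time_interval=5)
counter.store_info_in_df_csv(infos, csv_filename="counting_car")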
Example no. 27
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from imageai.Detection import ObjectDetection
import os

workdir = os.getcwd()

find_objects = ObjectDetection()
find_objects.setModelTypeAsRetinaNet()

find_objects.setModelPath(os.path.join(workdir,
                                       "resnet50_coco_best_v2.0.1.h5"))
find_objects.loadModel()

objects_found = find_objects.detectObjectsFromImage(
    input_image=os.path.join(workdir, "ilia.jpg"),
    output_image_path=os.path.join(workdir, "iliaRevealed.jpg"))

for objects in objects_found:
    print(objects["name"] + " : " + str(objects["percentage_probability"]))
Example no. 28
# https://towardsdatascience.com/object-detection-with-10-lines-of-code-d6cb4d86f606
from imageai.Detection import ObjectDetection
import os

execution_path = os.getcwd()

detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(
    os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
detector.loadModel()
detections = detector.detectObjectsFromImage(
    input_image=os.path.join(execution_path, "../assets/team.png"),
    output_image_path=os.path.join(execution_path, "output.jpg"))

for eachObject in detections:
    print(eachObject["name"], " : ", eachObject["percentage_probability"])
Example no. 29
# Assumed imports for this snippet (the project-local helpers `cm`, `camera`,
# `classify`, `load_image`, and the constant MODEL_NAME are not shown in the original):
import os
import sys
import time

import pygame
from pygame.locals import (K_w, K_s, K_j, K_k, K_d, K_a, K_x, K_b, K_p, K_g,
                           K_t, K_q, K_o, K_r)
from keras.models import load_model
from imageai.Detection import ObjectDetection


def control():
    global num_seed
    num_seed = 0
    model = load_model(MODEL_NAME)
    #-----------
    imagede = '/home/pi/Desktop/photos/default.jpg'
    img_default = load_image(imagede)
    classify(model, img_default)

    ## Initialization
    pygame.init()
    ## Variables

    size = width, height = 300, 200
    bgColor = (0, 0, 0)

    ## Set the window width and height

    screen = pygame.display.set_mode(size)

    ## Set the window title

    pygame.display.set_caption("Team 1 Monitor")

    ## To draw text in Pygame, a Font object must be created

    ## the first argument selects the font, the second sets the font size

    font = pygame.font.Font(None, 20)

    ## get_linesize() returns the height of one line of text

    line_height = font.get_linesize()
    position = 0
    screen.fill(bgColor)

    ## create a text file to store the keystroke record

    # f = open("record.txt",'w')

    while True:

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # close the record file
                # f.close()
                sys.exit()
            # print('GG\n')
            if event.type == pygame.KEYDOWN:
                # f.write(str(event) + '\n')
                if event.key == K_w:
                    # print('w\n')
                    cm.send('#W')
                elif event.key == K_s:
                    cm.send('#S')
                if event.key == K_j:
                    # print('w\n')
                    cm.send('#w')
                elif event.key == K_k:
                    cm.send('#s')

                elif event.key == K_d:
                    cm.send('#D')
                elif event.key == K_a:
                    cm.send('#A')

                elif event.key == K_x:
                    cm.send('#x')
                elif event.key == K_b:
                    cm.send('#b')
                # --------------------------------------------

                elif event.key == K_p:
                    camera.stop()
                    imagepath = '/home/pi/Desktop/photos/' + str(
                        num_seed) + '.jpg'
                    img = load_image(imagepath)
                    label, prob, _ = classify(model, img)
                    print(
                        'we think image name:{} with certainty {} that it is {}'
                        .format(imagepath, prob, label))

                # ------------------------------
                # Object following; returns the detected direction.
                # Put the .h5 file in the script's directory; input/output use the photos folder
                elif event.key == K_g:
                    camera.stop()
                    imagepath = '/home/pi/Desktop/photos/' + str(
                        num_seed) + '.jpg'
                    outputpath = '/home/pi/Desktop/photos/' + str(
                        num_seed) + 'new.jpg'
                    execution_path = os.getcwd()
                    detector = ObjectDetection()
                    detector.setModelTypeAsRetinaNet()
                    detector.setModelPath(
                        os.path.join(execution_path,
                                     'resnet50_coco_best_v2.0.1.h5'))
                    detector.loadModel()
                    a = time.time()

                    custom_objects = detector.CustomObjects(bottle=True)

                    detections = detector.detectCustomObjectsFromImage(
                        custom_objects=custom_objects,
                        input_image=imagepath,
                        output_image_path=outputpath,
                        minimum_percentage_probability=50,
                        box_show=True)  # box_show is not a standard ImageAI argument; presumably a project-patched option
                    b = time.time()
                    print('the time is {}'.format(b - a))
                    print('the direction is {}'.format(
                        detections[0]['direction']))
                    for eachObject in detections:
                        print(eachObject['name'] + ':' +
                              eachObject['percentage_probability'])

                elif event.key == K_t:
                    num_seed = camera.capture(num_seed)

                elif event.key == K_q:
                    camera.stop()
                    print("==End of Photograph==")
                elif event.key == K_o:
                    camera.start()
                    print("==Begin of Photograph==")
                elif event.key == K_r:
                    camera.record()
                    # render() renders the text into a Surface object
                # the first argument is the text to render
                # the second specifies whether to anti-alias
                # the third specifies the text color
                screen.blit(font.render(str(event), True, (0, 255, 0)),
                            (0, position))
                position += line_height
                if position >= height:
                    position = 0
                    screen.fill(bgColor)
                pygame.display.flip()
Example no. 30
# Assumed imports for this snippet (not shown in the original; the source of the
# original's imread is unknown, imageio is one plausible choice):
import base64
import io
import os

import cv2
import numpy as np
from imageio import imread
from PIL import Image
from skimage import transform
from imageai.Detection import ObjectDetection
from imageai.Prediction.Custom import CustomImagePrediction


class Vaico_helmet_detection:
    def __init__(self,
                 yolo_weights='../../models_h5/yolo.h5',
                 model_weights='../../models_h5/model_ex-055_acc-0.996250.h5',
                 model_json='../../models_h5/model_class.json'):
        self.detector = ObjectDetection()
        self.detector.setModelTypeAsYOLOv3()
        self.detector.setModelPath(yolo_weights)
        self.detector.loadModel()

        self.classifier = CustomImagePrediction()
        self.classifier.setModelTypeAsResNet()
        self.classifier.setModelPath(model_weights)
        self.classifier.setJsonPath(model_json)
        self.classifier.loadModel(num_objects=2)

        self.current_detection = []

    def get_current_detection(self):
        return self.current_detection

    def set_current_detection(self, current_detection):
        self.current_detection = current_detection

    def find_persons(self, img_base64, margin=0.01):
        detections = self.detector.detectObjectsFromImage(
            input_image='static/img/temp_img.jpg',
            minimum_percentage_probability=30)
        # os.remove('.png')  # likely leftover: removing a file literally named ".png" raises FileNotFoundError
        img = cv2.imread('static/img/temp_img.jpg')
        persons_in_image = []
        count = 0

        for each_object in detections:
            name = each_object["name"]
            if (name == "person"):
                x1, y1, x2, y2 = each_object["box_points"]
                height, width, _ = img.shape
                """
                print(height, width)#debug
                print(x1,x2,y1,y2)#debug
                """

                x1_new = 0
                x2_new = 0
                y1_new = 0
                y2_new = 0

                if (y1 - (y1 * margin)) < 0:
                    y1_new = y1
                else:
                    y1_new = int((y1 - (y1 * margin)))

                if (x1 - (x1 * margin)) < 0:
                    x1_new = x1
                else:
                    x1_new = int((x1 - (x1 * margin)))

                if (y2 + (y2 * margin)) > height:
                    y2_new = y2
                else:
                    y2_new = int((y2 + (y2 * margin)))

                if (x2 + (x2 * margin)) > width:
                    x2_new = x2
                else:
                    x2_new = int((x2 + (x2 * margin)))

                person = img[y1_new:y2_new, x1_new:x2_new]
                person_path = 'static/img/test{0}.jpg'.format(count)
                count += 1
                cv2.imwrite(person_path, person)
                person_points = (person, (y1_new, y2_new, x1_new, x2_new),
                                 person_path)
                persons_in_image.append(person_points)

        return persons_in_image

    def load_image_for_model(self, image_path):
        np_image = Image.open(image_path)
        np_image = np.array(np_image).astype('float32') / 255
        np_image = transform.resize(np_image, (350, 350, 3))
        np_image = np.expand_dims(np_image, axis=0)
        return np_image
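    # NB: load_image_for_model is defined but never called below;
    # predict_on_image() passes a file path straight to ImageAI instead.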

    def predict_on_image(self, image):
        # predictImage returns two lists: (class_names, probabilities)
        res = self.classifier.predictImage(image)
        print(
            res,
            '-------------------------debug---------------------------------\n delete this in /app/model_test.py, method predict_on_image()'
        )
        return res

    # def prediction_map(self,res,threshold=0.8):
    #     if res[0][0] > threshold:
    #         return 'Tiene casco'
    #     else:
    #         return 'No tiene Casco'

    def get_image_from_base64(self, base64_str):
        #b64_string = base64_str.decode()
        img_temp = imread(io.BytesIO(base64.b64decode(base64_str)))
        cv2_img = cv2.cvtColor(img_temp, cv2.COLOR_RGB2BGR)
        cv2.imwrite("static/img/temp_img.jpg", cv2_img)

    def compute_current_detection(self, img_path='static/img/temp_img.jpg'):
        res = self.find_persons(img_path)
        current_detection = []
        for _, coord, path in res:
            label = self.predict_on_image(path)[0][0]
            current_detection.append((label, coord, path))

        self.set_current_detection(current_detection)

    def draw_bounding_box(self,
                          data,
                          original_img_path='static/img/temp_img.jpg'):
        img = cv2.imread(original_img_path)

        for label, coords, _ in data:
            y1, y2, x1, x2 = coords
            # green box for 'Tiene casco' (has helmet), red otherwise
            has_helmet = label == 'Tiene casco'
            color = (0, 255, 0) if has_helmet else (0, 0, 255)
            text = 'Tiene casco' if has_helmet else 'No tiene casco'
            cv2.rectangle(img, (x1, y1), (x2, y2), color, thickness=3)
            cv2.putText(img,
                        text, (x1, y1 - 7),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1.0, (255, 255, 255),
                        thickness=3,
                        lineType=cv2.LINE_AA)

        return img

    def clear_temp_imgs(self):
        for _, _, path in self.current_detection:
            os.remove(path)
        os.remove('static/img/temp_img.jpg')
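
# A hedged usage sketch for the class above; the file name and the call
# order are assumptions inferred from how the methods reference each other.
if __name__ == '__main__':
    with open('sample.jpg', 'rb') as f:
        b64_frame = base64.b64encode(f.read())
    pipeline = Vaico_helmet_detection()
    pipeline.get_image_from_base64(b64_frame)  # writes static/img/temp_img.jpg
    pipeline.compute_current_detection()       # detect persons, classify crops
    annotated = pipeline.draw_bounding_box(pipeline.get_current_detection())
    cv2.imwrite('static/img/out.jpg', annotated)
    pipeline.clear_temp_imgs()
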
import os

import cv2
from PyQt5.QtCore import QTimer
from PyQt5.QtGui import QImage, QPixmap
from PyQt5.QtWidgets import QApplication, QMainWindow

from imageai.Detection import ObjectDetection
# Ui_Form is assumed to be generated from a Qt Designer .ui file,
# e.g. `from ui_form import Ui_Form`; the original omits this import.


class StartWindows(QMainWindow):
    def __init__(self, camera=None, parent=None):
        super(StartWindows, self).__init__(parent=parent)
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        #detector

        #button
        self.ui.pushButton.clicked.connect(self.start)
        self.ui.pushButton_2.clicked.connect(self.stop)

        #camera
        self.camera = cv2.VideoCapture(0)
        #timer
        self.update_timer = QTimer()
        self.update_timer.timeout.connect(self.update)

    def start(self):
        model = self.ui.comboBox.currentText()
        print(model)

        if model == "YOLO V3":
            self.yolo()
        elif model == "YOLO TINY":
            self.yolo_tiny()
        elif model == "RESNET":
            self.resnet()

    def yolo(self):
        self.update_timer.start(30)
        self.execution_path = os.getcwd()
        self.detector = ObjectDetection()
        self.detector.setModelTypeAsYOLOv3()
        self.detector.setModelPath(os.path.join(self.execution_path,
                                                "yolo.h5"))
        self.detector.loadModel(detection_speed="flash")
        print("###you are use yolo model###")

    def yolo_tiny(self):
        self.update_timer.start(30)
        self.execution_path = os.getcwd()
        self.detector = ObjectDetection()
        self.detector.setModelTypeAsTinyYOLOv3()
        self.detector.setModelPath(
            os.path.join(self.execution_path, "yolo-tiny.h5"))
        self.detector.loadModel(detection_speed="flash")
        print("###you are use yolo_tiny model###")

    def resnet(self):
        self.update_timer.start(30)
        self.execution_path = os.getcwd()
        self.detector = ObjectDetection()
        self.detector.setModelTypeAsRetinaNet()
        self.detector.setModelPath(
            os.path.join(self.execution_path, "resnet50_coco_best_v2.0.1.h5"))
        self.detector.loadModel(detection_speed="fastest")
        print("###you are use resnet model###")

    def stop(self):
        self.update_timer.stop()

    def update(self):

        ret, frame = self.camera.read()
        frame = cv2.flip(frame, 1)
        #detected
        custom = self.ui.comboBox_2.currentText()
        print(custom)
        if custom == "Person":
            custom_objects = self.detector.CustomObjects(person=True)
        elif custom == "orange":
            custom_objects = self.detector.CustomObjects(orange=True)
        elif custom == "Cell Phone":
            custom_objects = self.detector.CustomObjects(cell_phone=True)

        detected_image_array, detections = self.detector.detectCustomObjectsFromImage(
            custom_objects=custom_objects,
            input_type="array",
            input_image=frame,
            output_type="array")

        for eachObject in detections:
            print(eachObject["name"], " : ",
                  eachObject["percentage_probability"], " : ",
                  eachObject["box_points"])

        #resize
        detected_image_array = cv2.resize(detected_image_array, (801, 391))
        height, width, channel = detected_image_array.shape
        bytesPerLine = 3 * width

        qImg = QImage(detected_image_array.data, width, height, bytesPerLine,
                      QImage.Format_RGB888).rgbSwapped()
        pixmap_image = QPixmap.fromImage(qImg)
        self.ui.label.setPixmap(pixmap_image)
        self.ui.label.show()
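
# A minimal, assumed entry point for the StartWindows demo above; the
# original snippet does not show how the window is launched.
if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    window = StartWindows()
    window.show()
    sys.exit(app.exec_())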