def test_object_detection_yolov3_array_io():

    image_input_array = cv2.imread(image_input)

    detector = CustomObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(model_path)
    detector.setJsonPath(model_json)
    detector.loadModel()
    detected_array, results = detector.detectObjectsFromImage(
        input_image=image_input_array,
        input_type="array",
        minimum_percentage_probability=40,
        output_type="array")

    assert isinstance(detected_array, ndarray)
    assert isinstance(results, list)
    for result in results:
        assert isinstance(result["name"], str)
        assert isinstance(result["percentage_probability"], float)
        assert isinstance(result["box_points"], list)

    detected_array, results2, extracted_arrays = detector.detectObjectsFromImage(
        input_image=image_input,
        output_image_path=image_output,
        minimum_percentage_probability=40,
        extract_detected_objects=True,
        output_type="array")

    assert isinstance(results2, list)
    assert isinstance(extracted_arrays, list)
    for result2 in results2:
        assert isinstance(result2["name"], str)
        assert isinstance(result2["percentage_probability"], float)
        assert isinstance(result2["box_points"], list)

    for extracted_array in extracted_arrays:
        assert isinstance(extracted_array, ndarray)
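The names image_input, image_output, model_path and model_json used by the test above are module-level fixtures defined elsewhere in the test file. A minimal sketch of what they might look like; the directory layout and file names are illustrative assumptions, not part of the original test:

# Hypothetical module-level fixtures for the test above (paths are placeholders).
import os
import cv2
from numpy import ndarray
from imageai.Detection.Custom import CustomObjectDetection

test_folder = os.path.dirname(os.path.abspath(__file__))
image_input = os.path.join(test_folder, "data-images", "sample.jpg")
image_output = os.path.join(test_folder, "data-temp", "sample-detected.jpg")
model_path = os.path.join(test_folder, "data-models", "custom-yolov3.h5")
model_json = os.path.join(test_folder, "data-json", "detection_config.json")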
class FindShips():
    PNG_PATH = "data/images_png/"
    TXT_PATH = "data/info_txt/"
    LEARN_PATH = "data/learn/"
    OUT_PATH = "data/out_processed_file/"

    def __init__(self):
        execution_path = os.getcwd()
        self.detector = CustomObjectDetection()
        self.detector.setModelTypeAsYOLOv3()
        self.detector.setModelPath("detection_model-ex-044--loss-0022.165.h5")
        self.detector.setJsonPath("detection_config.json")
        self.detector.loadModel()

    def searchImages(self, nameFile):
        """Detect ships on one tile and return box centres shifted by the tile offsets."""
        ships = []
        outfile = nameFile.split('.')[0] + 'processed.png'
        detections = self.detector.detectObjectsFromImage(
            input_image=self.LEARN_PATH + nameFile,
            output_image_path=self.OUT_PATH + outfile,
            minimum_percentage_probability=50)
        print("save: " + outfile)

        # The matching "corners" file holds the x/y offsets for this tile.
        outfileTxt = nameFile.split('.')[0] + 'corners.txt'
        with open(self.TXT_PATH + outfileTxt, "r") as f:
            moveX = float(f.readline())
            moveY = float(f.readline())

        for detection in detections:
            x1, y1, x2, y2 = detection["box_points"]
            # Centre of the bounding box, shifted by the tile offsets.
            x = (x2 + x1) / 2 + moveX
            y = (y2 + y1) / 2 + moveY
            ships.append((x, y))
        return ships
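A possible driver for the class above, assuming the tiles to scan live in LEARN_PATH and each <name>.png has a matching <name>corners.txt offset file in TXT_PATH; the entry point below is illustrative and not part of the original snippet:

# Hypothetical usage of FindShips (placeholder layout, see note above).
import os

if __name__ == "__main__":
    finder = FindShips()
    all_ships = []
    for name in sorted(os.listdir(FindShips.LEARN_PATH)):
        if name.lower().endswith(".png"):
            all_ships.extend(finder.searchImages(name))
    print("ships found:", len(all_ships))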
def predict_yolov3(data_path, model_path, json_path):
    # source: https://github.com/OlafenwaMoses/ImageAI/blob/master/examples/custom_detection_array_input_output.py
    #img = cv2.imread("data\\two_class_face_detection\\test\\images\\maksssksksss327.png")
    #img = cv2.imread("data\\friends.jpg")
    # dictionary mapping image name to a list of all objects detected
    # (each object is a dictionary)
    predictions = {}
    model = CustomObjectDetection()
    model.setModelTypeAsYOLOv3()
    model.setModelPath(model_path)
    model.setJsonPath(os.path.join(json_path, "detection_config.json"))
    model.loadModel()

    test_images_path = os.path.join(data_path, "test", "images")
    if not os.path.isdir(test_images_path):
        raise Exception(
            "[Error]: Directory {} does not exist".format(test_images_path))

    for path in paths.list_images(test_images_path):
        img = cv2.imread(path)
        # get image name with extension removed
        img_name = os.path.splitext(path.split(os.path.sep)[-1])[0]

        #detected_image, detections = model.detectObjectsFromImage(input_image=img, input_type="array", output_type="array", minimum_percentage_probability=30)
        detections = model.detectObjectsFromImage(
            input_image=img,
            input_type="array",
            output_image_path=os.path.join(YOLOv3_OUTPUT_PREDICTIONS,
                                           img_name + "_detected.png"),
            minimum_percentage_probability=30)

        predictions[img_name] = detections
        #for eachObject in detections:
        #    print(eachObject["name"], " : ", eachObject["percentage_probability"], " : ", eachObject["box_points"])

        #cv2.imshow("Main Image", detected_image)
        #cv2.waitKey()
        #cv2.destroyAllWindows()
    return predictions
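In the function above, paths is presumably imutils.paths and YOLOv3_OUTPUT_PREDICTIONS an output directory defined elsewhere in the module. A hedged sketch of how the function might be wired up and called; every path and the model file name are placeholders:

# Hypothetical module-level setup and call for predict_yolov3.
import os
import cv2
from imutils import paths
from imageai.Detection.Custom import CustomObjectDetection

YOLOv3_OUTPUT_PREDICTIONS = "output/yolov3_predictions"  # assumed output directory
os.makedirs(YOLOv3_OUTPUT_PREDICTIONS, exist_ok=True)

if __name__ == "__main__":
    predictions = predict_yolov3(
        data_path="data/two_class_face_detection",
        model_path="models/detection_model-ex-010--loss-0020.000.h5",  # placeholder name
        json_path="json")
    for img_name, detections in predictions.items():
        print(img_name, ":", len(detections), "objects detected")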
def detect_objects_yolo_custom(filename):

    detector = CustomObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(
        "examples/specjale/models/detection_model-ex-008--loss-0023.046.h5")
    detector.setJsonPath("examples/specjale/json/detection_config.json")
    detector.loadModel()

    detections = detector.detectObjectsFromImage(
        input_image=filename,
        output_image_path=os.path.join(
            'output/images/',
            datetime.now().strftime("%H_%M_%S") + "_yolo_detected.jpg"),
        minimum_percentage_probability=probability,
        extract_detected_objects=False)

    for eachObject in detections:
        print(eachObject["name"], " : ", eachObject["percentage_probability"],
              " : ", eachObject["box_points"])
        print("--------------------------------")
    # Close any OpenCV windows that may still be open
    cv2.destroyAllWindows()
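The function above reads probability and datetime from module scope. A minimal sketch of the surrounding setup and a call, with an assumed confidence threshold and a placeholder image path:

# Hypothetical module-level setup for detect_objects_yolo_custom.
import os
import cv2
from datetime import datetime
from imageai.Detection.Custom import CustomObjectDetection

probability = 40  # assumed minimum detection confidence
os.makedirs('output/images/', exist_ok=True)

if __name__ == "__main__":
    detect_objects_yolo_custom("examples/specjale/images/sample.jpg")  # placeholder image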
Example #5
def test(img_path, img_out, minimum_percentage_probability=10):
    """
    Recognition test.
    :param img_path: path to the input image file
    :param img_out: path where the annotated result image is written
    :param minimum_percentage_probability: minimum confidence (in percent) for a detection to be kept
    """
    print('test: {} -> {}'.format(img_path, img_out))
    
    # Source citation:
    #   Title: ImageAI : Custom Object Detection
    #   Author: ImageAI Developers
    #   Date: 2019
    #   Code version: latest
    #   Availability: https://github.com/OlafenwaMoses/ImageAI/blob/master/imageai/Detection/Custom/CUSTOMDETECTION.md

    # Load the model
    detector = CustomObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath('drive/MyDrive/train_data/circle/models/detection_model-ex-138--loss-0014.575.h5')
    detector.setJsonPath('drive/MyDrive/train_data/circle/models/detection_config.json')
    detector.loadModel()

    # Detect
    detections = detector.detectObjectsFromImage(
        input_image=img_path,
        output_image_path=img_out,
        minimum_percentage_probability=minimum_percentage_probability
    )

    # Print the result
    for detection in detections:
        print("{}: {} -> {}".format(detection['name'], detection['percentage_probability'], detection['box_points']))
Example #6
	if ('ball' in coordinates):
		ballX = coordinates['ball'][0]['x1']
		ballY = coordinates['ball'][0]['y1']
		for teamNumber in coordinates:
			if (teamNumber == 'team1') or (teamNumber == 'team2'):
				for playerIndex in coordinates[teamNumber]:
					playerX = coordinates[teamNumber][playerIndex]['x2']
					playerY = coordinates[teamNumber][playerIndex]['y2']
					if ((playerX-ballX)**2+(playerY-ballY)**2 < minDistance):
						minDistance = (playerX-ballX)**2+(playerY-ballY)**2
						closestPlayer = coordinates[teamNumber][playerIndex]
						closestPlayerTeam = teamNumber
	return closestPlayerTeam

detector = CustomObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath("detection_model-ex-112--loss-0006.162.h5") 
detector.setJsonPath("detection_config.json")
detector.loadModel()
        
#Read the video frame by frame
while success:

	success,image = vidcap.read()
	count += 1
	print(image.shape)
	coordinates = {}
	number_team1_players = 0
	number_team2_players = 0
	number_ball = 0
	action = ''
class Process:
    def __init__(self):
        known_path = '/home/swc/spark-2.4.5-bin-hadoop2.7/CCTV-pyspark/images/' # directory path of known faces
        image_format = 'jpg'

        known_list = os.listdir(known_path)
        self.known_faces = []

        # Load & encode all images from known_path
        for f in known_list : 
            if f.split('.')[-1] != image_format : continue
            known_img = face_recognition.load_image_file(known_path+f)
            known_img_encoding = face_recognition.face_encodings(known_img)[0]
            self.known_faces.append(known_img_encoding)

        self.execution_path = "/home/swc/spark-2.4.5-bin-hadoop2.7/CCTV-pyspark/"

        self.detector = CustomObjectDetection()
        self.detector.setModelTypeAsYOLOv3()
        self.detector.setModelPath(detection_model_path=os.path.join(self.execution_path, "detection_model-ex-33--loss-4.97.h5"))
        self.detector.setJsonPath(configuration_json=os.path.join(self.execution_path, "detection_config.json"))
        self.detector.loadModel()

    def ProcessImage(self, data):
        encoded_image = data

        #base64 to image(uint8) decoding
        img64_decode = base64.b64decode(encoded_image)
        im_arr = np.frombuffer(img64_decode, dtype=np.uint8)
        decoded_img = cv2.imdecode(im_arr, flags=cv2.IMREAD_COLOR)

        face = self.FaceRecognition(decoded_img)  # True if an unknown person (intruder) is detected
        fire = self.FireDetection(decoded_img)    # True if a fire is detected

        result = {"unknown_person": face, "fire_broken": fire}
        print(result)
        return result

    def FaceRecognition(self, decoded_img):
        # encode the incoming frame
        try:
            unknown_face_encoding = face_recognition.face_encodings(decoded_img)[0]
            # results is an array of True/False telling if the unknown face matched
            # anyone in the known_faces array
            results = face_recognition.compare_faces(self.known_faces, unknown_face_encoding)
            # returns False for a known face, True for an unknown face
            return True not in results
        except IndexError:
            # no face detected in the frame
            return False

    def FireDetection(self, decoded_img):
        detections = self.detector.detectObjectsFromImage(input_image=decoded_img, input_type="array",
                                                    output_image_path=os.path.join(self.execution_path, "fire_detected.jpg"),
                                                    minimum_percentage_probability=40)

        # Fire is reported if at least one object was detected
        fire_broken = len(detections) > 0
        
        return fire_broken
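A hedged driver for the class above, assuming the frame arrives as a base64-encoded JPEG the way ProcessImage expects; the sample file name is illustrative, and the module-level imports (os, cv2, numpy, base64, face_recognition) that the class relies on are assumed to exist:

# Hypothetical local test of Process.ProcessImage (placeholder image path).
import base64

if __name__ == "__main__":
    proc = Process()
    with open("sample_frame.jpg", "rb") as fh:
        payload = base64.b64encode(fh.read())
    print(proc.ProcessImage(payload))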
Example #8
class Detector:
    """
        Handles object detection using a custom YOLOv3 model
    """
    def __init__(self):
        # self.BASE_IP = "http://192.168.8.101:4747/"  # For home wifi
        # self.BASE_IP = "http://192.168.0.100:4747/"  # For dormitory wifi
        self.BASE_IP = "http://10.42.0.32:4747/"  # For hotspot connection
        self.CAM_ADDR = self.BASE_IP + "video"  # NOTE: might change
        self.CAM_FORCE_ADDR = self.BASE_IP + "override"  # NOTE: might change
        self.CAM_FOCUS_ADDR = self.BASE_IP + "cam/1/af"  # NOTE: might change
        self.DIR_MODEL = "detection_model-ex-015--loss-0001.342.h5"  # TODO NOTE: might change
        self.DIR_CONFIG = "detection_config.json"  # NOTE: might change
        self.MIN_PROB = 30  # TODO NOTE: Change to sensible value to prevent false positives

        self.cap = None
        self.count = 1
        self.detector = None
        self.frame_out = None
        self.frame = None
        self.detections = {}
        self.best_detection = None
        self.success = None

        self.image_logger = ImageLogger()
        self.cap = BufferlessVideoCapture(self.CAM_ADDR, self.CAM_FORCE_ADDR,
                                          self.CAM_FOCUS_ADDR)

        if FAST_DEBUG is False:
            self.init_object_detection()
        else:
            print("FAST DEBUG: detector loading skipped")

    def init_object_detection(self):
        print("Initializing object detection model: {}".format(self.DIR_MODEL))
        self.detector = CustomObjectDetection()
        self.detector.setModelTypeAsYOLOv3()
        self.detector.setJsonPath(self.DIR_CONFIG)
        self.detector.setModelPath(self.DIR_MODEL)
        self.detector.loadModel()
        print("Model loaded")

    def detect_objects(self):
        """
        :return: tuple of (success (bool), ImageAI detection dict of the highest-probability
            detection or None if nothing was detected, time when the frame was acquired,
            list of all detections)
        """
        self.best_detection = None
        self.detections = None
        self.frame_out = None
        self.frame = None
        frame_time = None
        self.success = True

        self.frame, frame_time = self.cap.read()  # Get new frame from camera
        if self.frame is not None:
            if FAST_DEBUG is True:
                print("FAST DEBUG: detection skipped")
                time.sleep(0.8)
            else:
                self.frame_out, self.detections = self.detector.detectObjectsFromImage(
                    input_type="array",
                    input_image=self.frame,
                    output_type="array",
                    minimum_percentage_probability=self.MIN_PROB)
            self.count += 1

        # Get highest probability detection
        if self.detections:
            self.best_detection = max(
                self.detections, key=lambda x: x["percentage_probability"])
            self.best_detection["total_detections_number"] = len(
                self.detections)

        if self.frame is None:
            self.success = False

        return self.success, self.best_detection, frame_time, self.detections

    def log_detections(self, debug_image, text=None):
        """
        Logs/saves the frame with detection boxes drawn, or the raw frame if nothing was detected
        :param debug_image: if true saves latest frame to disk
        :param text: text to write on image
        """
        if self.success or (FAST_DEBUG and self.frame is not None):
            frame_to_save = None
            if self.best_detection and self.frame_out is not None:
                # Draw best detection in green
                d = self.best_detection["box_points"]
                frame_to_save = cv2.rectangle(self.frame_out, (d[0], d[1]),
                                              (d[2], d[3]), (0, 255, 0), 2)
                frame_to_save = cv2.circle(frame_to_save, ((d[0] + d[2]) // 2,
                                                           (d[1] + d[3]) // 2),
                                           5,
                                           color=(0, 255, 0),
                                           thickness=1)
            else:
                frame_to_save = self.frame

            if isinstance(text, str):
                frame_to_save = cv2.putText(frame_to_save, text, (5, 35),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1.0,
                                            (0, 0, 255), 2, cv2.LINE_AA)

            self.image_logger.log(frame_to_save)

            if debug_image is True:
                cv2.imwrite("latest_detection_debug.jpg", frame_to_save)
                print("Image saved as latest_detection_debug.jpg")

    def print_detections(self):
        if self.detections:
            for det in self.detections:
                print(
                    "{d[name]} : {d[percentage_probability]} : {d[box_points]}"
                    .format(d=det))
        else:
            print("No object detected")
Example #9
import io
import cv2
import sys
from PIL import Image
import numpy as np
from flask import Flask, request, jsonify
from imageai.Detection.Custom import CustomObjectDetection

MODEL_PATH = "../models/" + sys.argv[1]
JSON_PATH = "../json/" + sys.argv[2]

app = Flask(__name__)

dtc = CustomObjectDetection()
dtc.setModelTypeAsYOLOv3()
dtc.setModelPath(MODEL_PATH)
dtc.setJsonPath(JSON_PATH)
dtc.loadModel()


def detect_objects(img, min_prob=30):
    """Processes and transforms product images to detect objects learned
    by the model loaded in global.

    Arguments:
        img {FileStorage} -- Uploaded image file though api request.

    Keyword Arguments:
        min_prob {int} -- Threshold probability for objects detected in
        images. Low probability levels show more detections
        (default: {30}).
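    # --- Hypothetical continuation: the original body is cut off in this listing. ---
    # A minimal sketch, assuming the uploaded FileStorage is decoded with PIL/NumPy
    # and handed to the module-level `dtc` detector; nothing below is from the source.
    pil_img = Image.open(img.stream).convert("RGB")              # FileStorage -> PIL image
    frame = cv2.cvtColor(np.array(pil_img), cv2.COLOR_RGB2BGR)   # PIL -> BGR ndarray
    _, detections = dtc.detectObjectsFromImage(
        input_image=frame,
        input_type="array",
        output_type="array",
        minimum_percentage_probability=min_prob)
    return detections


# Hypothetical Flask route wiring detect_objects to an HTTP endpoint; the
# endpoint name and form field name ("image") are illustrative only.
@app.route("/detect", methods=["POST"])
def detect():
    img = request.files["image"]
    detections = detect_objects(img)
    # Cast to plain Python types in case ImageAI returns numpy scalars.
    for d in detections:
        d["box_points"] = [int(v) for v in d["box_points"]]
        d["percentage_probability"] = float(d["percentage_probability"])
    return jsonify(detections)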
def detectImage(queue):

    detector = CustomObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(modelFilePath)
    detector.setJsonPath("detection_config.json")
    detector.loadModel()

    while True:

        if queue.qsize() > 0:

            try:
                fileNameIn = queue.get()
                logging.info("[detectImage] Processing %s" % fileNameIn)

                filePathIn = os.path.join(imageFolderIn, fileNameIn)
                filePathOut = os.path.join(imageFolderOut, fileNameIn)
                filePathStorage = os.path.join(imageStorage, fileNameIn)
                logging.info(filePathOut)
                filePathSpare = os.path.join(spareImageFolder, fileNameIn)
                filepathBorder = os.path.join(borderImageFolder, fileNameIn)

                logging.info("\n---------------------------------------")
                logging.info("moving to next image")
                if borderTopBtm:
                    logging.info("filepathBorder: " + filepathBorder)
                else:
                    logging.info("filePathIn: " + filePathIn)
                logging.info("---------------------------------------")

                if borderTopBtm:
                    borderImage = cv2.imread(filePathIn)
                    #border bottom
                    cv2.rectangle(borderImage, (0, 525), (1280, 720),
                                  (0, 0, 0), -1)
                    ##border top
                    cv2.rectangle(borderImage, (0, 0), (1280, 140), (0, 0, 0),
                                  -1)
                    cv2.imwrite(filepathBorder, borderImage)

                if borderTopBtm:
                    logging.info("[YOLO] Processing %s..." % filepathBorder)
                    detections = detector.detectObjectsFromImage(
                        input_image=filepathBorder,
                        output_image_path=filePathSpare,
                        minimum_percentage_probability=imageMinProb)
                else:
                    logging.info("[YOLO] Processing %s..." % filePathIn)
                    detections = detector.detectObjectsFromImage(
                        input_image=filePathIn,
                        output_image_path=filePathSpare,
                        minimum_percentage_probability=imageMinProb)

                probPercBest = 0
                closestName = None
                bestBB = None

                img = cv2.imread(filePathIn)

                for detection in detections:
                    name = detection["name"]
                    probPerc = detection["percentage_probability"]
                    bbPoints = detection["box_points"]
                    logging.info("name=%s, probPerc=%s, bbPoints=%s" %
                                 (name, probPerc, bbPoints))

                    if probPerc > probPercBest:
                        probPercBest = probPerc
                        closestName = name
                        bestBB = bbPoints

                if bestBB is None:
                    logging.info("Nothing is detected, skipping image...")
                    shutil.copy2(filePathIn, filePathOut)
                    shutil.copy2(filePathIn, filePathStorage)
                    continue

                if probPercBest >= imageMaxProb:
                    logging.info("Model is sure that closestName is " +
                                 closestName)
                    logging.info("fileNameIn: %s" % fileNameIn)
                    logging.info("fileNameIn.split('_'): %s" %
                                 fileNameIn.split("_"))

                    # File name is expected to look like "<prefix>-<x>_<y>_<direction>.<ext>"
                    nameParts = fileNameIn.split("-")[1].split("_")
                    robotCurrentX = nameParts[0]
                    robotCurrentY = nameParts[1]
                    robotCurrentDirection = nameParts[2].split('.')[0]
                    exportImageAbs(img, filePathOut, filePathStorage, bestBB,
                                   closestName, robotCurrentX, robotCurrentY,
                                   robotCurrentDirection)
                    continue
                else:
                    logging.warning("Model is not confident")
                    shutil.copy2(filePathIn, filePathOut)
                    shutil.copy2(filePathIn, filePathStorage)
                    continue

            except Exception as e:
                logging.error("\nException %s\n" % e)
                traceback.print_exc()

                if "!_img.empty()" in str(e):
                    logging.error("Model does not detect images ")
                    if os.path.exists(filePathIn):
                        shutil.copy2(filePathIn, filePathOut)
                        shutil.copy2(filePathIn, filePathStorage)
                    continue