Example #1
def detection(input_path, output_path, yolo_model_path):
    """The function opens the input video and goes through each frame.
       Performs object recognition for each frame by using a YOLO model
       and writes the frame including the detection to the output video.
       :param input_path:      input video path
       :type input_path:       string
       :param output_path:     output video path
       :type output_path:      string
       :param yolo_model_path: YOLO model path
       :type yolo_model_path:  string
       :return: None
    """
    detector = VideoObjectDetection()

    # this function sets the model type of the object
    # detection instance you created to the YOLOv3 model
    detector.setModelTypeAsYOLOv3()

    # this function accepts a string that must be the
    # path to the model file; it must correspond to the
    # model type set for the object detection instance
    detector.setModelPath(yolo_model_path)

    # this function loads the model from the path given
    detector.loadModel()

    # the function performs object detection on a video
    # file or video live-feed after the model has been
    # loaded into the instance that was created
    detector.detectCustomObjectsFromVideo(input_file_path=input_path,
                                          output_file_path=output_path,
                                          frames_per_second=20,
                                          log_progress=True)
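
A usage sketch, assuming the ImageAI import is already in scope and using hypothetical file names:

# assumes: from imageai.Detection import VideoObjectDetection was imported above
detection(input_path="traffic.mp4",          # hypothetical input video
          output_path="traffic_detected",    # ImageAI appends .avi to this name
          yolo_model_path="yolo.h5")         # pretrained YOLOv3 weights file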
Example #2
def detection_of_vehicles_from_video(folder1, folder2, findex):
    '''
    Detects vehicles in every video of a given folder and saves, for each
    video, an array of the bounding boxes of the detected vehicles.

    Parameters:
    folder1 : path of the folder containing the videos
    folder2 : path of the folder in which the arrays are to be stored
    findex : index number of the first video in folder1
    '''

    #per-frame callback for ImageAI that collects the bounding-box
    #coordinates of the vehicles detected in a particular frame
    def forFrame(frame_number, output_array, output_count):

        bboxes = []

        for i in range(len(output_array)):
            bboxes.append(list(output_array[i]['box_points']))

        B.append(bboxes)

    #reading and sorting the filenames of folder1
    videos = glob.glob(folder1 + '/video*.MOV')
    videos = natsort.natsorted(videos)

    #set and load the RetinaNet model for vehicle detection
    execution_path = os.getcwd()
    detector = VideoObjectDetection()
    detector.setModelTypeAsRetinaNet()
    #use detector.setModelTypeAsYOLOv3() to use YOLOv3 instead of RetinaNet
    detector.setModelPath(
        os.path.join(
            execution_path,
            "/home/siddhi/Desktop/RoadCrossingAssistant_FY_Project_Data/resnet50_coco_best_v2.0.1.h5"
        ))
    #use the path to yolo.h5 if using YOLOv3 instead of RetinaNet
    detector.loadModel()
    custom_objects = detector.CustomObjects(bicycle=True,
                                            motorcycle=True,
                                            car=True,
                                            truck=True)

    for video in videos:
        print('processing ' + video)
        B = []
        detector.detectCustomObjectsFromVideo(
            save_detected_video=False,
            custom_objects=custom_objects,
            input_file_path=os.path.join(execution_path, video),
            frames_per_second=30,
            per_frame_function=forFrame,
            minimum_percentage_probability=40)
        B = np.array(B)
        print('saving array for video ' + video + '\n shape of array: ' +
              str(B.shape))
        np.save(folder2 + '/array' + str(findex), B)
        findex = findex + 1
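
A usage sketch with hypothetical folder names; it assumes the modules used above (glob, natsort, os, numpy as np, and ImageAI's VideoObjectDetection) are imported:

# hypothetical folders for illustration
detection_of_vehicles_from_video(folder1="videos",       # contains video1.MOV, video2.MOV, ...
                                 folder2="bbox_arrays",  # existing folder for the .npy arrays
                                 findex=1)               # saves array1.npy, array2.npy, ...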
Example #3
    def Crop_video(self, params):
        global input_video

        if 'videoName' in params.keys():
            input_video = str(params['videoName'])
            print(input_video)
        else:
            return "Error: No Video Name field provided. Please specify an url."
        detector = VideoObjectDetection()
        detector.setModelTypeAsYOLOv3()
        detector.setModelPath(
            os.path.join(parent, 'Mobilaty\\project\\public\\yolo.h5'))
        detector.loadModel()
        custom_objects = detector.CustomObjects(cell_phone=True)

        video_path = detector.detectCustomObjectsFromVideo(
            custom_objects=custom_objects,
            input_file_path=os.path.join(Base_Video_path, input_video),
            output_file_path=os.path.join(Base_Video_path,
                                          "traffic_custom_detected"),
            save_detected_video=False,
            frames_per_second=1,
            per_frame_function=forFrame)
        os.remove(os.path.join(Base_Video_path, "traffic_custom_detected"))
        return "Done!"
Example #4
def objectDection(execution_path, save_path, fileName):
    '''
        Detect objects in each frame of the given video and save the
        annotated output video.
    '''
    detector = VideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
    detector.loadModel()

    global FILENAME
    FILENAME = fileName.split('.')[0]

    custom_objects = detector.CustomObjects(car=True, truck=True, bus=True)
    video_path = detector.detectCustomObjectsFromVideo(
        custom_objects=custom_objects,
        input_file_path=os.path.join(execution_path, fileName),
        output_file_path=os.path.join(save_path,
                                      fileName.split(".")[0] + "_detected"),
        frames_per_second=30,
        frame_detection_interval=1,
        per_frame_function=forFrame,
        # per_second_function=forSecond,
        minimum_percentage_probability=79,
        # return_detected_frame=True,
        log_progress=True)
    return (fileName.split(".")[0] + "_detected.avi")
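
A usage sketch; forFrame is not defined in this snippet, so a no-op callback is stubbed in, and the file names are hypothetical:

def forFrame(frame_number, output_array, output_count):
    # stub per-frame callback: just log the counts
    print("frame", frame_number, output_count)


output_name = objectDection(execution_path=os.getcwd(),
                            save_path=os.getcwd(),
                            fileName="traffic.mp4")  # hypothetical input video
print(output_name)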
class itemVideoSaveFile():
    def __init__(self, setModelPath):
        self.execution_path = os.getcwd()
        self.detector = VideoObjectDetection()
        self.detector.setModelTypeAsRetinaNet()
        self.detector.setModelPath(os.path.join(self.execution_path, setModelPath))
        self.detector.loadModel()

    def items_VideoSaveFile(self, inputFile, outputFile):
        video_path = self.detector.detectCustomObjectsFromVideo(
            input_file_path=os.path.join(self.execution_path, inputFile),
            output_file_path=os.path.join(self.execution_path, outputFile),
            frames_per_second=20, log_progress=True)
        print(video_path)
    print("SECOND : ", second_number)
    print("Array for the outputs of each frame ", output_arrays)
    print("Array for output count for unique objects in each frame : ", count_arrays)
    print("Output average count for unique objects in the last second: ", average_output_count)
    print("------------END OF A SECOND --------------")

#-------------------------------------------------------------------------------#
detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(execution_path,"yolo.h5"))
detector.loadModel(detection_speed="fastest")
plt.show()

custom_objects = detector.CustomObjects(car=True,truck=True,motorcycle=True)

#--------------------------------Features---------------------------------------#
video_path = detector.detectCustomObjectsFromVideo(
    minimum_percentage_probability=50,
    custom_objects=custom_objects,
    per_second_function=forSeconds,
    display_percentage_probability=True,
    display_object_name=True,
    log_progress=True,
    frames_per_second=60,
    #--------------------Input-File---------------------------------------#
    input_file_path=os.path.join(execution_path, "cars.mp4"),
    #--------------------Output-File--------------------------------------#
    output_file_path=os.path.join(execution_path, "car_detection"))
#---------------------Finish------------------------------------------#
print(video_path)
Example #7
from imageai.Detection import VideoObjectDetection
import os

execution_path = os.getcwd()

detector = VideoObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(os.path.join(execution_path, "../models/resnet50_coco_best_v2.0.1.h5"))
detector.loadModel()

custom = detector.CustomObjects(person=True, motorcycle=True, bus=True)

video_path = detector.detectCustomObjectsFromVideo(custom_objects=custom,
                                                   input_file_path=os.path.join(execution_path, "traffic-mini.mp4"),
                                                   output_file_path=os.path.join(execution_path, "traffic-mini_detected_custom"),
                                                   frames_per_second=20, log_progress=True)
print(video_path)
Example #8
from imageai.Detection import VideoObjectDetection
import os
import time

execution_path = os.getcwd()
s = time.time()
detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
detector.loadModel(detection_speed="faster")

custom_objects = detector.CustomObjects(person=True)

video_path = detector.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    input_file_path=os.path.join(execution_path, "1080p_test.mp4"),
    output_file_path=os.path.join(execution_path, "custom_detected1080"),
    frames_per_second=30,
    log_progress=True)
print(video_path)
e = time.time()
timetaken = e - s
print(timetaken)
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 02:17:08 2019

@author: aryaman
"""

from imageai.Detection import VideoObjectDetection
import os
import cv2

execution_path = os.getcwd()

camera = cv2.VideoCapture(0)

detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(execution_path, "yolo(1).h5"))
detector.loadModel()

custom_objects = detector.CustomObjects(person=True)

video_path = detector.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    camera_input=camera,
    output_file_path=os.path.join(execution_path, "camera_detected_1"),
    frames_per_second=12,
    log_progress=True)
print(video_path)
camera.release()
Example #10
def forMinute(minute_number, output_arrays, count_arrays,
              average_output_count):
    print("MINUTE : ", minute_number)
    print("Array for the outputs of each frame ", output_arrays)
    print("Array for output count for unique objects in each frame : ",
          count_arrays)
    print("Output average count for unique objects in the last minute: ",
          average_output_count)
    print("------------END OF A MINUTE --------------")


def forFull(output_arrays, count_arrays, average_output_count):
    print("Array for the outputs of each frame ", output_arrays)
    print("Array for output count for unique objects in each frame : ",
          count_arrays)
    print("Output average count for unique objects in the entire video: ",
          average_output_count)
    print("------------END OF THE VIDEO --------------")


# Webcam live object detection
video_path = detector.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    camera_input=video_capture,
    output_file_path="output/testvid",
    frames_per_second=20,
    per_second_function=forSeconds,
    minimum_percentage_probability=70,
    detection_timeout=120)
Example #11
    The video file obtained from the detection is saved in the working directory as detected_video.avi
    @:param: custom_objects: dictionary of objects enabled for detection
    @:param: return_detected_frame : whether to pass the last detected video frame into the per_frame_function
    @:param: per_frame_function : see process_per_frame documentation
    @:param: camera_input : live camera input
    @:param: save_detected_video : option to save the detected video
    @:param: output_file_path : path and filename for saving the detected video
    @:param: minimum_percentage_probability : minimum percentage probability for nominating a detected object
    @:param: frames_per_second : frames per second of the output video
    @:param: log_progress : states if the progress of the frames processed is to be logged to the console
    """
    video_path = detector.detectCustomObjectsFromVideo(
        custom_objects=custom_objects,
        return_detected_frame=set_return_detected_frame(True),
        per_frame_function=process_per_frame,
        camera_input=camera,
        save_detected_video=True,
        output_file_path="detected_video",
        minimum_percentage_probability=50,
        frames_per_second=2,
        log_progress=True)
    continue_detection = input("Find another object?(Y/N) ")

    if continue_detection.upper() == "Y":
        vibrations.open_glove()
        camera = cv2.VideoCapture(0)
    else:
        print("Shutting down")

# EOF
Example #12
# default device camera
camera = cv2.VideoCapture(0)

# set up the video detector
detector = VideoObjectDetection()

detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(execution_path, "yolo.h5"))

detector.loadModel(detection_speed="flash")

custom_objects = detector.CustomObjects(person=True)


# gets executed on every frame of the video detection
def forFrame(frame_number, output_array, output_count):
    if 'person' in output_count:
        request.setLight(1)
    else:
        request.setLight(0)


video_path = detector.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    camera_input=camera,
    output_file_path=os.path.join(execution_path, "camera_detected_video"),
    frames_per_second=20,
    log_progress=True,
    per_frame_function=forFrame,
    minimum_percentage_probability=40)
Example #13
        ##        print("acceleration",acceleration[i])
        i += 1


#
#==============================================================================
#             location[i]=data["box_points"]
#     for i in range(len(output_array)):
#         print(location[i])
#==============================================================================

execution_path = os.getcwd()

detector = VideoObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(
    os.path.join(execution_path, 'resnet50_coco_best_v2.0.1.h5'))
detector.loadModel(detection_speed="fast")

custom_objects = detector.CustomObjects(car=True)

video_path = detector.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    input_file_path=os.path.join(execution_path, 'traffic.mp4'),
    output_file_path=os.path.join(execution_path, 'traffic-detected'),
    frames_per_second=20,
    #==============================================================================
    #     frame_detection_interval=1,
    #==============================================================================
    per_frame_function=forFrame,
    minimum_percentage_probability=30)
Example #14
detector.setModelTypeAsYOLOv3()
# Path of the YOLO's file
detector.setModelPath(os.path.join(execution_path, "Resources/yolo.h5"))
# Choice of the detection's speed
# You can choose between "normal" (default), "fast", "faster", "fastest" and "flash"
detector.loadModel(detection_speed="flash")
# We want to detect only the people in the video
custom_objects = detector.CustomObjects(person=True)

# Applying the model
detector.detectCustomObjectsFromVideo(
    # Objects to detect
    custom_objects=custom_objects,
    # Input file
    input_file_path=os.path.join(execution_path, "Resources/Video.mp4"),
    # Output file
    output_file_path=os.path.join(execution_path, "Output"),
    # Set to True if you want the percentage probability displayed in the output video
    display_percentage_probability=False,
    # Set to False if you don't want the progress of the process logged to the terminal
    log_progress=True)

import cv2
import numpy as np
# Displaying the video
# Create a VideoCapture object and read from input file
cap = cv2.VideoCapture('Output.avi')
# Check if camera opened successfully
if not cap.isOpened():
    print("Error opening video file")
# Read until video is completed
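
The playback loop is truncated here; a minimal sketch of the usual OpenCV read-and-display loop the comments describe (the window name is illustrative):

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imshow('Detected video', frame)
    # press q to stop playback early
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()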
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 23 01:08:15 2019

@author: aryaman
"""

from imageai.Detection import VideoObjectDetection
import os

execution_path = os.getcwd()

detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(execution_path, "yolo(1).h5"))
detector.loadModel()

custom_objects = detector.CustomObjects(person=True)

video_path = detector.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    input_file_path=os.path.join(execution_path, "iitg.mp4"),
    output_file_path=os.path.join(execution_path, "iitg_Detected_1"),
    frames_per_second=12,
    log_progress=True)

print(video_path)
class VideoObjectDetector:
    def __init__(self, exec_path):
        self.exec_path = exec_path
        self.detector = VideoObjectDetection()
        self.config = ConfigLoader().conf
        self.detector.setModelTypeAsRetinaNet()
        self.detector.setModelPath(
            os.path.join(self.exec_path,
                         self.config['obj-detector.model-path']))
        self.detector.loadModel()

    def for_frame(self,
                  frame_number,
                  output_array,
                  output_count,
                  returned_frame,
                  resized=False):
        color_index = {
            'bus': 'red',
            'handbag': 'steelblue',
            'giraffe': 'orange',
            'spoon': 'gray',
            'cup': 'yellow',
            'chair': 'green',
            'elephant': 'pink',
            'truck': 'indigo',
            'motorcycle': 'azure',
            'refrigerator': 'gold',
            'keyboard': 'violet',
            'cow': 'magenta',
            'mouse': 'crimson',
            'sports ball': 'raspberry',
            'horse': 'maroon',
            'cat': 'orchid',
            'boat': 'slateblue',
            'hot dog': 'navy',
            'apple': 'cobalt',
            'parking meter': 'aliceblue',
            'sandwich': 'skyblue',
            'skis': 'deepskyblue',
            'microwave': 'peacock',
            'knife': 'cadetblue',
            'baseball bat': 'cyan',
            'oven': 'lightcyan',
            'carrot': 'coldgrey',
            'scissors': 'seagreen',
            'sheep': 'deepgreen',
            'toothbrush': 'cobaltgreen',
            'fire hydrant': 'limegreen',
            'remote': 'forestgreen',
            'bicycle': 'olivedrab',
            'toilet': 'ivory',
            'tv': 'khaki',
            'skateboard': 'palegoldenrod',
            'train': 'cornsilk',
            'zebra': 'wheat',
            'tie': 'burlywood',
            'orange': 'melon',
            'bird': 'bisque',
            'dining table': 'chocolate',
            'hair drier': 'sandybrown',
            'cell phone': 'sienna',
            'sink': 'coral',
            'bench': 'salmon',
            'bottle': 'brown',
            'car': 'silver',
            'bowl': 'maroon',
            'tennis racket': 'palevilotered',
            'airplane': 'lavenderblush',
            'pizza': 'hotpink',
            'umbrella': 'deeppink',
            'bear': 'plum',
            'fork': 'purple',
            'laptop': 'indigo',
            'vase': 'mediumpurple',
            'baseball glove': 'slateblue',
            'traffic light': 'mediumblue',
            'bed': 'navy',
            'broccoli': 'royalblue',
            'backpack': 'slategray',
            'snowboard': 'skyblue',
            'kite': 'cadetblue',
            'teddy bear': 'peacock',
            'clock': 'lightcyan',
            'wine glass': 'teal',
            'frisbee': 'aquamarine',
            'donut': 'mincream',
            'suitcase': 'seagreen',
            'dog': 'springgreen',
            'banana': 'emeraldgreen',
            'person': 'honeydew',
            'surfboard': 'palegreen',
            'cake': 'sapgreen',
            'book': 'lawngreen',
            'potted plant': 'greenyellow',
            'toaster': 'ivory',
            'stop sign': 'beige',
            'couch': 'khaki'
        }

        plt.clf()

        this_colors = []
        labels = []
        sizes = []

        counter = 0

        for eachItem in output_count:
            counter += 1
            labels.append(eachItem + " = " + str(output_count[eachItem]))
            sizes.append(output_count[eachItem])
            this_colors.append(color_index[eachItem])

        if not resized:
            manager = plt.get_current_fig_manager()
            manager.resize(width=1000, height=500)
            resized = True

        plt.subplot(1, 2, 1)
        plt.title("Frame : " + str(frame_number))
        plt.axis("off")
        plt.imshow(returned_frame, interpolation="none")

        plt.subplot(1, 2, 2)
        plt.title("Analysis: " + str(frame_number))
        plt.pie(sizes,
                labels=labels,
                colors=this_colors,
                shadow=True,
                startangle=140,
                autopct="%1.1f%%")

        plt.pause(0.01)

    def run_inference_on(self, camera):
        self.detector.detectCustomObjectsFromVideo(
            custom_objects=self.detector.CustomObjects(person=True),
            camera_input=camera,
            frames_per_second=2,
            log_progress=True,
            save_detected_video=False,
            per_frame_function=self.for_frame,
            return_detected_frame=True)
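
A usage sketch (hypothetical): it assumes the project's ConfigLoader resolves 'obj-detector.model-path' to a RetinaNet weights file, and that cv2 is available for the webcam capture:

import cv2

camera = cv2.VideoCapture(0)  # assumed: default webcam
video_detector = VideoObjectDetector(os.getcwd())
video_detector.run_inference_on(camera)
camera.release()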
Example #17
    print("MINUTE : ", minute_number)
    print("Array for the outputs of each frame ", output_arrays)
    print("Array for output count for unique objects in each frame : ",
          count_arrays)
    print("Output average count for unique objects in the last minute: ",
          average_output_count)
    print("------------END OF A MINUTE --------------")


detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath("/Users/zhusheng/WorkSpace/Tmp/dataset/models/yolo.h5")
detector.loadModel()

# custom objects to detect
custom_objects = detector.CustomObjects(person=True,
                                        bicycle=True,
                                        motorcycle=True)

# start detection
video_path = detector.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    input_file_path="video/traffic.mp4",
    output_file_path="video/traffic_detedted2",
    frames_per_second=10,
    per_second_function=forSeconds,
    per_frame_function=forFrame,
    per_minute_function=forMinute,
    minimum_percentage_probability=30)

print(video_path)
Example #18
from imageai.Detection import VideoObjectDetection
import os

boxes = []


def forFrame(frame_number, output_array, output_count):
    boxes.append(output_array)
    print("FOR FRAME ", frame_number)
    print("Output for each object : ", output_array)
    print("Output count for unique objects : ", output_count)
    print("------------END OF A FRAME --------------")


execution_path = os.getcwd()

detector = VideoObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(
    os.path.join(execution_path, "checkpoints/resnet50_coco_best_v2.1.0.h5"))
detector.loadModel()

custom_objects = detector.CustomObjects(sports_ball=True, baseball_bat=True)

video_path = detector.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    input_file_path=os.path.join(execution_path, "data/red_cricket.avi"),
    output_file_path=os.path.join(execution_path, "results/bat_ball_det"),
    frames_per_second=2,
    log_progress=True,
    per_frame_function=forFrame)
print(video_path)
Example #19
from imageai.Detection import VideoObjectDetection
import os
import cv2 as cv

execution_path = os.getcwd()

camera = cv.VideoCapture("http://195.189.181.205/mjpg/video.mjpg")

#camera = cv.VideoCapture("http://207.192.232.2:8000/mjpg/video.mjpg")
detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
detector.loadModel(detection_speed="faster")

custom_objects = detector.CustomObjects(car=True, truck=True)

video_path = detector.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    camera_input=camera,
    output_file_path=os.path.join(execution_path,
                                  "rtmp://a.rtmp.youtube.com/live2"),
    frames_per_second=30,
    log_progress=True)
print(video_path)
from imageai.Detection import VideoObjectDetection
import os

execution_path = os.getcwd()

detector = VideoObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(
    os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
detector.loadModel(detection_speed="flash")

custom_objects = detector.CustomObjects(person=True,
                                        bicycle=True,
                                        motorcycle=True)

video_path = detector.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    input_file_path=os.path.join(execution_path, "traffic-small.mp4"),
    output_file_path=os.path.join(execution_path,
                                  "traffic_small_custom_flash_detected"),
    frames_per_second=20,
    log_progress=True)
print(video_path)
Example #21
from imageai.Detection import VideoObjectDetection
import os
import cv2

execution_path = os.getcwd()

camera = cv2.VideoCapture("http://192.168.43.41:8080/video")

detector = VideoObjectDetection()
detector.setModelTypeAsTinyYOLOv3()
detector.setModelPath(os.path.join(execution_path, "yolo-tiny.h5"))
detector.loadModel(detection_speed="fastest")

custom_objects = detector.CustomObjects(car=True, motorcycle=True, truck=True)

video_path = detector.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    #input_file_path=os.path.join(execution_path, "traffic.mp4"),
    camera_input=camera,
    output_file_path=os.path.join(execution_path, "traffic_detected"),
    frames_per_second=17,
    minimum_percentage_probability=5,
    frame_detection_interval=2,
    log_progress=True
)
Example #22
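This snippet relies on camera and forFrame being defined earlier in the original script; a minimal sketch of plausible definitions, assuming a default webcam (the callback takes a fourth argument because return_detected_frame=True is passed below):

import os
import cv2
from imageai.Detection import VideoObjectDetection

camera = cv2.VideoCapture(0)  # assumed: default webcam


def forFrame(frame_number, output_array, output_count, returned_frame):
    # assumed behaviour: just log the per-frame object counts
    print("FRAME", frame_number, output_count)
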
if __name__ == '__main__':
    execution_path = os.getcwd()
    detector = VideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
    detector.loadModel(detection_speed="faster")

    custom_objects = detector.CustomObjects(car=True,
                                            person=True,
                                            bus=True,
                                            chair=True,
                                            truck=True,
                                            refrigerator=True,
                                            oven=True,
                                            bicycle=True,
                                            skateboard=True,
                                            train=True,
                                            bench=True,
                                            motorcycle=True,
                                            bed=True,
                                            suitcase=True)

    detections = detector.detectCustomObjectsFromVideo(
        camera_input=camera,
        custom_objects=custom_objects,
        save_detected_video=False,
        return_detected_frame=True,
        per_frame_function=forFrame,
        minimum_percentage_probability=30,
        frames_per_second=20,
    )
print("time spent in conversion: {}".format(elapsed_time_cv))
input_movie.release()

detector5 = VideoObjectDetection()
detector5.setModelTypeAsYOLOv3()
detector5.setModelPath(os.path.join(execution_path, "model_data/yolo.h5"))
detector5.loadModel()

custom_objects = detector5.CustomObjects(person=True)

start = time.time()
video_path = detector5.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    input_file_path=os.path.join(execution_path,
                                 "data/test_videos/hamilton_clip.mp4"),
    output_file_path=os.path.join(execution_path,
                                  "data/test_videos/obj_det_normal"),
    frames_per_second=29)
elapsed_time = time.time() - start
print("time spent in conversion: {}".format(elapsed_time))
print(video_path)

detector5 = VideoObjectDetection()
detector5.setModelTypeAsYOLOv3()
detector5.setModelPath(os.path.join(execution_path, "model_data/yolo.h5"))
detector5.loadModel(detection_speed="fast")

custom_objects = detector5.CustomObjects(person=True)

start = time.time()
    global resized

    if (resized == False):
        manager = plt.get_current_fig_manager()
        manager.resize(1000, 500)
        resized = True

    plt.subplot(1, 2, 1)
    plt.title("Frame : " + str(frame_number))
    plt.axis("off")
    plt.imshow(returned_frame, interpolation="none")

    plt.subplot(1, 2, 2)
    plt.title("Analysis: " + str(frame_number))
    plt.pie(sizes, labels=labels, colors=this_colors, shadow=True, startangle=140, autopct="%1.1f%%")

    plt.pause(0.01)


camera = cv2.VideoCapture(0)
video_detector = VideoObjectDetection()
video_detector.setModelTypeAsYOLOv3()
video_detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
objects = video_detector.CustomObjects(person=True)
video_detector.loadModel()

plt.show()

video_detector.detectCustomObjectsFromVideo(custom_objects=objects,
                                            camera_input=camera,
                                            output_file_path=os.path.join(execution_path, "video_frame_analysis"),
                                            frames_per_second=20,
                                            per_frame_function=forFrame,
                                            minimum_percentage_probability=30,
                                            return_detected_frame=True)
execution_path = os.getcwd()

detector = VideoObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(
    os.path.join(
        execution_path,
        "C:\\Users\\hp\\Downloads\\imageAI objectDetectionByvideo2\\resnet50_coco_best_v2.0.1.h5"
    ))
detector.loadModel()
#detector.loadModel(detection_speed="fast")

custom_objects = detector.CustomObjects(person=True,
                                        bicycle=True,
                                        motorcycle=True)

video_path = detector.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    input_file_path=os.path.join(
        execution_path,
        "C:\\Users\\hp\\Downloads\\imageAI objectDetectionByvideo2\\input video\\traffic.mp4"
    ),
    output_file_path=os.path.join(
        execution_path,
        "C:\\Users\\hp\\Downloads\\imageAI objectDetectionByvideo2\\output video\\traffic_custom_detected"
    ),
    frames_per_second=20,
    log_progress=True)
print(video_path)
Example #26
execution_path = os.getcwd()

#tell OpenCV which camera index we will use
camera = cv2.VideoCapture(1)

#initialize the detector
detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
detector.loadModel()

#select which objects we want to detect
custom_objects = detector.CustomObjects(person=True, chair=True)

#variables to display the video in real time
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output-count2.avi', fourcc, 29.0, (640, 480))

#ImageAI library function to detect the objects
video_path = detector.detectCustomObjectsFromVideo(
    save_detected_video=False,
    return_detected_frame=True,
    custom_objects=custom_objects,
    camera_input=camera,
    output_file_path=os.path.join(execution_path, "camera_detected_91"),
    frames_per_second=29,
    log_progress=True,
    per_frame_function=forFrame)
print(video_path)
Example #27
from imageai.Detection import VideoObjectDetection
import os
import time
start = time.time()

execution_path = os.getcwd()

detector = VideoObjectDetection()

detector.setModelTypeAsTinyYOLOv3()
detector.setModelPath(os.path.join(execution_path, "yolo-tiny.h5"))
detector.loadModel()

video_path = detector.detectCustomObjectsFromVideo(
    input_file_path=os.path.join(execution_path, "video2.mp4"),
    output_file_path=os.path.join(execution_path, "detectedvideo2"),
    frames_per_second=30,
    log_progress=True)
print(video_path)

end = time.time()

print("\ntime:", end - start)

execution_path = os.getcwd()

detector2 = ObjectDetection()

detector2.setModelTypeAsRetinaNet()

detector2.setModelPath(
custom_objects = detector.CustomObjects(person=True)


#Create the function that is run every second the camera is recording.
def forSeconds(second_number, output_arrays, count_arrays,
               average_output_count):
    number = ""
    people_in_frame = str(average_output_count)

    #If the system doesn't detect any people in the frame, it will print 0.
    if people_in_frame == "{}":
        number = "0"
    else:
        #The number of people in the frame is prepared.
        #This for loop analyses every character in the output and gathers
        #any digits and joins them in "number" to get a final number of people.
        for i in range(0, len(people_in_frame)):
            if people_in_frame[i].isdigit():
                number = number + people_in_frame[i]
    clear_output()
    print(number)
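    # Note (not in the original): average_output_count is a dict of object names
    # to counts, so the value could also be read directly, e.g.
    # number = str(average_output_count.get("person", 0))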


video_path = detector.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    camera_input=camera,
    save_detected_video=False,
    frames_per_second=5,
    per_second_function=forSeconds,
    minimum_percentage_probability=20)
Example #29
from imageai.Detection import VideoObjectDetection
import os

execution_path = os.getcwd()

detector = VideoObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath("/Users/zhusheng/WorkSpace/Tmp/dataset/models/resnet50_coco_best_v2.0.1.h5")
detector.loadModel()

# custom objects to detect
custom_objects = detector.CustomObjects(person=True, bicycle=True, motorcycle=True)

# start detection
video_path = detector.detectCustomObjectsFromVideo(custom_objects = custom_objects,
                                      input_file_path="video/traffic.mp4",
                                      output_file_path="video/custom_detected",
                                      frames_per_second=20,
                                      log_progress=True)

print(video_path)
from imageai.Detection import VideoObjectDetection
import os

execution_path = os.getcwd()

detector = VideoObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
detector.loadModel(detection_speed="flash")

custom_objects = detector.CustomObjects(person=True, bicycle=True, motorcycle=True)

video_path = detector.detectCustomObjectsFromVideo(custom_objects=custom_objects,
                                                   input_file_path=os.path.join(execution_path, "traffic-small.mp4"),
                                                   output_file_path=os.path.join(execution_path, "traffic_small_custom_flash_detected"),
                                                   frames_per_second=20, log_progress=True)
print(video_path)
            # cv2.destroyAllWindows()
        plt.title("Frame : " + str(frame_number))
        plt.axis("off")
        plt.imshow(returned_frame, interpolation="none")

        # plt.subplot(1, 2, 2)
        # plt.title("Analysis: " + str(frame_number))
        # plt.pie(sizes, labels=labels, colors=this_colors, shadow=True, startangle=140, autopct="%1.1f%%")

        plt.pause(0.000001)

detector.detectCustomObjectsFromVideo(custom_objects=custom,
                                      input_file_path=os.path.join(execution_path, "99.mp4"),
                                      save_detected_video=True,
                                      per_frame_function=forFrame,
                                      output_file_path=os.path.join(execution_path, "samplevideo"),
                                      minimum_percentage_probability=99,
                                      frames_per_second=45,
                                      return_detected_frame=True)
    # for eachObject, eachObjectPath in zip(detections, object_path):
    #     box_points = eachObject['box_points']
    #     if (box_points[1]+box_points[3])/2 == 300:
    #         cv2.imshow('image',eachObjectPath)

