from imageai.Detection.Custom import CustomVideoObjectDetection
import os

execution_path = os.getcwd()

detector = CustomVideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath("detection_model-ex-015--loss-0011.676.h5")
detector.setJsonPath("detection_config.json")
detector.loadModel()

detector.detectObjectsFromVideo(input_file_path="video.mp4",
                                output_file_path=os.path.join(execution_path, "traffic_face"),
                                frames_per_second=30,
                                minimum_percentage_probability=40,
                                log_progress=True)
from imageai.Detection.Custom import CustomVideoObjectDetection
import os

execution_path = os.getcwd()

video_detector = CustomVideoObjectDetection()
video_detector.setModelTypeAsYOLOv3()
video_detector.setModelPath("Pothole.h5")  # download via https://github.com/OlafenwaMoses/ImageAI/releases/download/essential-v4/hololens-ex-60--loss-2.76.h5
video_detector.setJsonPath("detection_config.json")  # download via https://github.com/OlafenwaMoses/ImageAI/releases/download/essential-v4/detection_config.json
video_detector.loadModel()

video_detector.detectObjectsFromVideo(input_file_path="Video_Input",
                                      output_file_path=os.path.join(execution_path, "Pothole"),
                                      frames_per_second=20,
                                      minimum_percentage_probability=40,
                                      log_progress=True)
from imageai.Detection.Custom import CustomVideoObjectDetection
import os
import cv2

execution_path = os.getcwd()
camera = cv2.VideoCapture(0)

detector = CustomVideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(execution_path, "detection_model-ex-015--loss-0011.676.h5"))
detector.setJsonPath("detection_config.json")
detector.loadModel()

detector.detectObjectsFromVideo(camera_input=camera,
                                output_file_path=os.path.join(execution_path, "web_face"),
                                frames_per_second=20,
                                minimum_percentage_probability=40,
                                log_progress=True,
                                frame_detection_interval=1)
from imageai.Detection.Custom import CustomVideoObjectDetection
import os
import cv2


def videoLoop():
    execution_path = os.getcwd()
    camera = cv2.VideoCapture(0)

    video_detector = CustomVideoObjectDetection()
    video_detector.setModelTypeAsYOLOv3()
    video_detector.setModelPath("detection_model-ex-012--loss-0003.960.h5")  # Download the model from "https://github.com/NAERSTEAM/EELAB/releases/download/20202603_0/detection_model-ex-012--loss-0003.960.h5"
    video_detector.setJsonPath("detection_config HandSign.json")
    video_detector.loadModel(detection_speed='fastest')

    video_detector.detectObjectsFromVideo(camera_input=camera,
                                          output_file_path=os.path.join(execution_path, "HandSignRecognition"),
                                          frames_per_second=20,
                                          minimum_percentage_probability=80,
                                          log_progress=False,
                                          save_detected_video=False,
                                          per_frame_function=per_frame_function_DataGet,
                                          return_detected_frame=True)
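# The videoLoop function above passes a per_frame_function_DataGet callback that is not
# defined in the snippet. Below is a minimal sketch of what such a callback could look like,
# assuming ImageAI's documented per-frame callback signature (frame number, detection array,
# per-class counts, and the raw frame because return_detected_frame=True). The body is
# illustrative only, not taken from the original source.
def per_frame_function_DataGet(frame_number, output_array, output_count, returned_frame):
    # output_array is a list of dicts with "name", "percentage_probability" and "box_points"
    for detection in output_array:
        print(frame_number, detection["name"], detection["percentage_probability"], detection["box_points"])
    # output_count maps each detected class name to how many instances appear in this frame
    print("Counts for frame", frame_number, ":", output_count)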
# Based on the custom object detection example of ImageAI -- https://imageai.readthedocs.io/en/latest/customdetection/
# For model training: https://colab.research.google.com/drive/1nv6TluehJkFGKLdbnHayWXa0gVzI2voB
# Apr 28 2020. T.I. created this file.
from imageai.Detection.Custom import CustomVideoObjectDetection
import os
import cv2

execution_path = os.getcwd()
camera = cv2.VideoCapture(0)

video_detector = CustomVideoObjectDetection()
video_detector.setModelTypeAsYOLOv3()
video_detector.setModelPath("detection_model-ex-012--loss-0003.944.h5")  # Please download the .h5 model from "https://drive.google.com/file/d/1sNdDf_gCu8QZ16xxMkq_U3THAyewsVxT/view?usp=sharing"
video_detector.setJsonPath("detection_config.json")
video_detector.loadModel()

video_detector.detectObjectsFromVideo(
    camera_input=camera,
    output_file_path=os.path.join(execution_path, "maskesDetection"),
    frames_per_second=2,
    minimum_percentage_probability=60,
    log_progress=True,
)
from imageai.Detection.Custom import CustomVideoObjectDetection
import os

execution_path = os.getcwd()

video_detector = CustomVideoObjectDetection()
video_detector.setModelTypeAsYOLOv3()
video_detector.setModelPath("hololens-ex-60--loss-2.76.h5")
video_detector.setJsonPath("detection_config.json")
video_detector.loadModel()

video_detector.detectObjectsFromVideo(input_file_path="1.mp4",
                                      output_file_path=os.path.join(execution_path, "holo1-detected3"),
                                      frames_per_second=20,
                                      minimum_percentage_probability=40,
                                      log_progress=True)
from imageai.Detection.Custom import CustomVideoObjectDetection
import os

# execution_path is assumed to be defined at module level, e.g. execution_path = os.getcwd()


def detect_from_video():
    detector = CustomVideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(detection_model_path=os.path.join(execution_path, "detection_model-ex-33--loss-4.97.h5"))
    detector.setJsonPath(configuration_json=os.path.join(execution_path, "detection_config.json"))
    detector.loadModel()

    detected_video_path = detector.detectObjectsFromVideo(
        input_file_path=os.path.join(execution_path, "video1.mp4"),
        frames_per_second=30,
        output_file_path=os.path.join(execution_path, "video1-detected"),
        minimum_percentage_probability=40,
        log_progress=True)
import os
from pathlib import Path

from imageai.Detection.Custom import CustomObjectDetection, CustomVideoObjectDetection


def recognize_custom(image_path, vehicle_type, isVideo):
    if vehicle_type == 'TANK':
        model_name = 'tank_model_3_loss-0008.h5'
        json_name = 'tank_detection_config.json'
    elif vehicle_type == 'APC':
        model_name = 'apc_model_6_loss-0012.h5'
        json_name = 'apc_detection_config.json'
    elif vehicle_type == 'SHIP':
        print('3')
    elif vehicle_type == 'AERO':
        print('4')

    folder_path = str(os.getcwd()) + "\\recognize_app\\rec_api\\imageairecognizer\\"

    if isVideo == False:
        detector = CustomObjectDetection()
        detector.setModelTypeAsYOLOv3()
        detector.setModelPath(folder_path + model_name)
        detector.setJsonPath(folder_path + json_name)
        detector.loadModel()
        detector = detector.detectObjectsFromImage(
            input_image=image_path,
            output_image_path=image_path,
            minimum_percentage_probability=30)
    else:
        video_detector = CustomVideoObjectDetection()
        video_detector.setModelTypeAsYOLOv3()
        video_detector.setModelPath(folder_path + model_name)
        video_detector.setJsonPath(folder_path + json_name)
        video_detector.loadModel()
        print(Path(image_path).stem)
        video_detector = video_detector.detectObjectsFromVideo(
            input_file_path=image_path,
            output_file_path=image_path,
            minimum_percentage_probability=30,
            frames_per_second=30,
            log_progress=True)
        os.remove(image_path)
        p = Path(video_detector)
        FILE_NAME = p.with_suffix('').with_suffix('').name
        os.popen(
            "ffmpeg -i {input} -ac 2 -b:v 2000k -c:a aac -c:v libx264 -b:a 160k -vprofile high -bf 0 -strict experimental -f mp4 {output}.mp4"
            .format(input=p, output='.\\media\\{0}'.format(FILE_NAME))).close()
import os

from imageai.Detection.Custom import CustomVideoObjectDetection

# model_path, model_json, video_file, video_file_output, forFrame and forSecond are assumed
# to be defined elsewhere in the test module.


def test_custom_video_detection_yolov3_analysis():
    detector = CustomVideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(model_path)
    detector.setJsonPath(model_json)
    detector.loadModel()

    video_path = detector.detectObjectsFromVideo(
        input_file_path=video_file,
        output_file_path=video_file_output,
        save_detected_video=True,
        frames_per_second=30,
        log_progress=True,
        per_frame_function=forFrame,
        per_second_function=forSecond,
        return_detected_frame=True)

    assert os.path.exists(video_file_output + ".avi")
    assert isinstance(video_path, str)
    os.remove(video_file_output + ".avi")
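# The test above relies on forFrame and forSecond callbacks defined elsewhere. Below is a
# minimal sketch of what they might look like, assuming ImageAI's documented analysis
# callback signatures; the extra frame argument is passed because return_detected_frame=True.
# The bodies are illustrative only.
def forFrame(frame_number, output_array, output_count, returned_frame):
    # Called once per analysed frame with the detections and per-class counts.
    print("FRAME", frame_number, output_count)


def forSecond(second_number, output_arrays, count_arrays, average_output_count, returned_frame):
    # Called once per second of video with the per-frame arrays accumulated over that second.
    print("SECOND", second_number, average_output_count)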
import os

from matplotlib import pyplot as plt
from imageai.Detection.Custom import CustomVideoObjectDetection

# The plotting block below is an excerpt from the body of the forFrame per-frame callback:
# frame_number, returned_frame, labels, sizes and this_colors are built earlier in that
# callback, and execution_path is assumed to be defined elsewhere.
manager = plt.get_current_fig_manager()
manager.resize(width=1000, height=500)
resized = True

plt.subplot(1, 2, 1)
plt.title("Frame : " + str(frame_number))
plt.axis("off")
plt.imshow(returned_frame, interpolation="none")

plt.subplot(1, 2, 2)
plt.title("Analysis: " + str(frame_number))
plt.pie(sizes, labels=labels, colors=this_colors, shadow=True, startangle=140, autopct="%1.1f%%")

plt.pause(0.01)

video_detector = CustomVideoObjectDetection()
video_detector.setModelTypeAsYOLOv3()
video_detector.setModelPath("./models/detection_model-ex-010--loss-0005.362.h5")
video_detector.setJsonPath("detection_config.json")
video_detector.loadModel()

plt.show()

ret = video_detector.detectObjectsFromVideo(input_file_path="benchmark.mp4",
                                            output_file_path=os.path.join(execution_path, "weirdtest"),
                                            frames_per_second=20,
                                            minimum_percentage_probability=10,
                                            log_progress=True,
                                            per_frame_function=forFrame,
                                            return_detected_frame=True)
print(ret)
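# Hedged sketch: how labels, sizes and this_colors used above are typically built from
# output_count in ImageAI-style video-analysis callbacks. color_index (a class-name -> colour
# mapping) is an assumed module-level dict, not part of the original snippet; the plotting
# code shown in the excerpt above would follow this block inside the callback.
def forFrame(frame_number, output_array, output_count, returned_frame):
    labels = []
    sizes = []
    this_colors = []
    for class_name in output_count:
        labels.append(class_name + " = " + str(output_count[class_name]))
        sizes.append(output_count[class_name])
        this_colors.append(color_index[class_name])
    # ... plotting code as in the excerpt above ...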
from imageai.Detection.Custom import CustomVideoObjectDetection
import os
import cv2


def videoLoop():
    execution_path = os.getcwd()
    camera = cv2.VideoCapture(0)

    video_detector = CustomVideoObjectDetection()
    video_detector.setModelTypeAsYOLOv3()
    video_detector.setModelPath("detection_model-ex-012--loss-0003.944.h5")  # Download the model from "https://github.com/firmamentone/masksDetection/releases/download/20200501_0/detection_model-ex-012--loss-0003.944.h5"
    video_detector.setJsonPath("detection_config_Mask.json")
    video_detector.loadModel(detection_speed='fastest')

    video_detector.detectObjectsFromVideo(
        camera_input=camera,
        output_file_path=os.path.join(execution_path, "holo1-detected3"),
        frames_per_second=20,
        minimum_percentage_probability=40,
        log_progress=True,
        save_detected_video=False,
        live_window=True)  # AIT parameter: live_window
# Caution: this example is not supported by the official ImageAI!!!
# This is the example for the AIT test "Live window".
from imageai.Detection.Custom import CustomVideoObjectDetection
import os
import cv2

execution_path = os.getcwd()
camera = cv2.VideoCapture(0)

video_detector = CustomVideoObjectDetection()
video_detector.setModelTypeAsYOLOv3()
video_detector.setModelPath("detection_model-ex-012--loss-0003.944.h5")
video_detector.setJsonPath("detection_config_Mask.json")
video_detector.loadModel()

video_detector.detectObjectsFromVideo(
    camera_input=camera,
    output_file_path=os.path.join(execution_path, "maskDetection"),
    frames_per_second=20,
    minimum_percentage_probability=40,
    log_progress=True,
    save_detected_video=False,
    live_window=True)  # AIT parameter: live_window
import os
import cv2

from imageai.Detection.Custom import CustomVideoObjectDetection

# execution_path, model_path, model_config and the forFrame callback are assumed to be
# defined at module level.


def detect_from_camera():
    camera = cv2.VideoCapture(0)

    # scale down the video for better performance
    camera.set(3, 320)   # camera width  (cv2.CAP_PROP_FRAME_WIDTH)
    camera.set(4, 240)   # camera height (cv2.CAP_PROP_FRAME_HEIGHT)
    camera.set(30, 0.1)  # intended to lower the frame rate (note: cv2.CAP_PROP_FPS is property index 5)

    detector = CustomVideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(detection_model_path=os.path.join(execution_path, model_path))
    detector.setJsonPath(configuration_json=os.path.join(execution_path, model_config))
    detector.loadModel()

    detected_video_path = detector.detectObjectsFromVideo(
        camera_input=camera,
        per_frame_function=forFrame,
        save_detected_video=False,
        minimum_percentage_probability=40,
        log_progress=True,
        return_detected_frame=True)
def detect_from_video(video_input):
    detector = CustomVideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(detection_model_path=os.path.join(execution_path, model_path))
    detector.setJsonPath(configuration_json=os.path.join(execution_path, model_config))
    detector.loadModel()

    detected_video_path = detector.detectObjectsFromVideo(
        input_file_path=os.path.join(execution_path, video_input),
        per_frame_function=forFrame,
        save_detected_video=False,
        minimum_percentage_probability=40,
        log_progress=True,
        return_detected_frame=True)
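# The two functions above pass a forFrame callback that is not shown here. Since
# save_detected_video=False and return_detected_frame=True, a typical callback would display
# the returned frame itself. Minimal sketch (assumed, not from the original source):
def forFrame(frame_number, output_array, output_count, returned_frame):
    # returned_frame is a numpy array of the frame with the detections drawn on it
    cv2.imshow("detections", returned_frame)
    cv2.waitKey(1)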
from imageai.Detection.Custom import CustomVideoObjectDetection
import os
import cv2
from matplotlib import pyplot as plt

execution_path = os.getcwd()
camera = cv2.VideoCapture(0)

detector = CustomVideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(execution_path, r"C:\Users\This\Downloads\Copy of detection_model-ex-010--loss-0007.627.h5"))
detector.setJsonPath(r"C:\Users\This\Downloads\Copy of detection_config.json")
detector.loadModel()

video_path = detector.detectObjectsFromVideo(camera_input=camera,
                                             output_file_path=os.path.join(execution_path, "camera_detected_video"),
                                             frames_per_second=30,
                                             log_progress=True,
                                             minimum_percentage_probability=40,
                                             return_detected_frame=True,
                                             detection_timeout=120)
import cv2

from imageai.Detection.Custom import CustomVideoObjectDetection

camera = cv2.VideoCapture(0)


def show_frame(camera):
    if (not camera.isOpened()):
        print("camera not opened")
    else:
        print("camera opened")
    _, frame = camera.read()
    cv2.waitKey(20)
    cv2.imwrite('camera_view.png', frame)
    #cv2.destroyAllWindows()


model_path = "models/detection_model-ex-030--loss-0002.725.h5"

video_detector = CustomVideoObjectDetection()
video_detector.setModelTypeAsYOLOv3()
video_detector.setModelPath(model_path)
video_detector.setJsonPath("./models/detection_config.json")
video_detector.loadModel()

'''
custom = video_detector.CustomObjects(person=True, handbag=True, tie=True, suitcase=True,
                                      bottle=True, wine_glass=True, cup=True, fork=True,
                                      knife=True, spoon=True, bowl=True, banana=True,
                                      apple=True, sandwich=True, orange=True, pizza=True,
                                      donut=True, cake=True, chair=True, potted_plant=True,
                                      laptop=True, mouse=True, remote=True, keyboard=True,
                                      cell_phone=True, book=True, clock=True, scissors=True)
'''


def objectsInFrame(frame_number, output_array, output_count, returned_frame):
    print("output_array length %i" % len(output_array))
    items = len(output_count)
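# The snippet above sets up the detector and the objectsInFrame callback (shown truncated)
# but never starts detection. A minimal sketch of how it might be wired up, assuming the
# camera feed and callback defined above; the parameter values are illustrative only.
video_detector.detectObjectsFromVideo(camera_input=camera,
                                      save_detected_video=False,
                                      per_frame_function=objectsInFrame,
                                      minimum_percentage_probability=40,
                                      return_detected_frame=True,
                                      log_progress=False)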
# Importing the ImageAI library.
# This library provides the API used by the object detection system.
from imageai.Detection.Custom import CustomVideoObjectDetection
import os
import cv2

execution_path = os.getcwd()

# Use OpenCV to start the camera and take the video feed.
camera = cv2.VideoCapture(0)

detector = CustomVideoObjectDetection()
# Our model is trained using transfer learning on YOLOv3.
detector.setModelTypeAsYOLOv3()
# Put the path to your trained Keras model here.
detector.setModelPath("Path to trained keras model")
# Put the path to the configuration JSON file here.
detector.setJsonPath("path to json configuration file")
detector.loadModel()

# The video is taken from the camera; set output_file_path to wherever you want to access
# the output video feed.
# minimum_percentage_probability: increase or decrease it to fine-tune the output.
detector.detectObjectsFromVideo(camera_input=camera,
                                output_file_path=os.path.join(execution_path, "name of output video"),
                                frames_per_second=16,
                                minimum_percentage_probability=40,
                                log_progress=True)
# Caution: this example is not supported by the official ImageAI!!!
# This is the example for the AIT test "Live window".
from imageai.Detection.Custom import CustomVideoObjectDetection
import os
import cv2

os.environ['CUDA_VISIBLE_DEVICES'] = '-1'  # force the TensorFlow backend to use the CPU (TensorFlow 1.15.0 or higher)

execution_path = os.getcwd()
camera = cv2.VideoCapture(0)

video_detector = CustomVideoObjectDetection()
video_detector.setModelTypeAsYOLOv3()
video_detector.setModelPath("detection_model-ex-012--loss-0003.960.h5")  # Download the model from "https://github.com/NAERSTEAM/EELAB/releases/download/20202603_0/detection_model-ex-012--loss-0003.960.h5"
video_detector.setJsonPath("detection_config HandSign.json")
video_detector.loadModel(detection_speed="fastest")

video_detector.detectObjectsFromVideo(camera_input=camera,
                                      output_file_path=os.path.join(execution_path, "HandSignRecognition"),
                                      frames_per_second=20,
                                      minimum_percentage_probability=80,
                                      log_progress=True,
                                      save_detected_video=False,
                                      live_window=True)  # AIT parameter: live_window