Example #1
def GetObjectsList(name,expansion):
    path_yolo = os.path.abspath("yolo.h5")
    # Down-sample the clip (1 fps, no audio, 5x speed) so detection runs faster
    video = VideoFileClip(f"{name}.{expansion}")
    video = video.set_fps(1)
    video = video.without_audio()
    video = video.fx(vfx.speedx, 5)
    video.write_videofile(f"{name}_temp.{expansion}")

    ListObjects = []
    def forFrame(frame_number, output_array, output_count):
        # Record each detected object name once
        for k in output_array:
            if k["name"] not in ListObjects:
                ListObjects.append(k["name"])

    video_detector = VideoObjectDetection()
    video_detector.setModelTypeAsYOLOv3()
    video_detector.setModelPath(path_yolo)
    video_detector.loadModel()


    video_detector.detectObjectsFromVideo(
        input_file_path=f"{name}_temp.{expansion}",
        save_detected_video=False,
        per_frame_function=forFrame,
        minimum_percentage_probability=70,
        log_progress=False,
        display_percentage_probability=False)
    os.remove(f"{name}_temp.{expansion}")
    return ListObjects
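A minimal usage sketch for GetObjectsList above (not part of the original snippet): it assumes the imports below and a traffic.mp4 plus yolo.h5 in the working directory.

# Hedged usage sketch for GetObjectsList; the import paths and the sample
# video name are assumptions, not part of the original snippet.
import os
from moviepy.editor import VideoFileClip
import moviepy.video.fx.all as vfx
from imageai.Detection import VideoObjectDetection

# Expects traffic.mp4 and yolo.h5 in the working directory (assumption).
objects = GetObjectsList("traffic", "mp4")
print(objects)  # e.g. a de-duplicated list such as ['car', 'person']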
Example #2
def main():
    execution_path = os.getcwd()
    dic = {}

    def forFrame(frame_number, output_array, output_count):
        print("Frame Number : ", frame_number)
        dic[frame_number] = output_array

    video_detector = VideoObjectDetection()
    video_detector.setModelTypeAsYOLOv3()
    video_detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
    video_detector.loadModel(detection_speed="fast")

    video_detector.detectObjectsFromVideo(input_file_path=os.path.join(
        execution_path, "video.mp4"),
                                          save_detected_video=False,
                                          frames_per_second=20,
                                          per_frame_function=forFrame,
                                          minimum_percentage_probability=30)
    for k, v in dic.items():
        print(str(k) + " : " + str(v))
    video_path = video_detector.detectObjectsFromVideo(
        input_file_path=os.path.join(execution_path, "video.mp4"),
        output_file_path=os.path.join(execution_path, "video_output"),
        frames_per_second=29,
        minimum_percentage_probability=30)
class VideoThread(Thread):
    opencvCamera = None
    ShouldClose = False
    Video_Detect = None
    OutPath = VideoPath
    #OutPath = r"C:\Users\JackXu\PycharmProjects\untitled\VideoProcessing"
    def __init__(self,CameraObj):
        Thread.__init__(self)
        self.opencvCamera = CameraObj




    def run(self):
        if self.ShouldClose:
            # Thread objects have no exit(); returning from run() ends the thread
            return
        else:
            self.Video_Detect = VideoObjectDetection()
            self.Video_Detect.setModelTypeAsYOLOv3()
            #BeginTime = time.time()
            self.Video_Detect.setModelPath(ModelPath)
            #EndTime = time.time()
            #print(" How long to set and load", EndTime - BeginTime)
            #self.Video_Detect.setModelPath(r"C:\Users\JackXu\PycharmProjects\untitled\yolo.h5")
            self.Video_Detect.loadModel("fastest")
            self.Video_Detect.detectObjectsFromVideo(camera_input= self.opencvCamera,return_detected_frame=True,
                                                     output_file_path=VideoPath,
                                                      frames_per_second=30,
                                                     minimum_percentage_probability=40,
                                                     per_frame_function=showCV, save_detected_video=False)
            self.ShouldClose = True
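VideoThread relies on module-level VideoPath, ModelPath and showCV names that the snippet does not define; a hedged sketch of how they might be wired up (paths, camera index and the showCV body are assumptions, and these definitions would sit above the class in a real module):

# Hedged sketch of the module-level pieces VideoThread relies on; in a real
# module these would be defined before the class statement runs.
import cv2

VideoPath = "detected_output"   # base path ImageAI writes to (assumption)
ModelPath = "yolo.h5"           # YOLOv3 weights file (assumption)

def showCV(frame_number, output_array, output_count, returned_frame):
    # With return_detected_frame=True the detected frame is passed as the 4th argument
    cv2.imshow("detection", returned_frame)
    cv2.waitKey(1)

camera = cv2.VideoCapture(0)    # camera index is an assumption
worker = VideoThread(camera)
worker.start()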
def detect(var_name, var_model_path):
    #
    ##  Build the cache file paths.
    var_in_path = os.path.join(os.getcwd(), "cache", var_name + "IN.mp4")
    var_temp_path = os.path.join(os.getcwd(), "cache", var_name + "TEMP")
    # var_out_path = os.path.join(os.getcwd(), "cache", var_name + "OUT.mp4")
    #
    ##  Initiate the detector.
    var_detector = VideoObjectDetection()
    var_detector.setModelTypeAsRetinaNet()
    var_detector.setModelPath(var_model_path)
    var_detector.loadModel(detection_speed="fast")
    #
    ##  Perform the object detection.
    var_detector.detectObjectsFromVideo(input_file_path=var_in_path,
                                        output_file_path=var_temp_path,
                                        frames_per_second=30,
                                        log_progress=True,
                                        frame_detection_interval=1,
                                        minimum_percentage_probability=15)
    #
    ##  Convert the format of output video to MP4.
    var_clip = moviepy.VideoFileClip("cache/" + var_name + "TEMP.avi")
    var_clip.write_videofile("cache/" + var_name + "OUT.mp4")
    os.remove(var_temp_path + ".avi")
    return "Detect Finish!"
def detect():
    """
    Count number of people in test video per second
    """
    execution_path = os.getcwd()

    # Add your camera credential
    # camera = cv2.VideoCapture('rtsp://*****:*****@0.0.0.0')

    # Test Video
    camera = cv2.VideoCapture('test.mp4')

    def getSizeVD(vcap):
        """
        Get Size of Camera Frame
        """
        if vcap.isOpened():
            width = vcap.get(3)   # cv2.CAP_PROP_FRAME_WIDTH
            height = vcap.get(4)  # cv2.CAP_PROP_FRAME_HEIGHT
            return width * height

    video_size = getSizeVD(camera)

    def forFrame(frame_number, output_array, output_count):
        """
        Get size of person on frame
        """
        size_list = []
        for item in output_array:
            if item.get('name', '') == 'person':
                if item.get('box_points', []):
                    [x1,y1,x2,y2] = item.get('box_points', [])
                    size = (x2 - x1) * (y2 - y1)
                    size_list.append(size)
        if size_list:
            if max(size_list) > video_size * PERCENT:
                # Add your shell script
                print("Detected Closed Person")

    # Create Video Detection Module based on Yolo
    video_detector = VideoObjectDetection()
    video_detector.setModelTypeAsYOLOv3()
    video_detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
    video_detector.loadModel()

    video_detector.detectObjectsFromVideo(
        camera_input=camera,
        output_file_path=os.path.join(execution_path, "test"),
        # save_detected_video=False,
        frames_per_second=10,
        per_frame_function=forFrame,
        minimum_percentage_probability=30
    )
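The people-counting detect above reads a module-level PERCENT threshold that the snippet does not define; a hedged sketch of the assumed setup (the 0.3 value is illustrative):

# Hedged sketch of the module-level setup this example assumes;
# the 0.3 threshold is an illustrative value, not from the original code.
import os
import cv2
from imageai.Detection import VideoObjectDetection

# A person whose bounding box covers more than this fraction of the frame
# is treated as "close".
PERCENT = 0.3

if __name__ == "__main__":
    detect()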
Example #6
    def start(self):

        detector = VideoObjectDetection()
        detector.setModelTypeAsRetinaNet()
        detector.setModelPath(
            os.path.join(self.execution_path, "resnet50_coco_best_v2.0.1.h5"))
        detector.loadModel()
        detector.detectObjectsFromVideo(camera_input=self.cap,
                                        output_file_path=os.path.join(
                                            self.execution_path,
                                            "video_frame_analysis"),
                                        frames_per_second=30,
                                        per_frame_function=forFrame,
                                        minimum_percentage_probability=70,
                                        return_detected_frame=True)
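The start method above references self.cap, self.execution_path and a forFrame callback that the snippet does not show; a hedged sketch of the surrounding pieces (class name, camera index and the forFrame body are assumptions):

# Hedged sketch of the surroundings start() assumes; the class name, camera
# index and the forFrame body are illustrative assumptions.
import os
import cv2
from imageai.Detection import VideoObjectDetection

def forFrame(frame_number, output_array, output_count, returned_frame):
    # return_detected_frame=True adds the detected frame as the fourth argument
    print(frame_number, output_count)

class FrameAnalyser:
    def __init__(self):
        self.execution_path = os.getcwd()
        self.cap = cv2.VideoCapture(0)

    # start(self), as defined above, would be a method of this class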
def car_type_more_tell_video(path):
    execution_path = os.getcwd()

    detector = VideoObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(
        os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel("fast")

    detector.detectObjectsFromVideo(
        input_file_path=os.path.join(execution_path, path),
        output_file_path=os.path.join(execution_path, "test"),
        frames_per_second=20,
        log_progress=True)
    print("success")
Example #8
def detect_with_imageai_retinanet():

    def forFrame(frame_number, output_array, output_count):
        """Детекция bounding box'ов"""

        print("ДЛЯ КАДРА " , frame_number)
        print('Объект:', output_array[0]['name'])
        print('Вероятность:', output_array[0]['percentage_probability'])
        print('Bounding box:', output_array[0]['box_points'])
        print("Уникальных объектов: ", output_count[output_array[0]['name']])
        print("------------END OF A FRAME --------------\n\n")


    execution_path = os.getcwd()
    camera = cv2.VideoCapture(0)
    detector = VideoObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(os.path.join(execution_path, "weights/resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel()


    video_model = detector.detectObjectsFromVideo(camera_input=camera,
                                                  output_file_path=os.path.join(execution_path, "camera_detected_video"),
                                                  frames_per_second=20,
                                                  per_frame_function=forFrame,
                                                  minimum_percentage_probability=40)
Example #9
def detect_with_imageai_yolov3():

    """
    <-- CODE IF YOU NEED TO LOAD SOME VIDEO -->
    execution_path = os.getcwd()
    detector = VideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(os.path.join(execution_path, "weights/yolo.h5"))
    detector.loadModel()

    video_path = detector.detectObjectsFromVideo(
        input_file_path=os.path.join(execution_path, "example_video.mp4"),
        output_file_path=os.path.join(execution_path, "Yolo.mp4"),
        frames_per_second=20, log_progress=True
    )
    """

    execution_path = os.getcwd()
    camera = cv2.VideoCapture(0)
    detector = VideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(os.path.join(execution_path, "weights/yolo.h5"))
    detector.loadModel()


    video_model = detector.detectObjectsFromVideo(camera_input=camera,
                                                  output_file_path=os.path.join(execution_path, "camera_detected_video"),
                                                  frames_per_second=20,
                                                  log_progress=True,
                                                  minimum_percentage_probability=40)
Example #10
class MyDetector:
    myDetector = None
    def __init__(self):
        execution_path = os.getcwd()
        self.myDetector = VideoObjectDetection()
        # self.myDetector.setModelTypeAsYOLOv3()
        # self.myDetector.setModelPath(os.path.join(execution_path, "yolo.h5"))
        #self.myDetector.loadModel(detection_speed="flash")

    def process_ml(self):
        execution_path = os.getcwd()
        self.myDetector = VideoObjectDetection()
        self.myDetector.setModelTypeAsYOLOv3()
        self.myDetector.setModelPath(os.path.join(execution_path, "yolo.h5"))
        self.myDetector.loadModel(detection_speed="flash")
        video_path = self.myDetector.detectObjectsFromVideo(
            input_file_path=os.path.join(execution_path, "input.mp4"),
            output_file_path=os.path.join(execution_path, "upload/output_detected_1"),
            frames_per_second=29,
            per_frame_function=forFrame,
            per_second_function=forSeconds,
            per_minute_function=forMinute,
            video_complete_function=forFull,
            minimum_percentage_probability=10,
            log_progress=True)
        return video_path
    def capture(self, callback, gray=False):
        if not self.cap:
            sys.exit('The capture is not ready')

        while True:
            
            # esc, q
            ch = cv2.waitKey(10) & 0xFF
            if ch == 27 or ch == ord('q'):
                break
            t = cv2.getTickCount()
            
            execution_path = os.getcwd()
            detector = VideoObjectDetection()
            detector.setModelTypeAsTinyYOLOv3()
            detector.setModelPath(os.path.join(execution_path , "yolo-tiny.h5"))
            detector.loadModel()
            video_path = detector.detectObjectsFromVideo(
                    camera_input=self.cap,
                    output_file_path=os.path.join(execution_path, "camera_detected_video"),
                    frames_per_second=10, log_progress=True, minimum_percentage_probability=30)
            print(video_path)
            
        
            if self.openni:
                if not self.cap.grab():
                    sys.exit('Grabs the next frame failed')
                ret, depth = self.cap.retrieve(cv2.CAP_OPENNI_DEPTH_MAP)
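process_ml above passes four callbacks (forFrame, forSeconds, forMinute, forFull) that the snippet does not define; a hedged sketch of their signatures, following the parameter layout ImageAI documents (the bodies are illustrative):

# Hedged callback sketches for process_ml above; the print bodies are
# illustrative, only the parameter layout follows ImageAI's documentation.
def forFrame(frame_number, output_array, output_count):
    print("Frame", frame_number, output_count)

def forSeconds(second_number, output_arrays, count_arrays, average_output_count):
    print("Second", second_number, average_output_count)

def forMinute(minute_number, output_arrays, count_arrays, average_output_count):
    print("Minute", minute_number, average_output_count)

def forFull(output_arrays, count_arrays, average_output_count):
    print("Whole video:", average_output_count)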
def test_video_detection_retinanet_analysis():

    try:
        keras.backend.clear_session()
    except:
        pass

    detector = VideoObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(model_path=os.path.join(
        main_folder, "data-models", "resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel(detection_speed="fastest")
    video_path = detector.detectObjectsFromVideo(
        input_file_path=video_file,
        output_file_path=video_file_output,
        save_detected_video=True,
        frames_per_second=30,
        log_progress=True,
        per_frame_function=forFrame,
        per_second_function=forSecond,
        return_detected_frame=True)

    assert os.path.exists(video_file_output + ".avi")
    assert isinstance(video_path, str)
    os.remove(video_file_output + ".avi")
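The test above depends on module-level fixtures (main_folder, video_file, video_file_output) and callbacks (forFrame, forSecond) that are not shown; a hedged sketch of what they might look like (folder names and assertions are assumptions):

# Hedged sketch of the module-level fixtures the test above assumes; the
# folder names and assertions are illustrative, only the variable names and
# callback roles come from the test itself.
import os
import keras
from imageai.Detection import VideoObjectDetection

main_folder = os.path.dirname(os.path.abspath(__file__))
video_file = os.path.join(main_folder, "data-videos", "traffic.mp4")
video_file_output = os.path.join(main_folder, "data-temp", "traffic_detected")

def forFrame(frame_number, output_array, output_count, *extra):
    # *extra absorbs the detected frame passed when return_detected_frame=True
    assert isinstance(frame_number, int)

def forSecond(second_number, output_arrays, count_arrays, average_output_count, *extra):
    assert isinstance(second_number, int)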
Example #13
def start():
    global vf
    ifp = entry_file_path.get()
    ofp = entry_file_path1.get()
    if vf == 0:
        try:
            print("Starting to render video")
            detector = VideoObjectDetection()
            detector.setModelTypeAsYOLOv3()
            detector.setModelPath(os.path.join(exec_path, "yolo.h5"))
            detector.loadModel()
        except:
            # Model weights missing: download them, then set up the detector again
            wget.download(
                "https://github.com/OlafenwaMoses/ImageAI/releases/download/1.0/yolo.h5"
            )
            print("Starting to render video")
            detector = VideoObjectDetection()
            detector.setModelTypeAsYOLOv3()
            detector.setModelPath(os.path.join(exec_path, "yolo.h5"))
            detector.loadModel()
        # Run the detection whether or not the model had to be downloaded first
        detections = detector.detectObjectsFromVideo(
            input_file_path=os.path.join(exec_path, ifp),
            output_file_path=os.path.join(exec_path, ofp),
            frames_per_second=20)
        label = Label(root, text="Successful!", fg="green")
        label.pack()
    elif vf == 1:
        try:
            print("Starting to render photo")
            detector = ObjectDetection()
            detector.setModelTypeAsRetinaNet()
            detector.setModelPath(
                os.path.join(exec_path, "resnet50_coco_best_v2.0.1.h5"))
            detector.loadModel()
        except OSError:
            # Model weights missing: download them, then set up the detector again
            wget.download(
                "https://github.com/OlafenwaMoses/ImageAI/releases/download/1.0/resnet50_coco_best_v2.0.1.h5"
            )
            print("Starting to render photo")
            detector = ObjectDetection()
            detector.setModelTypeAsRetinaNet()
            detector.setModelPath(
                os.path.join(exec_path, "resnet50_coco_best_v2.0.1.h5"))
            detector.loadModel()
        # Run the detection whether or not the model had to be downloaded first
        detections = detector.detectObjectsFromImage(
            input_image=os.path.join(exec_path, ifp),
            output_image_path=os.path.join(exec_path, ofp),
            display_percentage_probability=True,
            display_object_name=True)
        label = Label(root, text="Successful!", fg="green")
        label.pack()
    elif vf == 2:
        image = face_recognition.load_image_file(exec_path + "/" + ifp)
        face_landmarks_list = face_recognition.face_landmarks(image)
        pil_image = Image.fromarray(image)
        d = ImageDraw.Draw(pil_image)
        for face_landmarks in face_landmarks_list:
            for facial_feature in face_landmarks.keys():
                d.line(face_landmarks[facial_feature], width=5)
        pil_image.show()
Example #14
def detected():
    form = detectForm(request.args)
    if form.validate():
        input_path = form.data['inputpath']
        output_path = form.data['outputpath']
        videoName = form.data['videoname']
        try:
            detector = VideoObjectDetection()
            start = time.time()
            detector.setModelTypeAsRetinaNet()
            detector.setModelPath(
                os.path.join(input_path, "resnet50_coco_best_v2.0.1.h5"))
            detector.loadModel(detection_speed="fastest")
            video_path = detector.detectObjectsFromVideo(
                input_file_path=output_path,
                output_file_path=os.path.join(input_path,
                                              "detected" + videoName),
                frames_per_second=10,
                log_progress=True)

            convert(video_path, input_path + "/detected" + videoName)
            end = time.time()
            processtime = end - start
            return jsonify({
                'video_path': video_path,
                'processtime': processtime
            })
        except Exception as e:
            raise e
    return jsonify({'error': 'no correct form'})
def object_detection(input_file, out_name, model='ResNet'):
    video_detector = VideoObjectDetection()

    if model == "ResNet":
        video_detector.setModelTypeAsRetinaNet()
        video_detector.setModelPath(
            os.path.join(execution_path,
                         "pretranined_models/resnet50_coco_best_v2.0.1.h5"))

    elif model == "Yolo":
        video_detector.setModelTypeAsYOLOv3()
        video_detector.setModelPath(
            os.path.join(execution_path, "pretranined_models/yolo.h5"))

    else:
        video_detector.setModelTypeAsTinyYOLOv3()
        video_detector.setModelPath(
            os.path.join(execution_path, "pretranined_models/yolo-tiny.h5"))

    video_detector.loadModel()

    vi = video_detector.detectObjectsFromVideo(
        input_file_path=os.path.join(execution_path, input_file),
        output_file_path=os.path.join(execution_path, out_name),
        frames_per_second=10,
        per_second_function=forSeconds,
        per_frame_function=forFrame,
        per_minute_function=forMinute,
        minimum_percentage_probability=30)
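A hedged driver for object_detection above; execution_path and the no-op callbacks are assumptions added to make the call self-contained, and the file names are illustrative.

# Hedged driver for object_detection above; execution_path and the no-op
# callbacks are assumptions needed to make the call self-contained.
import os
from imageai.Detection import VideoObjectDetection

execution_path = os.getcwd()

def forFrame(frame_number, output_array, output_count): pass
def forSeconds(second_number, output_arrays, count_arrays, average_output_count): pass
def forMinute(minute_number, output_arrays, count_arrays, average_output_count): pass

object_detection("traffic.mp4", "traffic_detected", model="Yolo")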
Example #16
    def detectObjects(self):
        detector = VideoObjectDetection()
        detector.setModelTypeAsYOLOv3()
        detector.setModelPath("yolo.h5")
        detector.loadModel()
        video_path = detector.detectObjectsFromVideo(camera_input=self.cam, save_detected_video=True,
                                                     per_frame_function=self.forFrame, output_file_path="temp.avi",
                                                     log_progress=True, return_detected_frame=True)
        return
Example #17
def test_video_detection_tiny_yolov3(clear_keras_session):

    detector = VideoObjectDetection()
    detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath(model_path=os.path.join(main_folder, "data-models", "yolo-tiny.h5"))
    detector.loadModel(detection_speed="fast")
    video_path = detector.detectObjectsFromVideo(input_file_path=video_file, output_file_path=video_file_output, save_detected_video=True, frames_per_second=30, log_progress=True)

    assert os.path.exists(video_file_output + ".avi")
    assert isinstance(video_path, str)
    os.remove(video_file_output + ".avi")
Example #18
def detecting():
    video_detector = VideoObjectDetection()
    video_detector.setModelTypeAsRetinaNet()
    video_detector.setModelPath(os.path.join(execution_path, "resnet50_coco_best_v2.0.1_4.h5"))
    # video_detector.setModelPath(os.path.join(execution_path, "v2.h5"))
    video_detector.loadModel()

    vidcap = VideoCap()
    camera = vidcap.camera()
    video_path = video_detector.detectObjectsFromVideo(camera_input=camera, save_detected_video = False,
        frames_per_second=20, log_progress=True, minimum_percentage_probability=30, per_frame_function=forFrame,
        return_detected_frame=True)
def detect_text(file, output_name):

    detector = VideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath("yolo.h5")
    detector.loadModel()

    video_path = detector.detectObjectsFromVideo(input_file_path=file,
                                                 output_file_path=output_name,
                                                 frames_per_second=29,
                                                 log_progress=True)
    return video_path
Example #20
def tinyYoloVideo(video, model, output):
    detector = VideoObjectDetection()
    detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath(model)
    detector.loadModel()
    start_time = time.time()
    video_path = detector.detectObjectsFromVideo(input_file_path=video,
                                                 output_file_path=output,
                                                 frames_per_second=29,
                                                 log_progress=True)
    print(video_path)
    print("Total time: %s seconds" % (time.time() - start_time))
Example #21
class Detection:
    def __init__(self):
        self.path = os.getcwd()

        self.detector = VideoObjectDetection()
        self.detector.setModelTypeAsYOLOv3()
        self.detector.setModelPath("ObjectDetectionTensorflow/yolo.h5")
        self.detector.loadModel(detection_speed="fast")

        self.imgDetector = ObjectDetection()
        self.imgDetector.setModelTypeAsYOLOv3()
        self.imgDetector.setModelPath("ObjectDetectionTensorflow/yolo.h5")
        self.imgDetector.loadModel()

        self.camera = cv2.VideoCapture(0)

    def liveVideo(self):
        #Live to Video
        videoPath = self.detector.detectObjectsFromVideo(
            camera_input=self.camera,
            output_file_path=os.path.join(self.path, "Loaded_Video"),
            frames_per_second=30,
            log_progress=True,
            minimum_percentage_probability=80)
        print(videoPath)  # detectObjectsFromVideo returns the saved file's path, not an image frame

    def liveVideoShow(self):
        #Live video detection
        while True:
            ret, frame = self.camera.read()

            img = PIL.Image.fromarray(frame)
            img.save("ObjectDetectionTensorflow/images/pic.png")

            detected = self.imgDetector.detectCustomObjectsFromImage(
                input_image="ObjectDetectionTensorflow/images/pic.png",
                output_image_path="ObjectDetectionTensorflow/images/pic.png",
                minimum_percentage_probability=40)

            for eachObject in detected:
                print(eachObject["name"], " : ",
                      eachObject["percentage_probability"], " : ",
                      eachObject["box_points"])
                print("--------------------------------")

            if cv2.waitKey(33) == ord('a'):
                break

            img = mpimg.imread("ObjectDetectionTensorflow/images/pic.png")

            cv2.imshow('video', img)
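A hedged sketch of the imports and driver the Detection class above assumes; mpimg is taken to be matplotlib.image, as mpimg.imread() suggests.

# Hedged sketch of the imports and driver the Detection class above assumes.
import os
import cv2
import PIL.Image
import matplotlib.image as mpimg
from imageai.Detection import ObjectDetection, VideoObjectDetection

if __name__ == "__main__":
    d = Detection()
    d.liveVideoShow()   # or d.liveVideo() to record a detected clip instead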
Example #22
def getVideo(request):
    if request.method == "POST":
        try:
            file = request.FILES['sentFile']  #
        except:
            return render(request, 'homepage.html')

        import uuid

        unique_filename = str(uuid.uuid4())
        file_name = default_storage.save(unique_filename, file)

        #  Reading file from storage
        file = default_storage.open(file_name)
        file_url = default_storage.url(file_name)


        detector = VideoObjectDetection()
        detector.setModelTypeAsRetinaNet()
        path = os.path.join(settings.MODELS, "resnet50_coco_best_v2.0.1.h5")
        detector.setModelPath(path)
        detector.loadModel(detection_speed='faster')
        item = set()
        def forFrame(frame_number, output_array, output_count):
            for i in output_array:
                item.add(i['name'])

        video_path = detector.detectObjectsFromVideo(
            input_file_path='media/' + unique_filename,
            output_file_path='media/imagetest_video',
            frames_per_second=20, log_progress=True,
            minimum_percentage_probability=30,
            save_detected_video=True,
            per_frame_function=forFrame)

        categoryName = objectCategory.objects.filter(object__in=item).values_list('categoryName', flat=True)
        ac = AdvertisementCategory.objects.filter(id__in=categoryName)
        ads = Advertisement.objects.filter(categoryName__in=ac)
        if len(ads)==0:
            return render(request, 'failPredictions.html')
        context = {
            'ac': ac,
            'ads': ads,
        }

        return render(request, 'vid_predictions.html',context)
    else:
        return render(request, 'homepage2.html')
Example #23
def videoIdentify(input_dir, output_dir):
    execution_path = os.getcwd()
    detector = VideoObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(
        os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel()

    video_path = detector.detectObjectsFromVideo(input_file_path=input_dir,
                                                 output_file_path=output_dir,
                                                 frames_per_second=20,
                                                 log_progress=True)
    print(video_path)
Example #24
def test_object_default(path_in,
                        path_out,
                        suffix='object_default',
                        path_model=os.path.join(os.getcwd(),
                                                '../models/yolo.h5'),
                        speed='fast'):
    detector = VideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(path_model)
    detector.loadModel(detection_speed=speed)  #fast, faster, fastest, flash

    print(f'starting {suffix}')
    time_0 = time.time()

    detector.detectObjectsFromVideo(input_file_path=path_in,
                                    output_file_path=f'{path_out}_{suffix}',
                                    frames_per_second=20,
                                    per_frame_function=pfh.per_frame_handler,
                                    minimum_percentage_probability=10,
                                    return_detected_frame=True)

    print(f'mode {suffix} finished, elapsed time : {time.time() - time_0}s')
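test_object_default above calls pfh.per_frame_handler from a module that is not shown; a hedged sketch of such a handler (the module name and body are assumptions):

# Hedged sketch of the pfh helper module assumed above (e.g. imported as
# "import per_frame_handler as pfh"); the body is illustrative only.
def per_frame_handler(frame_number, output_array, output_count, returned_frame):
    # test_object_default sets return_detected_frame=True, so the frame arrives too
    print("frame", frame_number, "objects:", output_count)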
Example #25
def detect_objects(input_file_path, detection_confidence_threshold=60):
    # detection_confidence_threshold (int between 1 and 99) is the minimum detector confidence needed to include an object

    # Create object detector based on RetinaNet and load model weights
    detector = VideoObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath('resnet50_coco_best_v2.0.1.h5')
    detector.loadModel()

    all_frames = []

    # Generates a record of all objects detected
    def forFrame(frame_number, output_array, output_count):
        all_frames.append(output_array)
        if frame_number % 100 == 0:
            print(input_file_path + ' Frame ' + str(frame_number))

    detector.detectObjectsFromVideo(
        input_file_path=input_file_path,
        output_file_path='labeled_video',
        frames_per_second=30,
        per_frame_function=forFrame,
        minimum_percentage_probability=detection_confidence_threshold)
    return all_frames
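A hedged usage sketch for detect_objects above; the input file name and the summary printed at the end are illustrative.

# Hedged usage sketch for detect_objects; the input file name and the summary
# printed at the end are illustrative assumptions.
frames = detect_objects('traffic.mp4', detection_confidence_threshold=60)
# Each entry is one frame's output_array: a list of dicts carrying
# 'name', 'percentage_probability' and 'box_points'.
names = {obj['name'] for frame in frames for obj in frame}
print(f"{len(frames)} frames analysed, objects seen: {sorted(names)}")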
Example #26
def detect_with_imageai_yolotiny():
    
    execution_path = os.getcwd()
    camera = cv2.VideoCapture(0)
    detector = VideoObjectDetection()
    detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath(os.path.join(execution_path, "weights/yolo_tiny.h5"))
    detector.loadModel()


    video_model = detector.detectObjectsFromVideo(camera_input=camera,
                                                  output_file_path=os.path.join(execution_path, "camera_detected_video"),
                                                  frames_per_second=20,
                                                  log_progress=True,
                                                  minimum_percentage_probability=40)
Example #27
def object_detect(time):
    execution_path = os.getcwd()
    camera = cv2.VideoCapture(0)

    detector = VideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
    detector.loadModel()
    video_path = detector.detectObjectsFromVideo(
        camera_input=camera,
        output_file_path=os.path.join(execution_path, "camera_detected_video"),
        frames_per_second=20,
        per_frame_function=forFrame,
        minimum_percentage_probability=50,
        save_detected_video=True)
    return redirect(index)
Example #28
def start_analys_frames():
    path = 'Data/'
    file = ['yolo.h5', 'video.avi', 'frames']

    detector = VideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(path + file[0])
    detector.loadModel()

    video_path = detector.detectObjectsFromVideo(
        input_file_path=path + file[1],
        output_file_path=path + file[2],
        minimum_percentage_probability=40,
        frames_per_second=30,
        log_progress=True)

    c.convert()
Example #29
def test_video_detection_retinanet():

    detector = VideoObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(model_path=os.path.join(
        main_folder, "data-models", "resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel(detection_speed="fastest")
    video_path = detector.detectObjectsFromVideo(
        input_file_path=video_file,
        output_file_path=video_file_output,
        save_detected_video=True,
        frames_per_second=30,
        log_progress=True)

    assert os.path.exists(video_file_output + ".avi")
    assert isinstance(video_path, str)
    os.remove(video_file_output + ".avi")
Example #30
class itemCamSaveFile():
    def __init__(self, setModePath):
        self.execution_path = os.getcwd()
        self.detector = VideoObjectDetection()
        self.detector.setModelTypeAsYOLOv3()
        self.detector.setModelPath(
            os.path.join(self.execution_path, setModePath))
        self.detector.loadModel()

    def item_CamSaveFile(self, camera, outputFile):
        video_path = self.detector.detectObjectsFromVideo(
            camera_input=camera,
            output_file_path=os.path.join(self.execution_path, outputFile),
            frames_per_second=10,
            log_progress=True,
            minimum_percentage_probability=20)
        print(video_path)
from imageai.Detection import VideoObjectDetection
import os

execution_path = os.getcwd()

detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath( os.path.join(execution_path , "yolo.h5"))
detector.loadModel()

video_path = detector.detectObjectsFromVideo(
    input_file_path=os.path.join(execution_path, "traffic.mp4"),
    output_file_path=os.path.join(execution_path, "traffic_detected"),
    frames_per_second=20, log_progress=True)
print(video_path)
        this_colors.append(color_index[eachItem])

    global resized

    if (resized == False):
        manager = plt.get_current_fig_manager()
        manager.resize(width=1000, height=500)
        resized = True

    plt.subplot(1, 2, 1)
    plt.title("Frame : " + str(frame_number))
    plt.axis("off")
    plt.imshow(returned_frame, interpolation="none")

    plt.subplot(1, 2, 2)
    plt.title("Analysis: " + str(frame_number))
    plt.pie(sizes, labels=labels, colors=this_colors, shadow=True, startangle=140, autopct="%1.1f%%")

    plt.pause(0.01)



video_detector = VideoObjectDetection()
video_detector.setModelTypeAsYOLOv3()
video_detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
video_detector.loadModel()

plt.show()

video_detector.detectObjectsFromVideo(
    input_file_path=os.path.join(execution_path, "traffic.mp4"),
    output_file_path=os.path.join(execution_path, "video_frame_analysis"),
    frames_per_second=20, per_frame_function=forFrame,
    minimum_percentage_probability=30, return_detected_frame=True)
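The final listing above begins part-way through a per_frame_function; a hedged sketch of the setup such a callback needs, where only the names used in the fragment (color_index, resized, this_colors, labels, sizes, returned_frame) come from it and everything else is an assumption:

# Hedged reconstruction of the setup the fragment above relies on; only the
# names it uses come from the fragment, the rest is an assumption.
import os
import matplotlib.pyplot as plt
from imageai.Detection import VideoObjectDetection

execution_path = os.getcwd()
resized = False
# One display colour per class name ImageAI may report (placeholder values).
color_index = {"car": "red", "person": "blue", "bus": "green", "truck": "orange"}

def forFrame(frame_number, output_array, output_count, returned_frame):
    plt.clf()
    this_colors, labels, sizes = [], [], []
    for eachItem in output_count:
        labels.append(eachItem + " = " + str(output_count[eachItem]))
        sizes.append(output_count[eachItem])
        # ...the fragment above continues from here (this_colors.append, plotting)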