Example #1
def objectDection(execution_path, save_path, fileName):
    '''
        Detect objects in each frame of the video.
    '''
    detector = VideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
    detector.loadModel()

    global FILENAME
    FILENAME = fileName.split('.')[0]

    custom_objects = detector.CustomObjects(car=True, truck=True, bus=True)
    video_path = detector.detectCustomObjectsFromVideo(
        custom_objects=custom_objects,
        input_file_path=os.path.join(execution_path, fileName),
        output_file_path=os.path.join(save_path,
                                      fileName.split(".")[0] + "_detected"),
        frames_per_second=30,
        frame_detection_interval=1,
        per_frame_function=forFrame,
        # per_second_function=forSecond,
        minimum_percentage_probability=79,
        # return_detected_frame=True,
        log_progress=True)
    return (fileName.split(".")[0] + "_detected.avi")
Example #2
def detection(input_path, output_path, yolo_model_path):
    """The function opens the input video and goes through each frame.
       Performs object recognition for each frame by using a YOLO model
       and writes the frame including the detection to the output video.
       :param input_path:      input video path
       :type input_path:       string
       :param output_path:     output video path
       :type output_path:      string
       :param yolo_model_path: YOLO model path
       :type yolo_model_path:  string
       :return: None
    """
    detector = VideoObjectDetection()

    # this function sets the model type of the object
    # detection instance you created to the YOLOv3 model
    detector.setModelTypeAsYOLOv3()

    # this function accepts a string that must be the
       # path to the model file; it must correspond to the
       # model type set for the object detection instance
    detector.setModelPath(yolo_model_path)

    # this function loads the model from the path given
    detector.loadModel()

    # the function performs object detection on a video
    # file or video live-feed after the model has been
    # loaded into the instance that was created
    detector.detectCustomObjectsFromVideo(input_file_path=input_path,
                                          output_file_path=output_path,
                                          frames_per_second=20,
                                          log_progress=True)
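Example #2 wraps the whole ImageAI video pipeline in one function. A minimal invocation sketch follows; the file names are placeholders of my own (assumptions), not files referenced by the original snippet:

# Hypothetical paths (assumptions); ImageAI appends ".avi" to the output name.
detection(input_path="traffic.mp4",
          output_path="traffic_detected",
          yolo_model_path="yolo.h5")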
Example #3
def main():
    execution_path = os.getcwd()
    dic = {}

    def forFrame(frame_number, output_array, output_count):
        print("Frame Number : ", frame_number)
        dic[frame_number] = output_array

    video_detector = VideoObjectDetection()
    video_detector.setModelTypeAsYOLOv3()
    video_detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
    video_detector.loadModel(detection_speed="fast")

    video_detector.detectObjectsFromVideo(
        input_file_path=os.path.join(execution_path, "video.mp4"),
        save_detected_video=False,
        frames_per_second=20,
        per_frame_function=forFrame,
        minimum_percentage_probability=30)
    for k, v in dic.items():
        print(str(k) + " : " + str(v))
    video_path = video_detector.detectObjectsFromVideo(
        input_file_path=os.path.join(execution_path, "video.mp4"),
        output_file_path=os.path.join(execution_path, "video_output"),
        frames_per_second=29,
        minimum_percentage_probability=30)
Example #4
def detectionModelLoad(model_path, mod="normal"):
    detector = VideoObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(model_path)
    detector.loadModel(detection_speed=mod)

    return detector
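A hedged usage sketch for the loader above; the model file and video names are assumptions, not part of the original example:

# Load a RetinaNet detector once, then reuse it for a video (paths assumed).
detector = detectionModelLoad("resnet50_coco_best_v2.0.1.h5", mod="fast")
detector.detectObjectsFromVideo(
    input_file_path="input.mp4",
    output_file_path="input_detected",
    frames_per_second=20,
    log_progress=True)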
Example #5
def detected():
    form = detectForm(request.args)
    if form.validate():
        input_path = form.data['inputpath']
        output_path = form.data['outputpath']
        videoName = form.data['videoname']
        try:
            detector = VideoObjectDetection()
            start = time.time()
            detector.setModelTypeAsRetinaNet()
            detector.setModelPath(
                os.path.join(input_path, "resnet50_coco_best_v2.0.1.h5"))
            detector.loadModel(detection_speed="fastest")
            video_path = detector.detectObjectsFromVideo(
                input_file_path=output_path,
                output_file_path=os.path.join(input_path,
                                              "detected" + videoName),
                frames_per_second=10,
                log_progress=True)

            convert(video_path, input_path + "/detected" + videoName)
            end = time.time()
            processtime = end - start
            return jsonify({
                'video_path': video_path,
                'processtime': processtime
            })
        except Exception as e:
            raise e
    return jsonify({'error': 'no correct form'})
Example #6
def detect(var_name, var_model_path):
    #
    ##
    var_in_path = os.path.join(os.getcwd(), "cache", var_name + "IN.mp4")
    var_temp_path = os.path.join(os.getcwd(), "cache", var_name + "TEMP")
    # var_out_path = os.path.join(os.getcwd(), "cache", var_name + "OUT.mp4")
    #
    ##  Initiate the detector.
    var_detector = VideoObjectDetection()
    var_detector.setModelTypeAsRetinaNet()
    var_detector.setModelPath(var_model_path)
    var_detector.loadModel(detection_speed="fast")
    #
    ##  Perform the object detection.
    var_detector.detectObjectsFromVideo(input_file_path=var_in_path,
                                        output_file_path=var_temp_path,
                                        frames_per_second=30,
                                        log_progress=True,
                                        frame_detection_interval=1,
                                        minimum_percentage_probability=15)
    #
    ##  Convert the format of output video to MP4.
    var_clip = moviepy.VideoFileClip("cache/" + var_name + "TEMP.avi")
    var_clip.write_videofile("cache/" + var_name + "OUT.mp4")
    os.remove(var_temp_path + ".avi")
    return "Detect Finish!"
Example #7
def detect_with_imageai_yolov3():

    """
    <-- CODE IF YOU NEED TO LOAD SOME VIDEO -->
    execution_path = os.getcwd()
    detector = VideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(os.path.join(execution_path, "weights/yolo.h5"))
    detector.loadModel()

    video_path = detector.detectObjectsFromVideo(
        input_file_path=os.path.join(execution_path, "example_video.mp4"),
        output_file_path=os.path.join(execution_path, "Yolo.mp4"),
        frames_per_second=20, log_progress=True
    )
    """

    execution_path = os.getcwd()
    camera = cv2.VideoCapture(0)
    detector = VideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(os.path.join(execution_path, "weights/yolo.h5"))
    detector.loadModel()


    video_model = detector.detectObjectsFromVideo(camera_input=camera,
                                                  output_file_path=os.path.join(execution_path, "camera_detected_video"),
                                                  frames_per_second=20,
                                                  log_progress=True,
                                                  minimum_percentage_probability=40)
Example #8
def GetObjectsList(name, expansion):
    path_yolo = os.path.abspath("yolo.h5")
    video = VideoFileClip(f"{name}.{expansion}")
    video = video.set_fps(1)
    video = video.without_audio()
    video = video.fx(vfx.speedx, 5)
    video.write_videofile(f"{name}_temp.{expansion}")

    ListObjects = []

    def forFrame(frame_number, output_array, output_count):
        for k in output_array:
            if k["name"] not in ListObjects:
                ListObjects.append(k["name"])

    video_detector = VideoObjectDetection()
    video_detector.setModelTypeAsYOLOv3()
    video_detector.setModelPath(path_yolo)
    video_detector.loadModel()


    video_detector.detectObjectsFromVideo(
        input_file_path=f"{name}_temp.{expansion}",
        save_detected_video=False,
        per_frame_function=forFrame,
        minimum_percentage_probability=70,
        log_progress=False,
        display_percentage_probability=False)
    os.remove(f"{name}_temp.{expansion}")
    return ListObjects
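A short usage sketch, assuming a clip named sample.mp4 and yolo.h5 both sit in the working directory (an assumption of mine):

# Hypothetical call: returns the unique object names detected in sample.mp4.
objects = GetObjectsList("sample", "mp4")
print(objects)  # e.g. ['car', 'person', ...]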
Example #9
class MyDetector:
    myDetector = None
    def __init__(self):
        execution_path = os.getcwd()
        self.myDetector = VideoObjectDetection()
        # self.myDetector.setModelTypeAsYOLOv3()
        # self.myDetector.setModelPath(os.path.join(execution_path, "yolo.h5"))
        #self.myDetector.loadModel(detection_speed="flash")

    def process_ml(self):
        execution_path = os.getcwd()
        self.myDetector = VideoObjectDetection()
        self.myDetector.setModelTypeAsYOLOv3()
        self.myDetector.setModelPath(os.path.join(execution_path, "yolo.h5"))
        self.myDetector.loadModel(detection_speed="flash")
        video_path = self.myDetector.detectObjectsFromVideo(
            input_file_path=os.path.join(execution_path, "input.mp4"),
            output_file_path=os.path.join(execution_path, "upload/output_detected_1"),
            frames_per_second=29,
            per_frame_function=forFrame,
            per_second_function=forSeconds,
            per_minute_function=forMinute,
            video_complete_function=forFull,
            minimum_percentage_probability=10,
            log_progress=True)
        return video_path
Example #10
def test_video_detection_retinanet_analysis():

    try:
        keras.backend.clear_session()
    except:
        pass

    detector = VideoObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(model_path=os.path.join(
        main_folder, "data-models", "resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel(detection_speed="fastest")
    video_path = detector.detectObjectsFromVideo(
        input_file_path=video_file,
        output_file_path=video_file_output,
        save_detected_video=True,
        frames_per_second=30,
        log_progress=True,
        per_frame_function=forFrame,
        per_second_function=forSecond,
        return_detected_frame=True)

    assert os.path.exists(video_file_output + ".avi")
    assert isinstance(video_path, str)
    os.remove(video_file_output + ".avi")
Example #11
    def Crop_video(self, params):
        global input_video

        if 'videoName' in params.keys():
            input_video = str(params['videoName'])
            print(input_video)
        else:
            return "Error: No Video Name field provided. Please specify a URL."
        detector = VideoObjectDetection()
        detector.setModelTypeAsYOLOv3()
        detector.setModelPath(
            os.path.join(parent, 'Mobilaty\\project\\public\\yolo.h5'))
        detector.loadModel()
        custom_objects = detector.CustomObjects(cell_phone=True)

        video_path = detector.detectCustomObjectsFromVideo(
            custom_objects=custom_objects,
            input_file_path=os.path.join(Base_Video_path, input_video),
            output_file_path=os.path.join(Base_Video_path,
                                          "traffic_custom_detected"),
            save_detected_video=False,
            frames_per_second=1,
            per_frame_function=forFrame)
        os.remove(os.path.join(Base_Video_path, "traffic_custom_detected"))
        return "Done!"
Example #12
def detect_with_imageai_retinanet():

    def forFrame(frame_number, output_array, output_count):
        """Detection of bounding boxes for a frame."""

        print("FOR FRAME ", frame_number)
        print('Object:', output_array[0]['name'])
        print('Probability:', output_array[0]['percentage_probability'])
        print('Bounding box:', output_array[0]['box_points'])
        print("Unique objects: ", output_count[output_array[0]['name']])
        print("------------END OF A FRAME --------------\n\n")


    execution_path = os.getcwd()
    camera = cv2.VideoCapture(0)
    detector = VideoObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(os.path.join(execution_path, "weights/resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel()


    video_model = detector.detectObjectsFromVideo(
        camera_input=camera,
        output_file_path=os.path.join(execution_path, "camera_detected_video"),
        frames_per_second=20,
        per_frame_function=forFrame,
        minimum_percentage_probability=40)
Example #13
    def capture(self, callback, gray=False):
        if not self.cap:
            sys.exit('The capture is not ready')

        while True:
            
            # esc, q
            ch = cv2.waitKey(10) & 0xFF
            if ch == 27 or ch == ord('q'):
                break
            t = cv2.getTickCount()
            
            execution_path = os.getcwd()
            detector = VideoObjectDetection()
            detector.setModelTypeAsTinyYOLOv3()
            detector.setModelPath(os.path.join(execution_path , "yolo-tiny.h5"))
            detector.loadModel()
            video_path = detector.detectObjectsFromVideo(
                    camera_input=self.cap,
                    output_file_path=os.path.join(execution_path, "camera_detected_video"),
                    frames_per_second=10, log_progress=True, minimum_percentage_probability=30)
            print(video_path)
            
        
            if self.openni:
                if not self.cap.grab():
                    sys.exit('Grabs the next frame failed')
                ret, depth = self.cap.retrieve(cv2.CAP_OPENNI_DEPTH_MAP)
Example #14
class VideoThread(Thread):
    opencvCamera = None
    ShouldClose = False
    Video_Detect = None
    OutPath = VideoPath
    #OutPath = r"C:\Users\JackXu\PycharmProjects\untitled\VideoProcessing"
    def __init__(self,CameraObj):
        Thread.__init__(self)
        self.opencvCamera = CameraObj




    def run(self):
        if self.ShouldClose:
            return
        else:
            self.Video_Detect = VideoObjectDetection()
            self.Video_Detect.setModelTypeAsYOLOv3()
            #BeginTime = time.time()
            self.Video_Detect.setModelPath(ModelPath)
            #EndTime = time.time()
            #print(" How long to set and load", EndTime - BeginTime)
            #self.Video_Detect.setModelPath(r"C:\Users\JackXu\PycharmProjects\untitled\yolo.h5")
            self.Video_Detect.loadModel("fastest")
            self.Video_Detect.detectObjectsFromVideo(
                camera_input=self.opencvCamera,
                return_detected_frame=True,
                output_file_path=VideoPath,
                frames_per_second=30,
                minimum_percentage_probability=40,
                per_frame_function=showCV,
                save_detected_video=False)
            self.ShouldClose = True
Example #15
def detection_of_vehicles_from_video(folder1, folder2, findex):
    '''
    Detects and saves the arrays containing bounding boxes of detected
    vehicles from videos of a given folder

    Parameters:
    folder1 : path of the folder containing videos
    folder2 : path of the folder in which arrays are required to be stored
    findex : index number of the first video in folder1 
    '''

    #modifying forFrame function of ImageAI to make a list
    #of bounding box coordinates for vehicles detected in a
    #particular frame
    def forFrame(frame_number, output_array, output_count):

        bboxes = []

        for i in range(len(output_array)):
            bboxes.append(list(output_array[i]['box_points']))

        B.append(bboxes)

    #reading and sorting the filenames of folder1
    videos = glob.glob(folder1 + '/video*.MOV')
    videos = natsort.natsorted(videos)

    #set and load ResNet Model for detection of vehicles
    execution_path = os.getcwd()
    detector = VideoObjectDetection()
    detector.setModelTypeAsRetinaNet()
    #use detector.setModelTypeAsYOLOv3() to use YOLOv3 instead of RetinaNet
    detector.setModelPath(
        os.path.join(
            execution_path,
            "/home/siddhi/Desktop/RoadCrossingAssistant_FY_Project_Data/resnet50_coco_best_v2.0.1.h5"
        ))
    #use model path of yolo.h5 if to use YOLOv3 instead of RetinaNet
    detector.loadModel()
    custom_objects = detector.CustomObjects(bicycle=True,
                                            motorcycle=True,
                                            car=True,
                                            truck=True)

    for video in videos:
        print('processing ' + video)
        B = []
        detector.detectCustomObjectsFromVideo(
            save_detected_video=False,
            custom_objects=custom_objects,
            input_file_path=os.path.join(execution_path, video),
            frames_per_second=30,
            per_frame_function=forFrame,
            minimum_percentage_probability=40)
        B = np.array(B)
        print('saving array for video ' + video + '\n shape of array: ' +
              str(B.shape))
        np.save(folder2 + '/array' + str(findex), B)
        findex = findex + 1
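A hedged call of the function above; the folder names and start index below are placeholders, not values from the original project:

# Assumed layout: video*.MOV clips in ./videos, arrays saved to ./arrays as array<findex>.npy.
detection_of_vehicles_from_video(folder1="videos", folder2="arrays", findex=1)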
Example #16
    def detectObjects(self):
        detector = VideoObjectDetection()
        detector.setModelTypeAsYOLOv3()
        detector.setModelPath("yolo.h5")
        detector.loadModel()
        video_path = detector.detectObjectsFromVideo(
            camera_input=self.cam,
            save_detected_video=True,
            per_frame_function=self.forFrame,
            output_file_path="temp.avi",
            log_progress=True,
            return_detected_frame=True)
        return
Example #17
def load_model():
    global detector
    global graph
    detector = VideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath(os.path.join(execution_path, "video-yolo.h5"))
    detector.loadModel()

    graph = tf.get_default_graph()
Example #18
def test_video_detection_tiny_yolov3(clear_keras_session):

    detector = VideoObjectDetection()
    detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath(model_path=os.path.join(main_folder, "data-models", "yolo-tiny.h5"))
    detector.loadModel(detection_speed="fast")
    video_path = detector.detectObjectsFromVideo(input_file_path=video_file, output_file_path=video_file_output, save_detected_video=True, frames_per_second=30, log_progress=True)

    assert os.path.exists(video_file_output + ".avi")
    assert isinstance(video_path, str)
    os.remove(video_file_output + ".avi")
Example #19
def detect_text(file, output_name):

    detector = VideoObjectDetection()
    detector.setModelTypeAsYOLOv3()
    detector.setModelPath("yolo.h5")
    detector.loadModel()

    video_path = detector.detectObjectsFromVideo(input_file_path=file,
                                                 output_file_path=output_name,
                                                 frames_per_second=29,
                                                 log_progress=True)
    return video_path
Example #20
def detect():
    """
    Count number of people in test video per second
    """
    execution_path = os.getcwd()

    # Add your camera credential
    # camera = cv2.VideoCapture('rtsp://*****:*****@0.0.0.0')

    # Test Video
    camera = cv2.VideoCapture('test.mp4')

    def getSizeVD(vcap):
        """
        Get Size of Camera Frame
        """
        if vcap.isOpened():
            width  = vcap.get(3)
            height = vcap.get(4)
            return width*height

    video_size = getSizeVD(camera)

    def forFrame(frame_number, output_array, output_count):
        """
        Get size of person on frame
        """
        size_list = []
        for item in output_array:
            if item.get('name', '') == 'person':
                if item.get('box_points', []):
                    [x1,y1,x2,y2] = item.get('box_points', [])
                    size = (x2 - x1) * (y2 - y1)
                    size_list.append(size)
        if size_list:
            if max(size_list) > video_size * PERCENT:
                # Add your shell script
                print("Detected Closed Person")

    # Create Video Detection Module based on Yolo
    video_detector = VideoObjectDetection()
    video_detector.setModelTypeAsYOLOv3()
    video_detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
    video_detector.loadModel()

    video_detector.detectObjectsFromVideo(
        camera_input=camera,
        output_file_path=os.path.join(execution_path, "test"),
        # save_detected_video=False,
        frames_per_second=10,
        per_frame_function=forFrame,
        minimum_percentage_probability=30
    )
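The snippet above relies on a module-level PERCENT threshold that is not shown; a minimal sketch of how it might be defined, assuming a 30% frame-coverage cutoff:

# Assumed threshold: a person whose box covers more than 30% of the frame counts as close.
PERCENT = 0.3

detect()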
Example #21
def tinyYoloVideo(video, model, output):
    detector = VideoObjectDetection()
    detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath(model)
    detector.loadModel()
    start_time = time.time()
    video_path = detector.detectObjectsFromVideo(input_file_path=video,
                                                 output_file_path=output,
                                                 frames_per_second=29,
                                                 log_progress=True)
    print(video_path)
    print("Total time: %s seconds" % (time.time() - start_time))
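A usage sketch for tinyYoloVideo; the file names below are assumptions:

# Hypothetical arguments: input clip, TinyYOLOv3 weights, output name (ImageAI adds ".avi").
tinyYoloVideo("traffic.mp4", "yolo-tiny.h5", "traffic_tiny_detected")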
Example #22
def detecting():
    video_detector = VideoObjectDetection()
    video_detector.setModelTypeAsRetinaNet()
    video_detector.setModelPath(os.path.join(execution_path, "resnet50_coco_best_v2.0.1_4.h5"))
    # video_detector.setModelPath(os.path.join(execution_path, "v2.h5"))
    video_detector.loadModel()

    vidcap = VideoCap()
    camera = vidcap.camera()
    video_path = video_detector.detectObjectsFromVideo(
        camera_input=camera,
        save_detected_video=False,
        frames_per_second=20,
        log_progress=True,
        minimum_percentage_probability=30,
        per_frame_function=forFrame,
        return_detected_frame=True)
Example #23
def getVideo(request):
    if request.method == "POST":
        try:
            file = request.FILES['sentFile']
        except:
            return render(request, 'homepage.html')

        import uuid

        unique_filename = str(uuid.uuid4())
        file_name = default_storage.save(unique_filename, file)

        #  Reading file from storage
        file = default_storage.open(file_name)
        file_url = default_storage.url(file_name)


        detector = VideoObjectDetection()
        detector.setModelTypeAsRetinaNet()
        path = os.path.join(settings.MODELS, "resnet50_coco_best_v2.0.1.h5")
        detector.setModelPath(path)
        detector.loadModel(detection_speed='faster')
        item = set()
        def forFrame(frame_number, output_array, output_count):
            for i in output_array:
                item.add(i['name'])

        video_path = detector.detectObjectsFromVideo(
            input_file_path='media/' + unique_filename,
            output_file_path='media/imagetest_video',
            frames_per_second=20,
            log_progress=True,
            minimum_percentage_probability=30,
            save_detected_video=True,
            per_frame_function=forFrame)

        categoryName = objectCategory.objects.filter(object__in=item).values_list('categoryName', flat=True)
        ac = AdvertisementCategory.objects.filter(id__in=categoryName)
        ads = Advertisement.objects.filter(categoryName__in=ac)
        if len(ads)==0:
            return render(request, 'failPredictions.html')
        context = {
            'ac': ac,
            'ads': ads,
        }

        return render(request, 'vid_predictions.html',context)
    else:
        return render(request, 'homepage2.html')
Example #24
class Detection:
    def __init__(self):
        self.path = os.getcwd()

        self.detector = VideoObjectDetection()
        self.detector.setModelTypeAsYOLOv3()
        self.detector.setModelPath("ObjectDetectionTensorflow/yolo.h5")
        self.detector.loadModel(detection_speed="fast")

        self.imgDetector = ObjectDetection()
        self.imgDetector.setModelTypeAsYOLOv3()
        self.imgDetector.setModelPath("ObjectDetectionTensorflow/yolo.h5")
        self.imgDetector.loadModel()

        self.camera = cv2.VideoCapture(0)

    def liveVideo(self):
        #Live to Video
        videoPath = self.detector.detectObjectsFromVideo(
            camera_input=self.camera,
            output_file_path=os.path.join(self.path, "Loaded_Video"),
            frames_per_second=30,
            log_progress=True,
            minimum_percentage_probability=80)
        print(videoPath)
        # detectObjectsFromVideo returns the saved file path (a string), which
        # cv2.imshow cannot display; the annotated video is saved to Loaded_Video.

    def liveVideoShow(self):
        #Live video detection
        while True:
            ret, frame = self.camera.read()

            img = PIL.Image.fromarray(frame)
            img.save("ObjectDetectionTensorflow/images/pic.png")

            detected = self.imgDetector.detectCustomObjectsFromImage(
                input_image="ObjectDetectionTensorflow/images/pic.png",
                output_image_path="ObjectDetectionTensorflow/images/pic.png",
                minimum_percentage_probability=40)

            for eachObject in detected:
                print(eachObject["name"], " : ",
                      eachObject["percentage_probability"], " : ",
                      eachObject["box_points"])
                print("--------------------------------")

            if cv2.waitKey(33) == ord('a'):
                break

            img = mpimg.imread("ObjectDetectionTensorflow/images/pic.png")

            cv2.imshow('video', img)
Example #25
class itemVideoSaveFile:
    def __init__(self,setModelPath):
        self.execution_path = os.getcwd()
        self.detetor = VideoObjectDetection()
        self.detetor.setModelTypeAsRetinaNet()
        self.detetor.setModelPath(os.path.join(self.execution_path,setModelPath))
        self.detetor.loadModel()
    
    def items_VideoSaveFile(self, inputFile, outputFile):
        video_path = self.detetor.detectCustomObjectsFromVideo(
            input_file_path=os.path.join(self.execution_path, inputFile),
            output_file_path=os.path.join(self.execution_path, outputFile),
            frames_per_second=20, log_progress=True)
        print(video_path)
Example #26
def videoDetectorInit() :
    showThreadInfo("VI")
    global video_detector
    if video_detector is None:
        video_detector = VideoObjectDetection()
        video_detector.setModelTypeAsYOLOv3()
        video_detector.setModelPath(os.path.join(execution_path, "yolo.h5")) # Download the model via this link https://github.com/OlafenwaMoses/ImageAI/releases/tag/1.0
        loginfo("model startload ", datetime.datetime.now())
        video_detector.loadModel(detection_speed="normal")
        loginfo("videoDetector init: ", video_detector)
        loginfo("model loaded ", datetime.datetime.now())

    return
Example #27
def videoIdentify(input_dir, output_dir):
    execution_path = os.getcwd()
    detector = VideoObjectDetection()
    detector.setModelTypeAsRetinaNet()
    detector.setModelPath(
        os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
    detector.loadModel()

    video_path = detector.detectObjectsFromVideo(input_file_path=input_dir,
                                                 output_file_path=output_dir,
                                                 frames_per_second=20,
                                                 log_progress=True)
    print(video_path)
Example #28
def start():
    global vf
    ifp = entry_file_path.get()
    ofp = entry_file_path1.get()
    if vf == 0:
        try:
            print("Starting to render video")
            detector = VideoObjectDetection()
            detector.setModelTypeAsYOLOv3()
            detector.setModelPath(os.path.join(exec_path, "yolo.h5"))
            detector.loadModel()
        except:
            # download the model if it is missing, then retry loading it
            wget.download(
                "https://github.com/OlafenwaMoses/ImageAI/releases/download/1.0/yolo.h5"
            )
            print("Starting to render video")
            detector = VideoObjectDetection()
            detector.setModelTypeAsYOLOv3()
            detector.setModelPath(os.path.join(exec_path, "yolo.h5"))
            detector.loadModel()
        detections = detector.detectObjectsFromVideo(
            input_file_path=os.path.join(exec_path, ifp),
            output_file_path=os.path.join(exec_path, ofp),
            frames_per_second=20)
        label = Label(root, text="Successful!", fg="green")
        label.pack()
    elif vf == 1:
        try:
            print("Starting to render photo")
            detector = ObjectDetection()
            detector.setModelTypeAsRetinaNet()
            detector.setModelPath(
                os.path.join(exec_path, "resnet50_coco_best_v2.0.1.h5"))
            detector.loadModel()
        except OSError:
            # download the model if it is missing, then retry loading it
            wget.download(
                "https://github.com/OlafenwaMoses/ImageAI/releases/download/1.0/resnet50_coco_best_v2.0.1.h5"
            )
            print("Starting to render photo")
            detector = ObjectDetection()
            detector.setModelTypeAsRetinaNet()
            detector.setModelPath(
                os.path.join(exec_path, "resnet50_coco_best_v2.0.1.h5"))
            detector.loadModel()
        detections = detector.detectObjectsFromImage(
            input_image=os.path.join(exec_path, ifp),
            output_image_path=os.path.join(exec_path, ofp),
            display_percentage_probability=True,
            display_object_name=True)
        label = Label(root, text="Successful!", fg="green")
        label.pack()
    elif vf == 2:
        image = face_recognition.load_image_file(exec_path + "/" + ifp)
        face_landmarks_list = face_recognition.face_landmarks(image)
        pil_image = Image.fromarray(image)
        d = ImageDraw.Draw(pil_image)
        for face_landmarks in face_landmarks_list:
            for facial_feature in face_landmarks.keys():
                d.line(face_landmarks[facial_feature], width=5)
        pil_image.show()
Example #29
    def start(self):

        detector = VideoObjectDetection()
        detector.setModelTypeAsRetinaNet()
        detector.setModelPath(
            os.path.join(self.execution_path, "resnet50_coco_best_v2.0.1.h5"))
        detector.loadModel()
        detector.detectObjectsFromVideo(camera_input=self.cap,
                                        output_file_path=os.path.join(
                                            self.execution_path,
                                            "video_frame_analysis"),
                                        frames_per_second=30,
                                        per_frame_function=forFrame,
                                        minimum_percentage_probability=70,
                                        return_detected_frame=True)
Example #30
def detect_with_imageai_yolotiny():
    
    execution_path = os.getcwd()
    camera = cv2.VideoCapture(0)
    detector = VideoObjectDetection()
    detector.setModelTypeAsTinyYOLOv3()
    detector.setModelPath(os.path.join(execution_path, "weights/yolo_tiny.h5"))
    detector.loadModel()


    video_model = detector.detectObjectsFromVideo(camera_input=camera,
                                                  output_file_path=os.path.join(execution_path, "camera_detected_video"),
                                                  frames_per_second=20,
                                                  log_progress=True,
                                                  minimum_percentage_probability=40)
Example #31
from imageai.Detection import VideoObjectDetection
import os

execution_path = os.getcwd()

detector = VideoObjectDetection()
detector.setModelTypeAsYOLOv3()
detector.setModelPath(os.path.join(execution_path, "yolo.h5"))
detector.loadModel()

video_path = detector.detectObjectsFromVideo(
    input_file_path=os.path.join(execution_path, "traffic.mp4"),
    output_file_path=os.path.join(execution_path, "traffic_detected"),
    frames_per_second=20, log_progress=True)
print(video_path)
from imageai.Detection import VideoObjectDetection
import os

execution_path = os.getcwd()

detector = VideoObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(os.path.join(execution_path, "resnet50_coco_best_v2.0.1.h5"))
detector.loadModel(detection_speed="flash")

custom_objects = detector.CustomObjects(person=True, bicycle=True, motorcycle=True)

video_path = detector.detectCustomObjectsFromVideo(
    custom_objects=custom_objects,
    input_file_path=os.path.join(execution_path, "traffic-small.mp4"),
    output_file_path=os.path.join(execution_path, "traffic_small_custom_flash_detected"),
    frames_per_second=20, log_progress=True)
print(video_path)