def main():
    """Analyze every frame of the input video with the selected model,
    showing a text spinner and (optionally) each annotated frame.

    Command-line arguments are parsed by check_args(); prints a summary
    (frame count, elapsed seconds, average FPS) when the video ends.
    """
    input_file_name, model_name, show_video = check_args()

    print(f"Input Video: {input_file_name}")
    print(f"Model Name: {model_name}")

    detector = Detector()
    detector.initialize(model_name, input_file_name)

    current_frame = 0
    # Spinner glyphs indexed by frame number modulo 3.
    spinner = (" \\", " |", " /")

    start_time = time.time()
    while True:
        can_read_frames, frame = detector.analyze_frame(current_frame + 1)
        if not can_read_frames:
            # Stop BEFORE counting the failed read, so current_frame is the
            # number of frames actually analyzed (the original over-counted
            # by one).
            break
        current_frame += 1

        print(f"\rAnalyzing Frame {current_frame}{spinner[current_frame % 3]}", end='')
        if show_video and frame is not None:
            cv2.imshow("Frame", frame)
            cv2.waitKey(1)

    analyze_time = int(time.time() - start_time)

    print()
    print(f"Total Frames: {current_frame}")
    print(f"Analyze Time: {analyze_time}s")
    # max(..., 1) guards against ZeroDivisionError when the whole video is
    # analyzed in under one second.
    print(f"Average Frames Per Second: {current_frame // max(analyze_time, 1)}")
class DetectorThread(QThread):
    """Qt worker thread that runs a Detector over a video file and emits
    each analyzed frame to the GUI through signals.

    State flags:
      is_running        -- set by run(); keeps the outer loop alive.
      is_paused         -- gates the inner analysis loop; set by pause()
                           and when the video ends.
      is_video_finished -- True until the first frame is analyzed; used by
                           initialize_detector() to decide whether a fresh
                           Detector setup is needed.
    """

    # Emitted with the analyzed frame (an ndarray) after each successful step.
    update_image_signal = pyqtSignal(np.ndarray)
    # Emitted once when analyze_frame reports no more frames.
    video_finished_signal = pyqtSignal()

    def __init__(self):
        super().__init__()
        # NOTE(review): attribute name shadows the Detector class
        # (PascalCase); kept as-is to preserve the public interface.
        self.Detector = Detector()
        self.current_frame_count = 0  # next frame index passed to analyze_frame
        self.is_running = False
        self.is_paused = False
        self.is_video_finished = True
        self.video_path = ""
        self.model_name = ""

    def set_video_path(self, video_path):
        """Select a new input video; resets progress so the detector is
        re-initialized on the next initialize_detector() call."""
        self.reset()
        self.video_path = video_path

    def set_model_name(self, model_name):
        """Select a new model; resets progress like set_video_path()."""
        self.reset()
        self.model_name = model_name

    def initialize_detector(self):
        """(Re)initialize the detector, but only when no video is mid-analysis."""
        if self.is_video_finished:
            self.Detector.initialize(self.model_name, self.video_path)

    def run(self):
        """Thread entry point: analyze frames until paused or the video ends.

        NOTE(review): while is_paused is True the outer loop busy-spins at
        full CPU; a wait condition or msleep would be gentler — confirm
        whether this is intentional.
        """
        self.is_running = True
        while self.is_running:
            while not self.is_paused:
                can_analyze, analyzed_frame = self.Detector.analyze_frame(
                    self.current_frame_count)
                self.current_frame_count += 1
                if can_analyze:
                    self.is_video_finished = False
                    if analyzed_frame is not None:
                        self.update_image_signal.emit(analyzed_frame)
                else:
                    # End of video: notify the GUI and park the inner loop.
                    self.video_finished_signal.emit()
                    self.is_paused = True

    def reset(self):
        """Mark the video as finished and rewind the frame counter."""
        self.is_video_finished = True
        self.current_frame_count = 0

    def pause(self):
        """Suspend frame analysis (the thread keeps spinning in run())."""
        self.is_paused = True
# ----- Example #3 -----
from Detector import Detector
import os
import cv2
import utils
import numpy as np

# Walk every person's image directory, run the face detector on each image,
# and keep the single most confident detection per image.
dirimgs = os.path.join('/home/xian/imaxes_politicos/Imaxes')
dircaras = os.path.join('/home/xian/imaxes_politicos/Caras')
factor = 2

detector = Detector('SSD-mobilenet-face', threshold=0.5)
detector.initialize()

for person in os.listdir(dirimgs):
    for imgname in os.listdir(os.path.join(dirimgs, person)):
        print(person + ' - ' + imgname)
        imaxe = cv2.imread(os.path.join(dirimgs, person, imgname))
        result = detector.detect(imaxe)
        if not result:
            # No face found in this image.
            continue
        # Highest-confidence detection; ties resolve to the first hit,
        # exactly like argmax over the confidence list.
        detection = max(result, key=lambda det: det.conf)
# ----- Example #4 -----
class VideoProcessor:
    """Runs a detector over video frames, feeds detections to a Tracker,
    and optionally accumulates frames and per-track history."""

    def __init__(self, keep_history=False):
        """
        :param keep_history: when True, every processed frame and each
            track's history are retained (memory grows with video length).
        """
        self.keep_history = keep_history
        self.tracker = Tracker()
        self.tracks_history = []
        self.frames = []
        self.detections = []
        # Defined up front so attribute access before initialize()/
        # process_frame() fails predictably instead of raising
        # AttributeError (they were previously created lazily).
        self.detector = None
        self.image = None

    def initialize(self):
        """Create and initialize the detector (expensive; call once)."""
        self.detector = Detector('SSD-brainlab')
        self.detector.initialize()

    def process_frame(self, frame):
        """Detect objects in *frame*, update the tracker, and record
        history when enabled."""
        self.image = frame
        self.detections = self.detector.detect(frame)
        self.tracker.update_tracks(self.detections)
        if self.keep_history:
            self.frames.append(frame)
            self.update_history()

    def get_last_frame_with_detections(self):
        """Return a copy of the last processed frame with one labelled
        box per track: blue while the track is missing detections,
        green when it matched in the current frame."""
        img_cp = self.image.copy()
        tracks = self.tracker.get_tracks()
        print(str(len(tracks)) + ' tracks')
        for tr in tracks:
            detection = tr.get_last_detection()
            x = int(detection.x_center)
            y = int(detection.y_center)
            w = int(detection.width) // 2
            h = int(detection.height) // 2
            if tr.get_nframes_missing() > 0:
                print('missing frame for track ' + str(tr.get_id()) + ' - nframes_missing: ' + str(tr.get_nframes_missing()))
                color = (255, 0, 0)  # BGR blue: track currently unmatched
            else:
                color = (0, 255, 0)  # BGR green: track matched this frame
            cv2.rectangle(img_cp, (x - w, y - h), (x + w, y + h), color, 2)
            # Filled grey strip above the box serves as the label background.
            cv2.rectangle(img_cp, (x - w, y - h - 20), (x + w, y - h), (125, 125, 125), -1)
            cv2.putText(img_cp, detection.class_name + ' ' + str(tr.get_id()) + ' : %.2f' % detection.conf,
                        (x - w + 5, y - h - 7), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
        return img_cp

    def clear(self):
        """Reset tracker state and drop all accumulated history."""
        self.tracker.clear()
        self.tracks_history = []
        self.frames = []
        self.detections = []

    def update_history(self):
        """Merge the tracker's current tracks into tracks_history:
        update the existing entry with a matching id, or append a new
        TrackHistory for a track seen for the first time."""
        for track in self.tracker.tracks:
            for hist in self.tracks_history:
                if hist.id == track.id:
                    hist.update(track)
                    break
            else:
                self.tracks_history.append(TrackHistory(track))