def start_processing(self):
        
        if self.input_source is not None:

            file_stream = FileVideoStream(self.input_source, queue_size=self.app_imutils_queue_size).start()
            
            time.sleep(0.001)            
            detector = self.initializeDetector()
            self.tracker = self.initializeTracker()
            self.setDataset()
                
            fps = FPS().start()
            
            frame_id = 0
            all_boxes = {}
            tracking_boxes = []

            while (not self.source_changed) and file_stream.running(): 
                
                time.sleep(0.001)
                
                try:
                        
                    self.image = file_stream.read()
                
                    if frame_id % self.app_process_every_nth_frame == 0:                            
                                           
                        if self.image is not None:

                            vis = self.image.copy()                            
                            cls_boxes = None 
                            cls_segms = None 
                            cls_keyps = None 
                            timers = defaultdict(Timer)
                            t = time.time()
                            fps.update()
                            fps.stop()
                            
                            self.logger.info('Processing file {}'.format(self.input_source))
                            self.logger.info('Processing frame {}'.format(frame_id))
                            
                            fps_text = "FPS " + "{:.2f}".format(fps.fps())
                            self.logger.info('FPS: ' + fps_text)        
                            
                            if self.app_do_detection and not self.source_changed:
                                
                                cls_boxes, cls_segms, cls_keyps = self.infer(vis, timers, detector)     
                                                                                                
                                all_boxes[frame_id] = cls_boxes
                                
                                self.logger.info('Inference time: {:.3f}s'.format(time.time() - t))          
                                
                                for k, v in timers.items():
                                    self.logger.info(' | {}: {:.3f}s'.format(k, v.average_time))
                                if frame_id == 0:
                                    self.logger.info(
                                        ' \\ Note: inference on the first image will be slower than the '
                                        'rest (caches and auto-tuning need to warm up)'
                                    )            
                                fps_text = "FPS " + "{:.2f}".format(fps.fps())
                                self.logger.info('FPS: ' + fps_text)
            
                                                            
                                if self.app_display_det_result_img:
                                    if frame_id % self.app_display_det_every_nth_frame == 0:
                                        vis = self.visualize_det(vis, cls_boxes, fps_text, 
                                                                 segms=cls_segms, keypoints=cls_keyps)
      
            
                                if self.app_save_det_result_img:
                                    if not self.app_display_det_result_img:
                                        ret = self.visualize_det(vis, cls_boxes, fps_text, 
                                                                 segms=cls_segms, keypoints=cls_keyps)
                                        self.save_det_result_img(ret, frame_id)
                                    else:
                                        self.save_det_result_img(vis, frame_id)
                                    
                            if self.app_do_tracking and not App.is_list_empty(cls_boxes) and not self.source_changed:
                                
                                t = time.time()
                                tmp_tracking_boxes = self.track(self.image.copy(), cls_boxes, frame_id, timers)
                                                                
                                self.logger.info('Tracking time (incl. feature generation): {:.3f}s'.format(time.time() - t))

                                if self.app_display_tracking_result_img:
                                    if frame_id % self.app_display_tracking_every_nth_frame == 0:
                                        vis = self.visualize_tracking(vis, tmp_tracking_boxes, fps_text)                                       
                                        
                                                                
                                if self.app_save_tracking_result_img:
                                    if not self.app_display_tracking_result_img:
                                        ret = self.visualize_tracking(vis, tmp_tracking_boxes, fps_text)
                                        self.save_tracking_result_img(ret, frame_id)
                                    else:
                                        self.save_tracking_result_img(vis, frame_id)
                                
                                tracking_boxes = self.extend_result_boxes(frame_id, tracking_boxes, tmp_tracking_boxes)
                                                                
                            if self.app_display:
                                cv2.imshow('source', vis)  
                                ch = 0xFF & cv2.waitKey(1)
                                if ch == 27:
                                    break
         
                            self.logger.info('Total time frame {}: {:.3f}s'.format(frame_id, time.time() - t))

                    # Count every frame read, not only the processed ones;
                    # otherwise frame_id stalls after the first frame whenever
                    # app_process_every_nth_frame is greater than 1.
                    frame_id += 1

                except Exception:
                    # sys.exc_info() returns (type, value, traceback); adding the
                    # first two raises a TypeError, so format them instead.
                    self.logger.error('{}: {}'.format(sys.exc_info()[0], sys.exc_info()[1]))
                    #continue
            
            if self.app_save_det_result_boxes:                                
                self.save_det_result_boxes(all_boxes)    
                self.logger.info('Wrote detections to: {}'.format(os.path.abspath(self.app_save_det_result_path)))
                
            if self.app_save_tracking_result_boxes:                                
                self.save_tracking_result_boxes(list(tracking_boxes))
                self.logger.info('Wrote tracks to: {}'.format(os.path.abspath(self.app_save_tracking_result_path)))
                
            file_stream.stop()
            self.source_changed = False
            
            if not self.bulk_processing:
                self.start_processing()
            else:
                self.root.quit()
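
All of these examples lean on the same producer/consumer idea: FileVideoStream decodes frames on a background thread into a bounded queue, so the processing loop never blocks on disk or decode I/O. Below is a minimal sketch of that pattern, assuming only OpenCV and the standard library; the class and method names mirror imutils, but this is an illustration, not the library's exact code.

import threading
from queue import Queue

import cv2

class TinyFileVideoStream:
    # Illustrative re-implementation of the imutils FileVideoStream idea.
    def __init__(self, path, queue_size=128):
        self.stream = cv2.VideoCapture(path)
        self.stopped = False
        self.Q = Queue(maxsize=queue_size)

    def start(self):
        threading.Thread(target=self._update, daemon=True).start()
        return self

    def _update(self):
        # Producer: decode frames until the file ends or stop() is called.
        while not self.stopped:
            grabbed, frame = self.stream.read()
            if not grabbed:
                self.stopped = True
                break
            self.Q.put(frame)  # blocks while the queue is full
        self.stream.release()

    def read(self):
        # Consumer side; blocks until a frame is available.
        return self.Q.get()

    def more(self):
        return not self.Q.empty() or not self.stopped

    def stop(self):
        self.stopped = True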
Example No. 2
# import the necessary packages
#from imutils.video import FileVideoStream
from filevideostream import FileVideoStream
from imutils.video import FPS
import numpy as np
import argparse
import imutils
import time
import cv2

path = "/home/hugo/stream.flv"
fvs = FileVideoStream(path).start()
time.sleep(1.0)

# start the FPS timer
fps = FPS().start()
reboot = False

# loop over frames from the video file stream
while True:

	if fvs.more() or reboot:
		frame = fvs.read()
		reboot = False
		# display the size of the queue on the frame
		cv2.putText(frame, "Queue Size: {}".format(fvs.Q.qsize()),
			(10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)	
 
		# show the frame and update the FPS counter
		cv2.imshow("Frame", frame)
		cv2.waitKey(1)
Example No. 3
import argparse
import imutils
import numpy as np
import time
import cv2
from imutils.video import FPS
from filevideostream import FileVideoStream


# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", required=True,
	help="path to input video file")
args = vars(ap.parse_args())

# start the file video stream thread and allow the buffer to
# start to fill
print("[INFO] starting video file thread...")
fvs = FileVideoStream(args["video"], 128).start()
time.sleep(1.0)

# start the FPS timer
fps = FPS().start()

# loop over frames from the video file stream
while fvs.more():
	# grab the frame from the threaded video file stream, resize
	# it, and convert it to grayscale (while still retaining 3
	# channels)
	frame = fvs.read()
	frame = imutils.resize(frame, width=300)
	frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
	frame = np.dstack([frame, frame, frame])
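
The np.dstack call above is what keeps three channels after the grayscale conversion, so downstream code written for BGR frames keeps working. A quick shape check on a toy array illustrates the trick:

import numpy as np

gray = np.zeros((240, 320), dtype=np.uint8)  # toy single-channel frame
stacked = np.dstack([gray, gray, gray])      # replicate into 3 channels
print(stacked.shape)                         # (240, 320, 3)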
Example No. 4
from filevideostream import FileVideoStream
import numpy as np
import argparse
import time
import cv2

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", required=True,
	help="path to input video file")
args = ap.parse_args()
print("[INFO] starting video file thread...")
fvs = FileVideoStream(args.video).start()
time.sleep(1.0)
index = 1
start = time.time()
while fvs.more():
    s = time.time()
    exist, end = fvs.more()
    if end:
        break
    if not exist:
        #print('No frame')
        time.sleep(0.001)
        continue
    frame = fvs.read()
    e = time.time()

    cv2.putText(frame, "Hello World", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)	
    cv2.putText(frame, "FPS:%3.1f"%(1 / (e - s)), (10, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)	
    img = frame.copy()
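
The FPS value drawn above is 1 / (e - s) for a single frame, an instantaneous figure that jumps around a lot. A small exponential-moving-average helper (a sketch; the class name is made up) gives a steadier readout:

class SmoothedFPS:
    # Exponential moving average over per-frame durations.
    def __init__(self, alpha=0.1):
        self.alpha = alpha
        self.avg = None

    def update(self, duration):
        if self.avg is None:
            self.avg = duration
        else:
            self.avg = self.alpha * duration + (1 - self.alpha) * self.avg
        return 1.0 / self.avg if self.avg > 0 else 0.0

Per frame, rate = fps_meter.update(e - s) would then replace the raw 1 / (e - s).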
Example No. 5

import threading

from imageai.Detection import ObjectDetection
from imutils.video import FileVideoStream  # or the custom filevideostream module used above

use_tinymodel = False
model_path = "./models/yolo-tiny.h5"
detector = ObjectDetection()

if not use_tinymodel:
    model_path = "./models/yolo.h5"
    detector.setModelTypeAsYOLOv3()
else:
    # Tiny model weights need the tiny model type; the original set
    # the full YOLOv3 type in both branches.
    detector.setModelTypeAsTinyYOLOv3()

detector.setModelPath(model_path)
detector.loadModel()

fvs = FileVideoStream("./input/parking-lot.mp4")
# Start reading input video file   
fvs.start()

threads = []

main_thread = threading.Thread(target=main)
threads.append(main_thread)
main_thread.start()

monitor_state_thread = threading.Thread(target=monitor_state)
threads.append(monitor_state_thread)
monitor_state_thread.start()


for thread in threads:
    thread.join()
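
The main and monitor_state functions these threads run are not included in the snippet. A minimal sketch of what they might look like, assuming main consumes frames from fvs and monitor_state signals shutdown when the stream finishes; the stop_event flag and both bodies are assumptions, not the original code.

import time

stop_event = threading.Event()  # assumed shared shutdown flag

def main():
    # Consume frames from the shared stream and hand them to the detector.
    while not stop_event.is_set():
        frame = fvs.read()
        if frame is None:
            break
        # ... run `detector` on `frame` and act on the results ...

def monitor_state():
    # Watch the stream and signal shutdown once it stops producing frames.
    while not stop_event.is_set():
        if not fvs.running():
            stop_event.set()
        time.sleep(0.5)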
Example No. 6
    cv2.putText(frame, "Spots:", (1090, 60), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 1)

    #Label each parking spot
    for level in parking_dict:
        for lot, rect in parking_dict[level].items():
            x_value = rect[0] + 3
            y_value = rect[1] - 15
            if (int(lot[2:]) > 23):
                y_value = rect[3] + 15
            cv2.putText(frame, "{}".format(lot), (x_value, y_value), cv2.FONT_HERSHEY_SIMPLEX, 0.25, (0, 255, 0), 1)
            
    return frame


# Start reading input video file   
fvs = FileVideoStream("./input/video_black_bars.mp4")
fvs.start()

threads = []

main_thread = threading.Thread(target=main)
threads.append(main_thread)
main_thread.start()

monitor_state_thread = threading.Thread(target=monitor_state)
threads.append(monitor_state_thread)
monitor_state_thread.start()

for thread in threads:
    thread.join()