def run(self, args):
    if args.use_svm:
        settings.USE_SVM = True
        print('use svm')
    else:
        settings.USE_SVM = False
        print('use distance')

    plc = PLCControl()
    face_recognition = Recognition()
    video_capture = VideoProcessor(self.camera.get_url(),
                                   output_queue=frame_queue)
    video_capture.start_processing()

    while True:
        # Capture frame-by-frame
        frame = video_capture.get_latest_frame()
        if frame is None:
            continue

        faces = face_recognition.identify(frame)
        legal = False
        for f in faces:
            recognition_result_processor.put_result(f.result)
            # recognition_result_processor.send_email(f.result)
            if f.result.result == settings.LEGAL:
                legal = True

        if len(faces) > 0:
            recognition_result_processor.push_result()
        if legal:
            plc.open_door()

        self.add_overlays(frame, faces)
        if args.debug:
            cv2.imshow('Video', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # When everything is done, release the capture
    video_capture.stop_processing()
    video_capture.cleanup()
    cv2.destroyAllWindows()
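# --- Hypothetical entry point (not part of the original module) ---
# A minimal sketch of how run() above might be invoked from the command line.
# The two flags mirror the attributes the method reads (args.use_svm,
# args.debug); the DoorController/Camera names are assumptions for illustration.
import argparse

def parse_arguments():
    parser = argparse.ArgumentParser(description='Face recognition door control')
    parser.add_argument('--use_svm', action='store_true',
                        help='classify faces with the SVM instead of embedding distance')
    parser.add_argument('--debug', action='store_true',
                        help='show the annotated video stream in a window')
    return parser.parse_args()

# Example (assumed wiring):
#   controller = DoorController(camera=Camera())
#   controller.run(parse_arguments())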
def main(args):
    video_capture = VideoProcessor(
        'rtsp://{username}:{password}@{ip}/cam/realmonitor?channel=1&subtype=0'
        .format(username=settings.USERNAME,
                password=settings.PASSWORD,
                ip=settings.IP),
        output_queue=frame_queue)
    detection = Detection()

    num = 0
    count = 0
    path = 'faces'
    if not os.path.exists(path):
        os.mkdir(path)
    path = path + '/' + args.id
    if not os.path.exists(path):
        os.mkdir(path)

    print('capture faces for : ', str(args.id))
    video_capture.start_processing()

    while True:
        frame = video_capture.get_latest_frame()
        if frame is None:
            continue

        faces = detection.find_faces(frame)
        for face in faces:
            filename = path + '/' + str(num) + '.jpg'
            if num % 2 == 0:
                cv2.imwrite(filename, face.face_image_raw)
                count = count + 1
            add_overlays(frame, faces, count)
            num = num + 1

        cv2.imshow('face capture', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    print('capture faces num: ', str(num))
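# --- Hypothetical entry point (not part of the original script) ---
# A minimal sketch of how main(args) above could be wired up. Only args.id is
# taken from the code (it names the per-person folder under faces/); the
# argparse layout and __main__ guard are assumptions.
import argparse
import sys

def parse_arguments(argv):
    parser = argparse.ArgumentParser(description='Capture face images for one person')
    parser.add_argument('id', type=str,
                        help='person identifier; images are saved to faces/<id>/')
    return parser.parse_args(argv)

if __name__ == '__main__':
    main(parse_arguments(sys.argv[1:]))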
def main(): """Main function for the program. Everything starts here. :return: None """ global resize_output, resize_output_width, resize_output_height, \ resize_output, resize_output_width, resize_output_height, \ device_count if (not handle_args()): print_usage() return 1 # get list of all the .mp4 files in the image directory input_video_filename_list = os.listdir(input_video_path) input_video_filename_list = [i for i in input_video_filename_list if i.endswith('.mp4')] if (len(input_video_filename_list) < 1): # no images to show print('No video (.mp4) files found') return 1 resting_image = cv2.imread("resting_image.png") if (resting_image is None): resting_image = numpy.zeros((800, 600, 3), numpy.uint8) if (resize_output): resting_image = cv2.resize(resting_image, (resize_output_width, resize_output_height), cv2.INTER_LINEAR) # Set logging level to only log errors mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3) devices = mvnc.enumerate_devices() if len(devices) < 1: print('No NCS device detected.') print('Insert device and try again!') return 1 if (device_count < 1) or (device_count > len(devices)): device_count = len(devices) # Create an object detector processor for each device that opens # and store it in our list of processors obj_detect_list = list() idle_obj_detect_list = list() device_number = 0 for one_device in devices: try: obj_detect_dev = mvnc.Device(one_device) obj_detect_dev.open() print("opened device " + str(device_number)) obj_detector_proc = Yolov2_tiny_Processor(NETWORK_GRAPH_FILENAME, obj_detect_dev, inital_box_prob_thresh=min_score_percent / 100.0, classification_mask=object_classifications_mask, name="object detector " + str(device_number)) if (device_number < device_count): obj_detect_list.append(obj_detector_proc) else: idle_obj_detect_list.append(obj_detector_proc) device_number += 1 except: print("Could not open device " + str(device_number) + ", trying next device") pass if len(obj_detect_list) < 1: print('Could not open any NCS devices.') print('Reinsert devices and try again!') return 1 print("Using " + str(len(obj_detect_list)) + " devices for object detection") print_hot_keys() cv2.namedWindow(cv_window_name) cv2.moveWindow(cv_window_name, 10, 10) cv2.waitKey(1) exit_app = False while (True): for input_video_file in input_video_filename_list : for one_obj_detect_proc in obj_detect_list: print("using object detector: " + one_obj_detect_proc.get_name()) one_obj_detect_proc.drain_queues() # video processor that will put video frames images on the object detector's input FIFO queue video_proc = VideoProcessor(input_video_path + '/' + input_video_file, network_processor_list = obj_detect_list) video_proc.start_processing() frame_count = 0 start_time = time.time() last_throttle_time = start_time end_time = start_time uptime=time.time() while(True): done = False for one_obj_detect_proc in obj_detect_list: try: (filtered_objs, display_image) = one_obj_detect_proc.get_async_inference_result() print("resive result:",time.time()-uptime) uptime=time.time() except : print("exception caught in main") raise # check if the window is visible, this means the user hasn't closed # the window via the X button prop_val = cv2.getWindowProperty(cv_window_name, cv2.WND_PROP_ASPECT_RATIO) if (prop_val < 0.0): end_time = time.time() video_proc.stop_processing() video_proc.cleanup() exit_app = True break running_fps = frame_count / (time.time() - start_time) overlay_on_image(display_image, filtered_objs, running_fps) print("show time:",time.time()-uptime) if 
(resize_output): display_image = cv2.resize(display_image, (resize_output_width, resize_output_height), cv2.INTER_LINEAR) cv2.imshow(cv_window_name, display_image) raw_key = cv2.waitKey(1) if (raw_key != -1): if (handle_keys(raw_key, obj_detect_list) == False): end_time = time.time() exit_app = True done = True break frame_count += 1 #if (one_obj_detect_proc.is_input_queue_empty()): if (not video_proc.is_processing()): # asssume the video is over. end_time = time.time() done = True print('video processor not processing, assuming video is finished.') break #if (frame_count % 100) == 0: if ((time.time() - last_throttle_time) > throttle_check_seconds): #long movie, check for throttling devices # throttling = one_obj_detect_proc.get_device().get_option(mvnc.DeviceOption.RO_THERMAL_THROTTLING_LEVEL) last_throttle_time = time.time() print("movie not done, but going a long time so adjust for throttling") video_proc.pause() do_throttle_adjustment(obj_detect_list, idle_obj_detect_list) video_proc.unpause() if (done) : break frames_per_second = frame_count / (end_time - start_time) print('Frames per Second: ' + str(frames_per_second)) # check for throttling devices and save in throttling list throttling_list = list() for one_obj_detect_proc in obj_detect_list: throttling = one_obj_detect_proc.get_device().get_option(mvnc.DeviceOption.RO_THERMAL_THROTTLING_LEVEL) if (throttling > 0): print("\nDevice " + one_obj_detect_proc.get_name() + " is throttling, level is: " + str(throttling)) throttling_list.append(one_obj_detect_proc) if (not exit_app): # rest between movies, display an image while resting resting_display_image = cv2.resize(resting_image, (display_image.shape[1], display_image.shape[0]), cv2.INTER_LINEAR) cv2.imshow(cv_window_name, resting_display_image) if ((len(throttling_list) > len(idle_obj_detect_list))): # more devices throttling than we have in the idle list # so do extra rest by applying a multiplier to the rest time print("throttling devices... resting") cv2.waitKey(rest_seconds * 1000 * rest_throttling_multiplier) else: cv2.waitKey(rest_seconds * 1000) # remove the throttling devices from the main list and put them at the end so they will # be moved to the idle list with priority for one_throttling in throttling_list: obj_detect_list.remove(one_throttling) obj_detect_list.append(one_throttling) num_idle = len(idle_obj_detect_list) if (num_idle > len(obj_detect_list)): num_idle = len(obj_detect_list) if (num_idle > 0): # replace one of the devices with an idle device for idle_index in range(0, num_idle): #for one_idle_proc in idle_obj_detect_list: obj_detect_list.insert(0, idle_obj_detect_list.pop(0)) for idle_count in range(0, num_idle): idle_obj_detect_list.append(obj_detect_list.pop()) video_proc.stop_processing() video_proc.cleanup() if (exit_app): break if (exit_app): break # Clean up the graph and the device for one_obj_detect_proc in obj_detect_list: cv2.waitKey(1) one_obj_detect_proc.cleanup(True) cv2.destroyAllWindows()
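# --- Sketch of do_throttle_adjustment() (referenced above, not defined here) ---
# A plausible implementation under the same processor API already used in
# main() (get_device(), get_name(), drain_queues()): swap any throttling
# active processor for a rested idle one so the hot device can cool down.
# Treat the exact swap policy as an assumption, not the original code.
def do_throttle_adjustment(obj_detect_list, idle_obj_detect_list):
    if len(idle_obj_detect_list) < 1:
        # nothing to swap in, so nothing we can do
        return
    for index, one_proc in enumerate(obj_detect_list):
        throttling = one_proc.get_device().get_option(mvnc.DeviceOption.RO_THERMAL_THROTTLING_LEVEL)
        if throttling > 0 and len(idle_obj_detect_list) > 0:
            print("swapping out throttling device " + one_proc.get_name())
            one_proc.drain_queues()
            # bring in a rested processor and retire the hot one to the idle list
            obj_detect_list[index] = idle_obj_detect_list.pop(0)
            idle_obj_detect_list.append(one_proc)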
def main(): """Main function for the program. Everything starts here. :return: None """ global resize_output, resize_output_width, resize_output_height, \ obj_detector_proc, resize_output, resize_output_width, resize_output_height, video_proc if (not handle_args()): print_usage() return 1 # get list of all the .mp4 files in the image directory # input_video_filename_list = os.listdir(input_video_path) # input_video_filename_list = [i for i in input_video_filename_list if i.endswith('.mp4')] # if (len(input_video_filename_list) < 1): # # no images to show # print('No video (.mp4) files found') # return 1 # Set logging level to only log errors mvnc.global_set_option(mvnc.GlobalOption.RW_LOG_LEVEL, 3) devices = mvnc.enumerate_devices() if len(devices) < 1: print('No NCS device detected.') print('Insert device and try again!') return 1 # Pick the first stick to run the network # use the first NCS device that opens for the object detection. # open as many devices as we find dev_count = 0 ncs_devices = [] obj_detectors = [] # open as many devices as detected for one_device in devices: print('one device ', one_device, 'dev_count ', dev_count, 'devices ', devices) obj_detect_dev = mvnc.Device(one_device) status = obj_detect_dev.open() ncs_devices.append(obj_detect_dev) obj_detector_proc = SsdMobileNetProcessor( NETWORK_GRAPH_FILENAME, ncs_devices, # obj_detect_dev, inital_box_prob_thresh=min_score_percent / 100.0, classification_mask=object_classifications_mask) obj_detectors.append(obj_detector_proc) print("opened device " + str(dev_count), 'status ', status) dev_count += 1 print('ncs_devices', ncs_devices) cv2.namedWindow(cv_window_name) cv2.moveWindow(cv_window_name, 10, 10) cv2.waitKey(1) #obj_detector_proc = SsdMobileNetProcessor(NETWORK_GRAPH_FILENAME, ncs_devices[0], # obj_detect_dev, # inital_box_prob_thresh=min_score_percent/100.0, # classification_mask=object_classifications_mask) exit_app = False # output file # fourcc = cv2.VideoWriter_fourcc(*"MJPG") fourcc = cv2.VideoWriter_fourcc(*"mp4v") filenum = 1 # keep the number of video files to a reasonable number in the current directory while (filenum < 20): doesexist = os.path.isfile("output" + str(filenum) + ".mp4") if (doesexist == False): out_filename = "output" + str(filenum) + ".mp4" break filenum += 1 print("Using output file name " + out_filename) outfile = cv2.VideoWriter(out_filename, fourcc, 11.0, (640, 480)) while (True): # for input_video_file in input_video_filename_list : # video processor that will put video frames images on the object detector's input FIFO queue # video_proc = VideoProcessor(input_video_path + '/' + input_video_file, # network_processor = obj_detector_proc) # use the video cam (0) ***swb*** video_proc = VideoProcessor( 0, network_processor=obj_detectors[0]) # obj_detector_proc) video_proc.start_processing() frame_count = 0 start_time = time.time() end_time = start_time while (True): try: (filtered_objs, display_image ) = obj_detector_proc.get_async_inference_result() except: print("exception caught in main") raise # check if the window is visible, this means the user hasn't closed # the window via the X button prop_val = cv2.getWindowProperty(cv_window_name, cv2.WND_PROP_ASPECT_RATIO) if (prop_val < 0.0): end_time = time.time() video_proc.stop_processing() exit_app = True break overlay_on_image(display_image, filtered_objs) if (resize_output): display_image = cv2.resize( display_image, (resize_output_width, resize_output_height), cv2.INTER_LINEAR) cv2.imshow(cv_window_name, display_image) 
            outfile.write(display_image)

            raw_key = cv2.waitKey(1)
            if raw_key != -1:
                if handle_keys(raw_key, obj_detector_proc) == False:
                    end_time = time.time()
                    exit_app = True
                    video_proc.stop_processing()
                    continue
            frame_count += 1

            if obj_detector_proc.is_input_queue_empty():
                end_time = time.time()
                print('Neural Network Processor has nothing to process, assuming video is finished.')
                break

        frames_per_second = frame_count / (end_time - start_time)
        print('Frames per Second: ' + str(frames_per_second))

        throttling = obj_detect_dev.get_option(mvnc.DeviceOption.RO_THERMAL_THROTTLING_LEVEL)
        if throttling > 0:
            print("\nDevice is throttling, level is: " + str(throttling))
            print("Sleeping for a few seconds....")
            cv2.waitKey(2000)

        # video_proc.stop_processing()
        cv2.waitKey(1)
        video_proc.cleanup()

        if exit_app:
            break
    # if (exit_app):
    #     break

    # Clean up the graph and the device
    obj_detector_proc.cleanup()
    obj_detect_dev.close()
    obj_detect_dev.destroy()
    cv2.destroyAllWindows()
    outfile.release()
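# --- Sketch of handle_keys() (referenced above, not defined here) ---
# A minimal sketch assuming only that a False return tells the caller to exit;
# the 'q'/ESC bindings are an assumption, not the original key map.
def handle_keys(raw_key, obj_detector_proc):
    ascii_code = raw_key & 0xFF
    if ascii_code == ord('q') or ascii_code == 27:  # 'q' or ESC requests exit
        return False
    return True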