def detect(img_path):
    img = cv2.imread(img_path)
    assert img is not None
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    raw_img = CVRawImage(img)
    detected = capturer.capture(raw_img)
    print(f'Detected {len(detected)} faces in the image')
    return detected
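# A minimal usage sketch for detect(). It assumes that 'capturer' has already been
# created from the SDK service earlier in this example; the image path below is
# hypothetical.
samples = detect('images/group_photo.jpg')
if not samples:
    print('No faces were found in the image')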
def process_one_face(img_path):
    img = cv2.imread(img_path)
    assert img is not None
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    raw_img = CVRawImage(img)
    detected = capturer.capture(raw_img)
    print(f'Detected {len(detected)} faces in the image')
    assert len(detected) > 0
    return recognizer.processing(detected[0])
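# A short sketch of using process_one_face() to build templates for two photos.
# The file names are hypothetical, and 'recognizer' and 'capturer' are assumed to be
# created beforehand; comparing the two templates with the recognizer's matching call
# is not shown here.
templ_a = process_one_face('images/person_a.jpg')
templ_b = process_one_face('images/person_b.jpg')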
def detect_and_save_templ(img_path, save_path):
    img = cv2.imread(img_path)
    assert img is not None
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    raw_img = CVRawImage(img)
    detected = capturer.capture(raw_img)
    assert len(detected) > 0
    print('Detected', len(detected), 'faces')
    with open(save_path, 'wb') as f:
        recognizer.processing(detected[0]).save(f)
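# A sketch of saving a template for every image in a folder with the function above.
# The directory names are hypothetical; only the standard library is used here.
import os

photos_dir = 'photos'
templates_dir = 'templates'
os.makedirs(templates_dir, exist_ok=True)
for filename in os.listdir(photos_dir):
    if filename.lower().endswith(('.jpg', '.jpeg', '.png')):
        detect_and_save_templ(
            os.path.join(photos_dir, filename),
            os.path.join(templates_dir, filename + '.templ'))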
def camera_thread_func(self):
    try:
        prev_capture_time = time.time() * 1000
        while True:
            # check for stop
            if self._shutdown:
                break

            # sleep some time to get stable frequency
            if self.frame_min_delay_ms > 0:
                now = time.time() * 1000
                sleep_time_ms = max(self.frame_min_delay_ms - (now - prev_capture_time), 0)
                time.sleep(sleep_time_ms / 1000)
                prev_capture_time = time.time() * 1000

            data = ImageAndDepth()
            self.source.get(data)

            if not len(data.color_image):
                self._shutdown = True
            elif data.color_format == raw_image.Format.FORMAT_BGR:
                image = np.reshape(data.color_image, [data.color_height, data.color_width, 3])
                cvri = CVRawImage(image)
                frame_id = self.video_worker.add_video_frame(cvri, self.stream_id)

                self.frames_mutex.wait_one()
                new_queue_element = Pair(frame_id, data)
                self.frames.put(new_queue_element)
                self.frames_mutex.release_mutex()
    except Exception as ex:
        print('\n Worker.CameraThreadFunc exception: "{}"'.format(ex))
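# A hedged sketch of how this camera thread might be started and stopped from the
# surrounding Worker class. Only '_shutdown' and 'camera_thread_func' appear above;
# the 'start', 'stop' and '_camera_thread' names are assumptions for illustration.
import threading

def start(self):
    self._shutdown = False
    self._camera_thread = threading.Thread(target=self.camera_thread_func)
    self._camera_thread.start()

def stop(self):
    self._shutdown = True        # the loop above checks this flag and exits
    self._camera_thread.join()   # wait for the capture loop to finish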
def __init__(self, database_list_filepath: str,
             recognizer: recognizer.Recognizer,
             capturer: Capturer,
             distance_threshold: float):
    self.vw_elements = list()
    self.samples = list()
    self.thumbnails = list()
    self.names = list()

    # read the list of database image paths (one relative path per line)
    file = open(database_list_filepath, 'r')
    files = [x.strip() for x in file.readlines()]
    files = [x for x in files if not x.endswith(".txt")]
    files = list(filter(None, files))
    files = sorted(files)

    dirs = [
        os.path.join(os.path.dirname(database_list_filepath), os.path.dirname(f))
        for f in files
    ]
    files = [os.path.basename(x) for x in files]

    prev_dir = ""
    person_id_counter = 0
    element_id_counter = 0

    for (f, dir) in list(zip(files, dirs)):
        if dir != prev_dir:
            person_id_counter += 1
            # try to open name.txt file in this dir
            try:
                name_file = open(os.path.join(dir, "name.txt"), 'r')
                name = name_file.readline().strip()
            except FileNotFoundError:
                name = ""
        prev_dir = dir

        print(f"processing '{f}' name: '{name}' person_id: {person_id_counter}")

        # read image with opencv
        image_path = os.path.join(dir, f)
        cv_img = cv2.imread(image_path)
        assert cv_img is not None, f"Not opened {image_path}"
        image = CVRawImage(cv_img)
        if image.format != raw_image.Format.FORMAT_BGR:
            print(f"\n\nWarning: can't read image '{f}'\n\n")
            continue

        # capture the face
        captured_samples = capturer.capture(image)
        if len(captured_samples) != 1:
            print(f"\n\nWARNING: detected {len(captured_samples)} faces on '{f}' "
                  f"image instead of one, image ignored \n\n")
            continue
        sample = captured_samples[0]

        # make template
        templ = recognizer.processing(sample)

        # prepare data for VideoWorker
        vw_element = video_worker.DatabaseElement(
            element_id_counter, person_id_counter, templ, distance_threshold)
        element_id_counter += 1

        thumbnail = self.make_thumbnail(sample, name)

        self.vw_elements.append(vw_element)
        self.samples.append(sample)
        self.thumbnails.append(thumbnail)
        self.names.append(name)

    assert element_id_counter == len(self.vw_elements)
    assert element_id_counter == len(self.samples)
    assert element_id_counter == len(self.thumbnails)
    assert element_id_counter == len(self.names)
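# For reference, a hypothetical database layout that this constructor expects: the list
# file contains image paths relative to its own directory, grouped so that each person
# has a separate subdirectory, optionally with a name.txt holding a display name.
# All names below are made up for illustration.
#
#   database/
#       list.txt        # lines such as "person_1/1.jpg", "person_1/2.jpg", "person_2/1.jpg"
#       person_1/
#           1.jpg
#           2.jpg
#           name.txt    # e.g. "Alice"
#       person_2/
#           1.jpg
#           name.txt    # e.g. "Bob"
#
# db = Database('database/list.txt', recognizer, capturer, distance_threshold)
# ('Database' is an assumed name for the class this __init__ belongs to.)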
def detect(img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    raw_img = CVRawImage(img)
    detected = capturer.capture(raw_img)
    print(f'Detected {len(detected)} faces in the image')
    return detected
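# A sketch of calling this in-memory variant of detect() on frames from a webcam.
# Camera index 0 and the frame limit are assumptions made for the example.
cap = cv2.VideoCapture(0)
for _ in range(100):              # process a limited number of frames for the sketch
    ok, frame = cap.read()
    if not ok:
        break
    samples = detect(frame)       # the frame is BGR; detect() converts it to RGB
cap.release()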