def __load_image(self):
    """Loads an image from disk and runs face identification on it."""
    if not self.face_identifier.has_ai85_adapter():
        self.__show_adapter_error()
        return

    img_path, _ = QtWidgets.QFileDialog.getOpenFileName(
        self, 'Open file', directory='',
        filter="Image files (*.jpg *.jpeg *.bmp *.png)")
    if img_path == '':
        return
    print(img_path)

    # Load the file into a QImage and convert it to a numpy array for OpenCV.
    with open(img_path, 'rb') as img_file:
        content = img_file.read()
    img = QImage()
    img.loadFromData(content)
    img_np = cvt_qimage_to_img(img)

    # Resize to the 56x56 input resolution expected by the identifier.
    dim = (56, 56)
    resized_img = cv2.resize(img_np, dim, interpolation=cv2.INTER_AREA)
    if resized_img is not None:
        img_rgb = cv2.cvtColor(resized_img, cv2.COLOR_BGR2RGB)

        # Identify the face and draw the returned bounding box on the captured image.
        box = self.__identify_face(img_rgb)
        img_rgb = cv2.rectangle(img_rgb, (box[0], box[1]), (box[2], box[3]),
                                (0, 255, 0), 1)
        self.__set_captured_image(cvt_img_to_qimage(img_rgb))

        # Scale the bounding box back up and draw it on the full-size preview image.
        preview_img = cvt_qimage_to_img(img)
        preview_img_rgb = cv2.cvtColor(preview_img, cv2.COLOR_BGR2RGB)
        y_ratio = preview_img_rgb.shape[0] // img_rgb.shape[0]
        x_ratio = preview_img_rgb.shape[1] // img_rgb.shape[1]
        preview_img_rgb = cv2.rectangle(
            preview_img_rgb,
            (box[0] * x_ratio, box[1] * y_ratio),
            (box[2] * x_ratio, box[3] * y_ratio),
            (0, 255, 0), 1)
        self.__set_preview_image(cvt_img_to_qimage(preview_img_rgb))
def __load_image(self):
    """Loads an image from disk, detects the face with MTCNN and identifies it."""
    if not self.face_identifier.has_ai85_adapter():
        self.__show_adapter_error()
        return

    img_path, _ = QtWidgets.QFileDialog.getOpenFileName(
        self, 'Open file', directory='',
        filter="Image files (*.jpg *.jpeg *.bmp *.png)")
    if img_path == '':
        return
    print(img_path)

    # Load the file into a QImage and show a scaled copy in the preview frame.
    with open(img_path, 'rb') as img_file:
        content = img_file.read()
    img = QImage()
    img.loadFromData(content)
    img_np = cvt_qimage_to_img(img)
    self.__set_preview_image(img.scaled(self.img_size[1], self.img_size[0],
                                        aspectRatioMode=Qt.KeepAspectRatio))

    # Lazily create the MTCNN face detector and crop the face from the image.
    if self.face_detector is None:
        self.face_detector = MTCNN(image_size=80, margin=0, min_face_size=60,
                                   thresholds=[0.6, 0.8, 0.92], factor=0.85,
                                   post_process=True, device='cpu')
    img = get_face_image(img_np, self.face_detector)

    if img is not None:
        if img.shape == (160, 120, 3):
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            self.__set_captured_image(cvt_img_to_qimage(img))
            self.__identify_face(img)
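# The cvt_qimage_to_img / cvt_img_to_qimage helpers used above are not shown in this
# section. The sketch below is one minimal possible implementation, assuming BGR numpy
# arrays on the OpenCV side and QImage.Format_RGB888 on the Qt side; the helpers in the
# actual repository may differ.
import cv2
import numpy as np
from PyQt5.QtGui import QImage


def cvt_qimage_to_img(qimage):
    """Converts a QImage to a BGR numpy array (illustrative sketch only)."""
    qimage = qimage.convertToFormat(QImage.Format_RGB888)
    width, height = qimage.width(), qimage.height()
    ptr = qimage.constBits()
    ptr.setsize(height * qimage.bytesPerLine())
    arr = np.frombuffer(ptr, dtype=np.uint8).reshape((height, qimage.bytesPerLine()))
    arr = arr[:, :width * 3].reshape((height, width, 3))
    return cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)


def cvt_img_to_qimage(img):
    """Converts a BGR numpy array to a QImage (illustrative sketch only)."""
    rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    height, width, channels = rgb.shape
    return QImage(rgb.data, width, height, channels * width,
                  QImage.Format_RGB888).copy()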
def run(self): """Runs camera capture""" prev = time.time() while True: now = time.time() rval, frame = self.camera.get_frame() if rval: convert_qt_format = cvt_img_to_qimage(frame) qt_img = convert_qt_format.scaled(640, 480, Qt.KeepAspectRatio) if (now - prev) >= self.emit_period: self.change_pixmap.emit(qt_img) prev = now
def __capture_button_pressed(self):
    if not self.capture_busy:
        self.capture_busy = True
        if not self.face_identifier.has_ai85_adapter():
            # Release the busy flag before bailing out so the button does not get stuck.
            self.capture_busy = False
            self.__show_adapter_error()
            return

        # Grab the current preview frame and crop it to the capture rectangle.
        capture = self.preview_frame.pixmap()
        capture = capture.toImage()
        captured_img = cvt_qimage_to_img(capture)
        cropped_img = captured_img[self.camera.start_point[1]:self.camera.end_point[1],
                                   self.camera.start_point[0]:self.camera.end_point[0]]

        # Resize to the capture resolution, convert to RGB and run identification.
        cropped_img = cv2.resize(cropped_img, self.capture_size)
        cropped_img = cv2.cvtColor(cropped_img, cv2.COLOR_BGR2RGB).copy()
        self.__set_captured_image(cvt_img_to_qimage(cropped_img))
        self.__identify_face(cropped_img)
        self.capture_busy = False
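# Usage sketch: the handler above is assumed to be connected to a push button during UI
# setup, with capture_busy initialised to False so re-entrant clicks are ignored while an
# identification is still running. The widget name capture_button is an assumption, not
# confirmed by this section.
#
#     self.capture_busy = False
#     self.capture_button.clicked.connect(self.__capture_button_pressed)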