class PreparingService(threading.Thread):

    def __init__(self, sending_queue, stop_dict, cfg, ext_ratio=0.1):
        super(PreparingService, self).__init__()
        self.cfg = cfg
        self.device_id = get_mac()
        self.lp_detector = LicensePlateDetector(cfg.yolo_model)
        self.segmentation = SegmentCharacter()
        self.sending_queue = sending_queue
        self.image_grabber = ImageGrabber(cfg.camera.url, cfg.camera.usb)
        self.image_grabber.start()
        self.ext_ratio = ext_ratio
        self.stop_dict = stop_dict

    def run(self):
        while not self.stop_dict["PreparingService"]:
            try:
                if not self.image_grabber.stop:
                    image = self.image_grabber.get_frame()
                    bboxes, labels, conf_scores = self.lp_detector.detect(image)

                    # Coordinates of the characters inside each license plate
                    char_coords = []
                    # Coordinates of each detected license plate
                    coord_boxes = []
                    for i, bbox in enumerate(bboxes):
                        conf_score = conf_scores[i]
                        coord_box = [int(val) for val in bbox]
                        width = coord_box[2] - coord_box[0]
                        height = coord_box[3] - coord_box[1]

                        # Expand the box by ext_ratio on every side, clamped
                        # to the image boundaries.
                        coord_box[0] = max(
                            0, int(coord_box[0] - width * self.ext_ratio))
                        coord_box[1] = max(
                            0, int(coord_box[1] - height * self.ext_ratio))
                        coord_box[2] = min(
                            image.shape[1],
                            int(coord_box[2] + width * self.ext_ratio))
                        coord_box[3] = min(
                            image.shape[0],
                            int(coord_box[3] + height * self.ext_ratio))
                        lp_image = image[coord_box[1]:coord_box[3],
                                         coord_box[0]:coord_box[2]]
                        # print(lp_image.shape)

                        processing_time = time.time()
                        char_coord_perbox = self.segmentation.segment(lp_image)
                        # print("Processing time:", time.time() - processing_time)

                        # Keep plates with 1-10 segmented characters and pad
                        # the character list to a fixed length of 10.
                        if 0 < len(char_coord_perbox) <= 10:
                            char_coord_perbox = pad_or_truncate(
                                char_coord_perbox, 10)
                            coord_boxes.append(coord_box)
                            char_coords.append(char_coord_perbox)

                    package = pickle.dumps({
                        "image": image,
                        "coord_boxes": coord_boxes,
                        "char_coords": char_coords,
                        "deviceID": self.device_id
                    })
                    self.sending_queue.put(package)
            except Exception as e:
                print(str(e))

        # The service was asked to stop: shut down the grabber thread as well.
        self.image_grabber.stop = True
        self.image_grabber.join()
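# A minimal wiring sketch for the service above (an assumption, not code from
# this repository): it mirrors the anyconfig/munch "settings.yaml" pattern used
# by the face-detector test harness below and simply drains the queue in the
# main thread. The queue size and the print-based consumer are hypothetical;
# the project presumably pairs PreparingService with its own sending service.
if __name__ == "__main__":
    import pickle
    import queue

    import anyconfig
    import munch

    cfg = munch.munchify(anyconfig.load("settings.yaml"))

    # Shared primitives expected by PreparingService: a queue of pickled
    # packages and a per-service stop flag checked by run().
    sending_queue = queue.Queue(maxsize=32)
    stop_dict = {"PreparingService": False}

    service = PreparingService(sending_queue, stop_dict, cfg)
    service.start()

    try:
        while True:
            # Hypothetical consumer: unpack one package produced by run().
            package = pickle.loads(sending_queue.get())
            print(package["deviceID"], len(package["coord_boxes"]), "plates")
    except KeyboardInterrupt:
        # Ask the worker to leave its loop, then wait for it to clean up.
        stop_dict["PreparingService"] = True
        service.join()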
        borderValue=0.0)
    return warped


if __name__ == "__main__":
    import anyconfig
    import munch
    from image_grabber import ImageGrabber
    import cv2

    opt = anyconfig.load("settings.yaml")
    opt = munch.munchify(opt)

    detector = FaceDetector(opt.face_detector.model_path, -1)
    imageGrabber = ImageGrabber(opt.camera.url, opt.camera.fps,
                                opt.camera.push2queue_freq, opt.camera.rgb)
    imageGrabber.start()
    fps = FPS().start()

    while not imageGrabber.stop:
        image = imageGrabber.get_frame()
        bboxes, points = detector.detect(image, 1.0)

        # Comment this stuff if you want to test FPS
        aligned_faces = []
        for i, bbox in enumerate(bboxes):
            conf_score = bbox[4]
            coords_box = [int(val) for val in bbox[:4]]
            if conf_score < 0.5:
                continue
            x_min, y_min, x_max, y_max = coords_box
            for point in points[i]:
                cv2.circle(image, (point[0], point[1]), 1, (0, 255, 0), 3)
            cv2.rectangle(image, (x_min, y_min), (x_max, y_max), (0, 255, 0),