def upload_image():
    """Flask view for the image-upload endpoint.

    POST: reserves the next sequential id in the global IMAGES store, saves
    the uploaded file as ``<id>_<original-name>`` under the store's working
    directory, registers an Image record, and returns a success JSON.
    GET/PUT/DELETE: return informational JSON (uploading is POST-only).
    Any other verb aborts with 404.

    Note: the original annotated the return as ``-> jsonify``; ``jsonify``
    is a function, not a type, so the annotation was removed.
    """
    if request.method == 'POST':
        file = request.files['file']
        name = file.filename
        # Reserve a fresh id before building the stored filename so that
        # concurrent uploads do not collide on the same name.
        IMAGES.increase_id()
        image_index = IMAGES.cur_id
        filename = f"{image_index}_{name}"
        path = os.path.join(IMAGES.working_directory, filename)
        img = Image(index=image_index, file_name=filename, path=path)
        IMAGES.add_image(image=img)
        file.save(path)
        return jsonify({'upload': 'Upload successful'})
    elif request.method == 'PUT':
        return jsonify({'upload': 'PUT method is not supported'})
    elif request.method == 'GET':
        return jsonify({'upload': 'Uploading images to server'})
    elif request.method == 'DELETE':
        return jsonify({'upload': 'DELETE method is not supported'})
    else:
        abort(404)
def _visualize_detections(self, image: Image, masks, drawMask=False) -> np.ndarray:
    """Draw detection boxes (and optionally masks) onto the image.

    image:    project Image wrapper; ``image.objects`` holds detections with
              ``type``, ``coordinates`` (y1, x1, y2, x2) and ``scores``.
    masks:    H x W x N mask stack from the Mask R-CNN result.
    drawMask: when True, also blend each object's mask into the frame.
    Returns the annotated BGR frame as uint8.
    """
    canvas = image.read()
    font = cv2.FONT_HERSHEY_DUPLEX
    fontScale = 0.8
    thickness = 2
    for idx, det in enumerate(image.objects):
        # Skip classes the configuration does not ask us to render.
        if det.type not in config.availableObjects:
            continue
        y1, x1, y2, x2 = det.coordinates
        class_row = self.CLASS_NAMES.index(det.type)
        # Palette values are 0..1 floats; scale to 0..255 ints for OpenCV.
        color = [int(c) for c in np.array(self.COLORS[class_row]) * 255]
        text = "{}: {:.1f}".format(det.type, det.scores * 100)
        if drawMask:
            # Take this object's mask slice and blend it into the frame.
            canvas = mrcnn.visualize.apply_mask(
                canvas, masks[:, :, idx], color, alpha=0.6)
        cv2.rectangle(canvas, (x1, y1), (x2, y2), color, thickness)
        # Caption sits 20 px above the box's top-left corner.
        cv2.putText(canvas, text, (x1, y1 - 20), font, fontScale,
                    color, thickness)
    return canvas.astype(np.uint8)
def get_one_image(self, file_id):
    """Fetch one stored image from GridFS by id.

    Returns an Image wrapping the decoded BGR pixels and the stored
    metadata (location, timestamp, _id, tagged), or None when no file
    with that id exists.
    """
    if not self.fs.exists(file_id):
        return None
    im = self.fs.get(file_id)
    # np.frombuffer replaces np.fromstring, which is deprecated and
    # rejected for binary input in modern NumPy.
    data = cv2.imdecode(np.frombuffer(im.read(), dtype=np.uint8), 1)
    return Image(data, im.location, im.timestamp, im._id, im.tagged)
def get_all_untagged_images(self):
    """Return every stored image whose ``tagged`` flag is False.

    Each result is an Image wrapping the decoded BGR pixels plus the
    stored metadata. Returns an empty list when nothing matches.
    """
    images_list = []
    # GridFS metadata query: only untagged files.
    for im in self.fs.find({'tagged': False}):
        # np.frombuffer replaces the deprecated np.fromstring for
        # binary input.
        data = cv2.imdecode(np.frombuffer(im.read(), dtype=np.uint8), 1)
        images_list.append(
            Image(data, im.location, im.timestamp, im._id, im.tagged))
    return images_list
def testInit(self) -> None:
    """Image constructor stores id/name/path with the expected types."""
    test_class = Image(index=self.test_id_1,
                       file_name=self.test_name_1,
                       path=self.test_path_1)
    self.assertEqual(test_class.id, self.test_id_1)
    # assertIsInstance replaces assertEqual(isinstance(...), True):
    # same check, better failure messages.
    self.assertIsInstance(test_class.id, int)
    self.assertEqual(test_class.name, self.test_name_1)
    self.assertIsInstance(test_class.name, str)
    self.assertEqual(test_class.path, self.test_path_1)
    self.assertIsInstance(test_class.path, str)
def pipeline(self, inputPath: str, outputPath: str = None):
    """Run the full detection pipeline on one image file.

    Loads the image at ``inputPath`` (its filename prefix before '_'
    is the camera id), runs Mask R-CNN detection, attaches the parsed
    detections to the Image, writes the annotated frame via
    ``img.write``, and returns the Image object.
    """
    if outputPath:
        dirs.createDirs(os.path.dirname(outputPath))
        filename = os.path.basename(outputPath)
    else:
        filename = os.path.basename(inputPath)
    cameraId = filename.split('_')[0]
    img = Image(inputPath, int(cameraId), outputPath=outputPath)
    # NOTE(review): this read looks redundant (_visualize_detections
    # re-reads the image) but Image.read may cache or validate the
    # file — confirm before removing.
    binaryImage = img.read()
    rawDetections = self._detectByMaskCNN(img)
    img.addDetections(_parseR(self._humanizeTypes(rawDetections)))
    annotated = self._visualize_detections(img, rawDetections['masks'],
                                           drawMask=False)
    img.write(outputPath, annotated)
    return img
def _detectByMaskCNN(self, image: Image):
    """Run the Mask R-CNN model on a single image.

    image: project Image wrapper providing ``getRGBImage()``.
    Returns the model's result dict for the frame — keys 'masks',
    'rois', 'class_ids', 'scores' (see Mask R-CNN docs for details).
    """
    # The model expects a batch; we feed one RGB frame and unwrap the
    # single result.  TODO(review, carried over from original note):
    # check what happens when a stream of images is fed instead of one.
    return self.model.detect([image.getRGBImage()], verbose=1)[0]
def get_images(self, images_ids):
    """Fetch several stored images from GridFS by their ids.

    Missing ids are reported and skipped, so the result may be shorter
    than ``images_ids``. Each result is an Image wrapping the decoded
    BGR pixels and stored metadata.
    """
    images_list = []
    for file_id in images_ids:
        if not self.fs.exists(file_id):
            # Fixed message grammar and included the offending id.
            print(f"file does not exist: {file_id}")
            continue
        im = self.fs.get(file_id)
        # np.frombuffer replaces the deprecated np.fromstring for
        # binary input.
        data = cv2.imdecode(np.frombuffer(im.read(), dtype=np.uint8), 1)
        images_list.append(
            Image(data, im.location, im.timestamp, im._id, im.tagged))
    return images_list
def setUp(self) -> None:
    """Build two image fixtures plus their list and JSON-dict forms.

    Uses os.path.join instead of the original hard-coded Windows '\\'
    separators, so the fixture paths are portable (identical strings on
    Windows, and now also correct on POSIX systems).
    """
    here = os.path.dirname(os.path.abspath(__file__))
    self.test_path = os.path.join(here, "Images")
    self.first_test_file = os.path.join(self.test_path, "0_testImage1.png")
    self.second_test_file = os.path.join(self.test_path, "1_testImage2.jpg")
    self.test_file_list = [self.first_test_file, self.second_test_file]
    self.first_test_image = Image(index=0,
                                  file_name='testImage1.png',
                                  path=self.first_test_file)
    self.second_test_image = Image(index=1,
                                   file_name='testImage2.jpg',
                                   path=self.second_test_file)
    self.test_image_list = [self.first_test_image, self.second_test_image]
    # JSON-ish view used by serialization assertions elsewhere.
    self.test_json_image_list = [self.first_test_image.__dict__,
                                 self.second_test_image.__dict__]
def start_capture(self):
    """Capture loop: store face-bearing frames and link matched suspects.

    Runs until ``self.isStart`` is cleared. Frames failing the face
    filter are dropped; otherwise the frame is saved, each recognized
    tz id is attached to its suspect, and the image is tagged.
    """
    self.isStart = True
    while self.isStart:
        self.video_stream.start()
        frame = self.video_stream.get_frame()
        if frame is None:
            print("invalid frame")
            continue
        # Drop frames that do not pass the face-detection filter.
        if not FaceDetection.framesFilter(frame):
            continue
        image = Image(frame, "Raanana", time.time())
        file_id = self.db.save_image(image)
        for tz in FaceRecognition.find_match(frame):
            print(tz)
            suspect = self.db.get_suspect_by_tz(tz)
            suspect.add_image(file_id)
            self.db.update_suspect(suspect)
            # NOTE(review): tag/update are placed inside the match loop —
            # an image is tagged only when at least one suspect matched,
            # which fits the 'untagged' query elsewhere; confirm intent.
            image.tag()
            self.db.update_image_metadata(image)