def get_inference_helper(self):
    """Attach a RetinaFace detection helper built from ``self.inference_config``.

    Only the 'triton' backend is supported; any other configured backend
    name raises NotImplementedError.
    """
    if self.inference_config['name'] != 'triton':
        raise NotImplementedError(
            f"{self.inference_config['name']} helper for retina face detect not implement"
        )
    helper = TritonInferenceHelper(
        'RetinaFace',
        self.inference_config['triton_url'],
        self.inference_config['triton_port'],
        'RetinaFace',
        1,
    )
    # Fixed 512x512 3-channel input; ([0,0,0], [1,1,1]) presumably
    # per-channel (mean, scale) — confirm in TritonInferenceHelper.
    helper.add_image_input('INPUT__0', (512, 512, 3), '识别用的图像',
                           ([0, 0, 0], [1, 1, 1]))
    helper.add_output('OUTPUT__0', (1, 16128, 2), 'face classification')
    helper.add_output('OUTPUT__1', (1, 16128, 4), 'box predict')
    helper.add_output('OUTPUT__2', (1, 16128, 10), 'landmark')
    self.inference_helper = helper
def get_inference_helper(self):
    """Attach a QR-code detection helper built from ``self.inference_config``.

    Only the 'triton' backend is supported; any other configured backend
    name raises NotImplementedError.
    """
    if self.inference_config['name'] == 'triton':
        inference_helper = TritonInferenceHelper(
            'QRCodeDetect',
            self.inference_config['triton_url'],
            self.inference_config['triton_port'],
            'QRCodeDetect',
            1,
        )
        # Variable-size single-channel (grayscale) input; ([0], [255])
        # presumably (mean, scale) — confirm in TritonInferenceHelper.
        inference_helper.add_image_input('INPUT__0', (-1, -1, 1),
                                         '检测用灰度的图像', ([0, ], [255, ]))
        # FIX: the original passed (-1), which is just the int -1 — every
        # other helper in this file passes a tuple for the output shape,
        # so use the one-element tuple (-1,) for consistency.
        inference_helper.add_output('OUTPUT__0', (-1,), 'anchor location')
        inference_helper.add_output('OUTPUT__1', (-1,), 'anchor score')
        self.inference_helper = inference_helper
    else:
        raise NotImplementedError(
            f"{self.inference_config['name']} helper for qrcode detect not implement"
        )
def get_inference_helper(self):
    """Attach an UltraLightFaceDetect helper built from ``self.inference_config``.

    Only the 'triton' backend is supported; any other configured backend
    name raises NotImplementedError.
    """
    if self.inference_config['name'] != 'triton':
        raise NotImplementedError(
            f"{self.inference_config['name']} helper for ultra light face detect not implement"
        )
    helper = TritonInferenceHelper(
        'UltraLightFaceDetect',
        self.inference_config['triton_url'],
        self.inference_config['triton_port'],
        'UltraLightFaceDetect',
        1,
    )
    # 320x240 3-channel input; ([127]*3, [128]*3) presumably per-channel
    # (mean, scale) — confirm in TritonInferenceHelper.
    helper.add_image_input('INPUT__0', (320, 240, 3), '识别用的图像',
                           ([127, 127, 127], [128, 128, 128]))
    helper.add_output('OUTPUT__0', (1, 4420, 2), 'detect score')
    helper.add_output('OUTPUT__1', (1, 4420, 4), 'box predict')
    self.inference_helper = helper
def get_inference_helper(self):
    """Attach a 106-point face landmark helper built from ``self.inference_config``.

    Only the 'triton' backend is supported; any other configured backend
    name raises NotImplementedError.
    """
    if self.inference_config['name'] != 'triton':
        raise NotImplementedError(
            f"{self.inference_config['name']} helper for face alignment 106p not implement"
        )
    helper = TritonInferenceHelper(
        'Landmark106p',
        self.inference_config['triton_url'],
        self.inference_config['triton_port'],
        'Landmark106p',
        1,
    )
    # 192x192 RGB input; ([0,0,0], [1,1,1]) presumably (mean, scale) —
    # confirm in TritonInferenceHelper. Output is 212 = 106 (x, y) pairs.
    helper.add_image_input('INPUT__0', (192, 192, 3), '检测用rgb的图像',
                           ([0, 0, 0], [1, 1, 1]))
    helper.add_output('OUTPUT__0', (1, 212), '回归的坐标')
    self.inference_helper = helper
def get_inference_helper(self):
    """Attach an Asian-face embedding helper built from ``self.inference_config``.

    Only the 'triton' backend is supported; any other configured backend
    name raises NotImplementedError.
    """
    if self.inference_config['name'] != 'triton':
        raise NotImplementedError(
            f"{self.inference_config['name']} helper for asian face embedding not implement"
        )
    helper = TritonInferenceHelper(
        'AsiaFaceEmbedding',
        self.inference_config['triton_url'],
        self.inference_config['triton_port'],
        'AsiaFaceEmbedding',
        1,
    )
    # 112x112 aligned face crop; ([127.5]*3, [127.5]*3) presumably
    # (mean, scale) — confirm in TritonInferenceHelper.
    helper.add_image_input('INPUT__0', (112, 112, 3), '人脸图像',
                           ([127.5, 127.5, 127.5], [127.5, 127.5, 127.5]))
    helper.add_output('OUTPUT__0', (1, 512), '人脸特征向量')
    self.inference_helper = helper
def get_inference_helper(self):
    """Attach a MiniFASNetV2 anti-spoofing helper built from ``self.inference_config``.

    Only the 'triton' backend is supported; any other configured backend
    name raises NotImplementedError.
    """
    if self.inference_config['name'] != 'triton':
        raise NotImplementedError(
            f"{self.inference_config['name']} helper for MiniFASNetV2 not implement"
        )
    helper = TritonInferenceHelper(
        'MiniFASNetV2',
        self.inference_config['triton_url'],
        self.inference_config['triton_port'],
        'MiniFASNetV2',
        1,
    )
    # 80x80 face crop; ([0,0,0], [1,1,1]) presumably (mean, scale) —
    # confirm in TritonInferenceHelper.
    helper.add_image_input('INPUT__0', (80, 80, 3), '人脸图像',
                           ([0, 0, 0], [1, 1, 1]))
    helper.add_output('OUTPUT__0', (1, 3), '三个类别的分类情况,0,2均为非真实人脸')
    self.inference_helper = helper
def get_inference_helper(self):
    """Attach a U-Net human-matting helper built from ``self.inference_config``.

    Only the 'triton' backend is supported; any other configured backend
    name raises NotImplementedError.
    """
    if self.inference_config['name'] != 'triton':
        raise NotImplementedError(
            f"{self.inference_config['name']} helper for unet human matting not implement"
        )
    helper = TritonInferenceHelper(
        'HumanMattingUnet',
        self.inference_config['triton_url'],
        self.inference_config['triton_port'],
        'HumanMattingUnet',
        1,
    )
    # 1280x720 RGB input with ImageNet-style stats — presumably
    # (mean, std) per channel; confirm in TritonInferenceHelper.
    helper.add_image_input('INPUT__0', (1280, 720, 3), '分割用rgb图像',
                           ([123.675, 116.28, 103.53],
                            [58.395, 57.12, 57.375]))
    helper.add_output('OUTPUT__0', (1, 720, 1280), '分割结果')
    self.inference_helper = helper
def get_inference_helper(self):
    """Attach a face-parsing helper built from ``self.inference_config``.

    Only the 'triton' backend is supported; any other configured backend
    name raises NotImplementedError.
    """
    if self.inference_config['name'] != 'triton':
        raise NotImplementedError(
            f"{self.inference_config['name']} helper for face parsing not implement"
        )
    helper = TritonInferenceHelper(
        'FaceParsing',
        self.inference_config['triton_url'],
        self.inference_config['triton_port'],
        'FaceParsing',
        1,
    )
    # 512x512 input; ImageNet-style stats in BGR channel order —
    # presumably (mean, std); confirm in TritonInferenceHelper.
    helper.add_image_input('INPUT__0', (512, 512, 3), '检测用rgb的图像',
                           ([103.53, 116.28, 123.675],
                            [57.375, 57.12, 58.395]))
    helper.add_output('OUTPUT__0', (19, 512, 512), '每个类别的区域')
    self.inference_helper = helper
def get_inference_helper(self):
    """Attach a Fair face-attribute helper built from ``self.inference_config``.

    Only the 'triton' backend is supported; any other configured backend
    name raises NotImplementedError.
    """
    if self.inference_config['name'] != 'triton':
        raise NotImplementedError(
            f"{self.inference_config['name']} helper for fair not implement"
        )
    helper = TritonInferenceHelper(
        'Fair',
        self.inference_config['triton_url'],
        self.inference_config['triton_port'],
        'Fair',
        1,
    )
    # 224x224 face crop with ImageNet-style stats — presumably
    # (mean, std) per channel; confirm in TritonInferenceHelper.
    helper.add_image_input('INPUT__0', (224, 224, 3), '人脸图像',
                           ([123.675, 116.28, 103.53],
                            [58.395, 57.12, 57.375]))
    # Single 18-way logit vector split into race/sex/age segments.
    helper.add_output('OUTPUT__0', (18, ), 'race[:7] sexual[7:9] age[9:18]')
    self.inference_helper = helper
def get_inference_helper(self):
    """Attach a BiSe human-matting helper built from ``self.inference_config``.

    Only the 'triton' backend is supported; any other configured backend
    name raises NotImplementedError.
    """
    if self.inference_config['name'] != 'triton':
        raise NotImplementedError(
            f"{self.inference_config['name']} helper for bise human matting not implement"
        )
    helper = TritonInferenceHelper(
        'HumanMattingBiSe',
        self.inference_config['triton_url'],
        self.inference_config['triton_port'],
        'HumanMattingBiSe',
        1,
    )
    # 512x512 input with ImageNet-style stats — presumably (mean, std)
    # per channel; confirm in TritonInferenceHelper.
    helper.add_image_input('INPUT__0', (512, 512, 3), '分割用bgr图像',
                           ([123.675, 116.28, 103.53],
                            [58.395, 57.12, 57.375]))
    helper.add_output('OUTPUT__0', (4, 512, 512), '分割结果,最后一个通道为alpha信息')
    self.inference_helper = helper
def get_inference_helper(self):
    """Attach a CRNN text-recognition helper built from ``self.inference_config``.

    The backbone ('mbv3' or 'res34') may be chosen via the optional
    'backbone_type' config key; anything missing or unsupported falls
    back to 'res34' with a console notice. Only the 'triton' backend is
    supported; any other configured backend name raises
    NotImplementedError.
    """
    if self.inference_config['name'] != 'triton':
        raise NotImplementedError(
            f"{self.inference_config['name']} helper for crnn text recognize not implement"
        )
    requested = self.inference_config.get('backbone_type')
    if requested in {'mbv3', 'res34'}:
        backbone_type = requested
    else:
        backbone_type = 'res34'
        print(f'crnn use the default backbone:{backbone_type}')
    model_name = f'CRNN_{backbone_type}'
    helper = TritonInferenceHelper(
        model_name,
        self.inference_config['triton_url'],
        self.inference_config['triton_port'],
        model_name,
        1,
    )
    # Fixed height 32, variable width; ([127.5]*3, [127.5]*3) presumably
    # (mean, scale) — confirm in TritonInferenceHelper.
    helper.add_image_input('INPUT__0', (32, -1, 3), '识别用的图像',
                           ([127.5, 127.5, 127.5], [127.5, 127.5, 127.5]))
    helper.add_output('OUTPUT__0', (-1, 1), '识别的max')
    helper.add_output('OUTPUT__1', (-1, 1), '识别的argmax的结果')
    self.inference_helper = helper
def get_inference_helper(self):
    """Attach a text-orientation classifier helper built from ``self.inference_config``.

    Only the 'triton' backend is supported; any other configured backend
    name raises NotImplementedError.
    """
    if self.inference_config['name'] != 'triton':
        raise NotImplementedError(
            f"{self.inference_config['name']} helper for text orientation classification not implement"
        )
    helper = TritonInferenceHelper(
        'TextOrientationClassification',
        self.inference_config['triton_url'],
        self.inference_config['triton_port'],
        'TextOrientationClassification',
        1,
    )
    # 192x48 text-line crop; ([127.5]*3, [127.5]*3) presumably
    # (mean, scale) — confirm in TritonInferenceHelper.
    helper.add_image_input('INPUT__0', (192, 48, 3), '识别用的图像',
                           ([127.5, 127.5, 127.5], [127.5, 127.5, 127.5]))
    helper.add_output('OUTPUT__0', (2, ), '方向的分类')
    self.inference_helper = helper
def get_inference_helper(self):
    """Attach a DB text-detection helper built from ``self.inference_config``.

    The backbone ('mbv3' or 'res18') may be chosen via the optional
    'backbone_type' config key; anything missing or unsupported falls
    back to 'res18' with a console notice. Only the 'triton' backend is
    supported; any other configured backend name raises
    NotImplementedError.
    """
    if self.inference_config['name'] != 'triton':
        raise NotImplementedError(
            f"{self.inference_config['name']} helper for db text detect not implement"
        )
    requested = self.inference_config.get('backbone_type')
    if requested in {'mbv3', 'res18'}:
        backbone_type = requested
    else:
        backbone_type = 'res18'
        print(f'db use the default backbone:{backbone_type}')
    model_name = f'DB_{backbone_type}'
    helper = TritonInferenceHelper(
        model_name,
        self.inference_config['triton_url'],
        self.inference_config['triton_port'],
        model_name,
        1,
    )
    # Variable-size RGB input with ImageNet-style stats — presumably
    # (mean, std) per channel; confirm in TritonInferenceHelper.
    helper.add_image_input('INPUT__0', (-1, -1, 3), '识别用的图像',
                           ([123.675, 116.28, 103.53],
                            [58.395, 57.12, 57.375]))
    helper.add_output('OUTPUT__0', (1, -1, -1), 'detect score')
    self.inference_helper = helper