Code Example #1
 def _getgraph(self):
     inferenceModel = hiai.AIModelDescription('matmul', self.model_path)
     # init Graph
     self.graph = self.model.CreateGraph(inferenceModel, self.graph_id,
                                         self.model_engine_id)
     if self.graph is None:
         print("Init Graph failed")
Code Example #2
    def CreateGraph(self):
        '''
        Create graph

        Returns:
            graph
        '''
        path, filename = os.path.split(self.model_path)
        nntensorlist_object = hiai.NNTensorList()
        graph_id = random.randint(1, 2**32 - 1)
        graph = hiai.Graph(hiai.GraphConfig(graph_id=graph_id))
        with graph.as_default():
            engine = hiai.Engine()
            #resize_config = hiai.ResizeConfig(resize_width=300, resize_height = 300)
            #nntensorlist_object = engine.resize(input_tensor_list=nntensorlist_object, config=resize_config)

            ai_model_desc = hiai.AIModelDescription(name=filename,
                                                    path=self.model_path)
            ai_config = hiai.AIConfig(
                hiai.AIConfigItem("Inference", "item_value_2"))
            final_result = engine.inference(
                input_tensor_list=nntensorlist_object,
                ai_model=ai_model_desc,
                ai_config=ai_config)
        ret = copy.deepcopy(graph.create_graph())
        if ret != hiai.HiaiPythonStatust.HIAI_PYTHON_OK:
            graph.destroy()
            raise Exception("create graph failed, ret ", ret)
        print("create graph successful")
        return graph
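
For context, here is a minimal, hypothetical sketch of how a graph returned by the CreateGraph helper above could be driven. The graph.proc call mirrors Code Examples #11 and #15; the instance name and the input array are assumptions made only for illustration.

import hiai

# Hypothetical usage sketch (not part of the original sample).
# `model` is assumed to be an instance of the class that defines CreateGraph above,
# and `input_array` an already preprocessed numpy array in NCHW layout.
graph = model.CreateGraph()
input_tensor = hiai.NNTensor(input_array)
tensor_list = hiai.NNTensorList(input_tensor)
result_list = graph.proc(input_nntensorlist=tensor_list)  # run inference, as in Code Example #11
print("output tensor shape:", result_list[0].shape)
graph.destroy()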
Code Example #3
    def create_graph_with_dvpp(self, resize_cfg):
        '''
        Create graph with dvpp

        Args:
            resize_cfg: resize parameter, (resize_w, resize_h)
                resize_w: width of the destination resolution
                resize_h: height of the destination resolution
            
        Returns:
            graph: a graph configured with dvpp

        Raises:
            Exception("[create_graph_with_dvpp]: create graph failed, ret ", ret)
        '''
        nntensorlist_object = hiai.NNTensorList()
        graph = hiai.Graph(hiai.GraphConfig(graph_id=65530))
        with graph.as_default():
            engine = hiai.Engine()
            resize_config = hiai.ResizeConfig(resize_width=resize_cfg[0], resize_height=resize_cfg[1])
            nntensorlist_object = engine.resize(input_tensor_list=nntensorlist_object, config=resize_config)
            ai_model_desc = hiai.AIModelDescription(name=os.path.basename(self.model_path), path=self.model_path)
            ai_config = hiai.AIConfig(hiai.AIConfigItem("Inference", "item_value_2"))
            final_result = engine.inference(input_tensor_list=nntensorlist_object,
                                            ai_model=ai_model_desc,
                                            ai_config=ai_config)
        ret = copy.deepcopy(graph.create_graph())
        if ret != hiai.HiaiPythonStatust.HIAI_PYTHON_OK:
            graph.destroy()
            raise Exception("[create_graph_with_dvpp]: create graph failed, ret ", ret) 
        print("[create_graph_with_dvpp]: create graph successful")
        return graph
Code Example #4
 def _getgraph(self):
     # Describe the inference model
     inferenceModel = hiai.AIModelDescription('face_detection', face_detection_model_path)
     # Initialize the Graph
     self.graph = self.model.CreateGraph(inferenceModel, self.graph_id, self.model_engine_id)
     if self.graph is None:
         print("Init Graph failed")
Code Example #5
    def CreateGraph(self):
        '''
        Create graph

        Returns:
            graph
        '''
        nntensorlist_object = hiai.NNTensorList()
        graph = hiai.Graph(hiai.GraphConfig(graph_id=65530))
        with graph.as_default():
            engine = hiai.Engine()
            print(self.model_path, os.path.basename(self.model_path))
            ai_model_desc = hiai.AIModelDescription(
                name=os.path.basename(
                    self.model_path), path=self.model_path)
            ai_config = hiai.AIConfig(
                hiai.AIConfigItem(
                    "Inference", "item_value_2"))
            final_result = engine.inference(
                input_tensor_list=nntensorlist_object,
                ai_model=ai_model_desc,
                ai_config=ai_config)
        ret = copy.deepcopy(graph.create_graph())
        if ret != hiai.HiaiPythonStatust.HIAI_PYTHON_OK:
            graph.destroy()
            raise Exception("create graph failed, ret ", ret)
        print("create graph successful")
        return graph
Code Example #6
 def _getgraph(self):
     # Describe the inference model
     DescriptionInferenceModel = hiai.AIModelDescription(
         'crowd_counting', crowd_counting_model_path)
     # 初始化Graph
     self.graph = self.model.CreateGraph(DescriptionInferenceModel)
     if self.graph is None:
         print("Init Graph failed")
Code Example #7
 def _getgraph(self):
     # Describe the inference model
     inferenceModel = hiai.AIModelDescription('Yolo3_Resnet18',
                                              yolo3_resnet18_model_path)
     # 初始化Graph
     self.graph = self.model.CreateGraph(inferenceModel, self.graph_id,
                                         self.model_engine_id)
     if self.graph is None:
         print("Init Graph failed")
Code Example #8
def main():
    inferenceModel = hiai.AIModelDescription('ShadowGAN', omModelName)
    myGraph = CreateGraph(inferenceModel)
    if myGraph is None:
        exit(0)

    if not osp.exists(outputDir):
        os.makedirs(outputDir)

    # Read images and corresponding masks
    inputImage = ReadImage(osp.join(testDataDir, 'noshadow', 'demo.jpg'),
                           channels=3)
    if inputImage is None:
        print("[ERROR] No input image.")
        DestroyGraph()
        exit(0)
    inputMask = ReadImage(osp.join(testDataDir, 'mask', 'demo.jpg'),
                          channels=1)
    if inputMask is None:
        print("[ERROR] No input mask.")
        DestroyGraph()
        exit(0)

    # Normalize to [-1.0, 1.0]
    inputImage = inputImage / 127.5 - 1.0
    inputMask = 1.0 - inputMask / 127.5

    # Convert HWC format to CHW
    # Note: hiai.NNTensor() only takes NCHW data as input.
    # [https://ascend.huawei.com/doc/Atlas200DK/1.3.0.0/zh/zh-cn_topic_0161025273.html]
    inputImage = inputImage.transpose([2, 0, 1]).copy()
    inputMask = inputMask.transpose([2, 0, 1]).copy()
    inputImageTensor = hiai.NNTensor(inputImage, inputWidth, inputHeight, 3,
                                     'input_image', DataType.FLOAT32_T,
                                     inputWidth * inputHeight * 3)
    inputMaskTensor = hiai.NNTensor(inputMask, inputWidth, inputHeight, 1,
                                    'input_mask', DataType.FLOAT32_T,
                                    inputWidth * inputHeight * 1)
    nntensorList = hiai.NNTensorList([inputImageTensor, inputMaskTensor])

    print("Inference start...")
    startTime = time.time()
    resultList = GraphInference(myGraph, nntensorList)
    if resultList is None:
        print("[ERROR] Inference failed.")
        DestroyGraph()
        exit(0)
    endTime = time.time()
    inferenceTime = endTime - startTime
    SaveResult(resultList, 'demo.jpg')

    DestroyGraph()
    print("Inference finished. Inference time: %.3fms" %
          (inferenceTime * 1000))
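
Code Example #8 relies on helper functions (CreateGraph, ReadImage, GraphInference, SaveResult, DestroyGraph) that are defined elsewhere in that sample. As a rough sketch of what GraphInference and DestroyGraph might look like, assuming they follow the graph.proc pattern of Code Examples #11 and #15 and the destroy pattern of Code Examples #9, #10, #12, and #13:

import hiai

# Hypothetical reconstructions; the names and signatures come from the calls in
# main() above, but the bodies are assumptions based on the other examples here.
def GraphInference(graph, nntensor_list):
    # Run the graph on the given NNTensorList and return the output tensors;
    # main() above treats a None result as an inference failure.
    return graph.proc(input_nntensorlist=nntensor_list)

def DestroyGraph():
    # Tear down the default graph, mirroring the explicit destroy calls at the
    # end of Code Examples #9, #10, #12, and #13.
    hiai.hiai._global_default_graph_stack.get_default_graph().destroy()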
Code Example #9
def main():
    inferenceModel = hiai.AIModelDescription('faster-rcnn', omFileName)
    print(omFileName)
    print(inferenceModel)
    # we will resize the jpeg to 896*608 to meet faster-rcnn requirement via opencv,
    # so DVPP resizing is not needed
    myGraph = CreateGraphWithoutDVPP(inferenceModel)
    if myGraph is None:
        print("CreateGraph failed")
        return None

    # in this sample demo, the faster-rcnn  model requires 896*608 images
    dvppInWidth = 896
    dvppInHeight = 608

    start = time.time()
    index = 0
    pathDir = os.listdir(srcFileDir)
    for allDir in pathDir:
        child = os.path.join('%s%s' % (srcFileDir, allDir))
        if (not jpegHandler.is_img(child)):
            print('[info] file : ' + child + ' is not image !')
            continue

        # read the jpeg file, resize it to the required width and height, then convert it to YUV format.
        input_image = jpegHandler.jpeg2yuv(child, dvppInWidth, dvppInHeight)

        inputImageTensor = hiai.NNTensor(input_image, dvppInWidth,
                                         dvppInHeight, 3, 'testImage',
                                         DataType.UINT8_T,
                                         dvppInWidth * dvppInHeight * 3 / 2)
        imageinfo = np.array([896, 608, 3]).astype(np.float32)
        imageinfo = np.reshape(imageinfo, (1, 3))
        infoTensor = hiai.NNTensor(imageinfo, 1, 3, 1, 'testinfo',
                                   DataType.FLOAT32_T, imageinfo.size)

        datalist = [inputImageTensor, infoTensor]
        nntensorList = hiai.NNTensorList(datalist)
        if not nntensorList:
            print("nntensorList is null")
            break
        resultList = GraphInference(myGraph, nntensorList)

        if resultList is None:
            print("graph inference failed")
            continue
        print(resultList[1].shape)
        PostProcess(resultList, dstFileDir, child)
    end = time.time()
    print('cost time ' + str((end - start) * 1000) + 'ms')

    hiai.hiai._global_default_graph_stack.get_default_graph().destroy()

    print('-------------------end')
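
Code Examples #9, #10, and #14 call a CreateGraphWithoutDVPP helper that is not shown in this collection. A plausible sketch, assuming it follows the CreateGraph pattern of Code Example #5 with the DVPP resize stage omitted (the callers resize on the host via OpenCV) and returns None on failure as the callers expect:

import copy
import hiai

# Hypothetical reconstruction; graph_id and the AIConfig values are copied from
# Code Example #5 and may differ from the real sample.
def CreateGraphWithoutDVPP(ai_model_desc):
    nntensorlist_object = hiai.NNTensorList()
    graph = hiai.Graph(hiai.GraphConfig(graph_id=65530))
    with graph.as_default():
        engine = hiai.Engine()
        ai_config = hiai.AIConfig(hiai.AIConfigItem("Inference", "item_value_2"))
        # No engine.resize() stage here: images are already resized before inference.
        engine.inference(input_tensor_list=nntensorlist_object,
                         ai_model=ai_model_desc,
                         ai_config=ai_config)
    ret = copy.deepcopy(graph.create_graph())
    if ret != hiai.HiaiPythonStatust.HIAI_PYTHON_OK:
        graph.destroy()
        print("CreateGraphWithoutDVPP failed, ret", ret)
        return None
    return graph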
Code Example #10
def main():
    inferenceModel = hiai.AIModelDescription('restnet18', resnet18OmFileName)
    # we will resize the jpeg to 256*224 to meet resnet18 requirement via opencv,
    # so DVPP resizing is not needed
    myGraph = CreateGraphWithoutDVPP(inferenceModel)
    if myGraph is None:
        print("CreateGraph failed")
        return None

    # in this sample demo, the resnet18 model requires 256*224 images
    dvppInWidth = 256
    dvppInHeight = 224

    start = time.time()
    jpegHandler.mkdirown(dstFileDir)

    pathDir = os.listdir(srcFileDir)
    for allDir in pathDir:
        child = os.path.join('%s%s' % (srcFileDir, allDir))
        if (not jpegHandler.is_img(child)):
            print('[info] file : ' + child + ' is not image !')
            continue

        # read the jpeg file, resize it to the required width and height, then convert it to YUV format.
        input_image = jpegHandler.jpeg2yuv(child, dvppInWidth, dvppInHeight)

        inputImageTensor = hiai.NNTensor(input_image, dvppInWidth,
                                         dvppInHeight, 3, 'testImage',
                                         DataType.UINT8_T,
                                         dvppInWidth * dvppInHeight * 3 / 2)
        nntensorList = hiai.NNTensorList(inputImageTensor)

        resultList = GraphInference(myGraph, nntensorList)
        if resultList is None:
            print("graph inference failed")
            continue

        Resnet18PostProcess(resultList, srcFileDir, dstFileDir, allDir)

    end = time.time()
    print('cost time ' + str((end - start) * 1000) + 'ms')

    hiai.hiai._global_default_graph_stack.get_default_graph().destroy()

    print('-------------------end')
Code Example #11
    def ExcuteInference(self, images):
        result = []
        for i in range(0, len(images)):
            nArray = Yuv2Array(images[i])
            ssd = {"name": "face_detection", "path": self.modelPath}
            nntensor = hiai.NNTensor(nArray)
            tensorList = hiai.NNTensorList(nntensor)

            if self.first:

                self.graph = hiai.Graph(hiai.GraphConfig(graph_id=2001))
                with self.graph.as_default():
                    self.engine_config = hiai.EngineConfig(
                        engine_name="HIAIDvppInferenceEngine",
                        side=hiai.HiaiPythonSide.Device,
                        internal_so_name='/lib/libhiai_python_device2.7.so',
                        engine_id=2001)
                    self.engine = hiai.Engine(self.engine_config)
                    self.ai_model_desc = hiai.AIModelDescription(
                        name=ssd['name'], path=ssd['path'])
                    self.ai_config = hiai.AIConfig(
                        hiai.AIConfigItem("Inference", "item_value_2"))
                    final_result = self.engine.inference(
                        input_tensor_list=tensorList,
                        ai_model=self.ai_model_desc,
                        ai_config=self.ai_config)
                ret = copy.deepcopy(self.graph.create_graph())
                if ret != hiai.HiaiPythonStatust.HIAI_PYTHON_OK:
                    print("create graph failed, ret", ret)
                    d_ret = self.graph.destroy()
                    SetExitFlag(True)
                    return HIAI_APP_ERROR, None
                self.first = False
            else:
                with self.graph.as_default():
                    final_result = self.engine.inference(
                        input_tensor_list=tensorList,
                        ai_model=self.ai_model_desc,
                        ai_config=self.ai_config)
            resTensorList = self.graph.proc(input_nntensorlist=tensorList)
            print("Inference result: ", resTensorList[0].shape)
            result.append(resTensorList)
        return HIAI_APP_OK, result
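
The Yuv2Array helper used in Code Examples #11 and #15 is not included in either snippet. A minimal sketch, assuming the camera frame arrives as a raw YUV420SP (NV12) buffer of width * height * 3 / 2 bytes, as the NNTensor sizes in Code Examples #9 and #13 imply; the real helper presumably derives the width and height from the frame object itself, so this signature is only a guess.

import numpy as np

# Hypothetical helper: reinterpret a raw YUV420SP buffer as a uint8 numpy array.
def Yuv2Array(yuv_bytes, width, height):
    data = np.frombuffer(yuv_bytes, dtype=np.uint8)
    # YUV420SP stores a full-resolution Y plane followed by interleaved UV data
    # at quarter resolution, giving height * 3 / 2 rows of `width` bytes each.
    return data.reshape(height * 3 // 2, width)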
Code Example #12
def main():

    print("Start get data set")
    # For pcm input (audio recorded directly by the developer board), use the line
    # below; for wav input, use the code further down instead.
    #Input_tensor, in_len = GetDataSet(speech_voice_path)

    # The input audio is in wav format. If the input audio is pcm, comment out this
    # line and use the code above instead.
    Input_tensor, in_len = GetDataSet2(speech_voice_path2)

    # Check whether the input data was obtained correctly
    if Input_tensor is None:
        print('Get input data failed')

    # Load the acoustic model for speech recognition
    print("Start load speech model")
    inferenceModel = hiai.AIModelDescription('asr', speech_recog_model)
    if inferenceModel is None:
        print('Load model failed')
        return None

    # Initialize the Graph
    print("Start init Graph")
    myGraph = CreateGraph(inferenceModel)

    # Start model inference
    print("Start inference")
    resultList = GraphInference(myGraph, Input_tensor)

    list_shape = np.array(resultList).shape

    if resultList is None:
        print("Inference failed")

    # Post-process the result
    final_result = SpeechPostProcess(resultList, in_len)

    print('Text recognition result: ' + str(final_result))

    hiai.hiai._global_default_graph_stack.get_default_graph().destroy()

    print('Speech Recognition Finished !')
Code Example #13
def main():
    inferenceModel = hiai.AIModelDescription('segmentationDes', OmFileName)
    myGraph = CreateGraph(inferenceModel)
    if myGraph is None:
        print("CreateGraph failed")
        return None

    start = time.time()
    jpegHandler.mkdirown(dstFileDir)
    pathDir = os.listdir(srcFileDir)
    label_colours = cv.imread(colours, 1).astype(np.uint8)

    #print label_colours
    label_colours_bgr = label_colours[..., ::-1]
    for allDir in pathDir:
        child = os.path.join('%s%s' % (srcFileDir, allDir))
        if (not jpegHandler.is_img(child)):
            print('[info] file : ' + child + ' is not image !')
            continue

        print('[info] file : ' + child + ' begin process !')

        # read the jpeg file, resize it to the required width and height, then convert it to YUV format.
        input_image = jpegHandler.jpeg2yuv(child, InWidth, InHeight)

        inputImageTensor = hiai.NNTensor(input_image, InWidth, InHeight, 3,
                                         'testImage', DataType.UINT8_T,
                                         InWidth * InHeight * 3 / 2)
        nntensorList = hiai.NNTensorList(inputImageTensor)

        resultList = GraphInference(myGraph, nntensorList)
        if resultList is None:
            print(child + "graph inference failed")
            continue
        resultArray = resultList[0]
        resultArray = resultArray.reshape(19, InWidth, InHeight)

        prediction = resultArray.argmax(axis=0)

        #print prediction
        prediction = np.squeeze(prediction)
        prediction = np.resize(prediction, (3, InHeight, InWidth))
        prediction = prediction.transpose(1, 2, 0).astype(np.uint8)

        prediction_rgb = np.zeros(prediction.shape, dtype=np.uint8)
        cv.LUT(prediction, label_colours_bgr, prediction_rgb)

        input_path_ext = child.split(".")[-1]
        input_image_name = child.split("/")[-1:][0].replace(
            '.' + input_path_ext, '')
        out_path_im = dstFileDir + input_image_name + '_erfnet' + '.' + input_path_ext

        cv.imwrite(out_path_im,
                   prediction_rgb)  # color images for visualization
        print(input_image_name + ' process end ')

    end = time.time()
    print('cost time ' + str((end - start) * 1000) + 'ms')

    hiai.hiai._global_default_graph_stack.get_default_graph().destroy()

    print('-------------------end')
Code Example #14
 def _getgraph(self):
     inferenceModel = hiai.AIModelDescription('faster_rcnn', detection_model_path)
     #print inferenceModel
     self.graph = self.model.CreateGraphWithoutDVPP(inferenceModel)
     if self.graph is None:
         print("CreateGraph failed")
Code Example #15
    def ExcuteInference(self, images):
        result = []
        for i in range(0, len(images)):
            nArray = Yuv2Array(images[i])
            ssd = {"name": "object_detection", "path": self.modelPath}
            nntensor = hiai.NNTensor(nArray)
            '''
            gray = cv2.cvtColor(nArray, cv2.COLOR_YUV2GRAY_420)
            blurred = cv2.GaussianBlur(gray, (5, 5), 0)
            thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
            cnts=cnts[0]
            #print("1111111")
            j=0
            for c in cnts:
                M = cv2.moments(c)
                c = c.astype("float")
                c = c.astype("int")
                shape = detect(c)
               # cv2.drawContours(cv_image, [c], -1, (0, 255, 0), 2)
              #  image=cv2.cvtColor(cv_image, cv2.COLOR_YUV2RGB_I420)
              #  picpath='~/sample-objectdetection-python/picpath'+str(i)+'.jpg'
              #  cv2.imwrite(picpath,image)
           # print("111111")

    #BGR=cv2.cvtColor(nArray,cv2.COLOR_YUV2RGB)
    #return BGR 
           # print(cv123_image)
            # img_np = YUVtoRGB(nArray)
            # cv2.imwrite("text4.jpg",img_np)
            '''

            tensorList = hiai.NNTensorList(nntensor)

            if self.first:

                self.graph = hiai.Graph(hiai.GraphConfig(graph_id=2001))
                with self.graph.as_default():
                    self.engine_config = hiai.EngineConfig(
                        engine_name="HIAIDvppInferenceEngine",
                        side=hiai.HiaiPythonSide.Device,
                        internal_so_name='/lib/libhiai_python_device2.7.so',
                        engine_id=2001)
                    self.engine = hiai.Engine(self.engine_config)
                    self.ai_model_desc = hiai.AIModelDescription(
                        name=ssd['name'], path=ssd['path'])
                    self.ai_config = hiai.AIConfig(
                        hiai.AIConfigItem("Inference", "item_value_2"))
                    final_result = self.engine.inference(
                        input_tensor_list=tensorList,
                        ai_model=self.ai_model_desc,
                        ai_config=self.ai_config)
                ret = copy.deepcopy(self.graph.create_graph())
                if ret != hiai.HiaiPythonStatust.HIAI_PYTHON_OK:
                    print("create graph failed, ret", ret)
                    d_ret = self.graph.destroy()
                    SetExitFlag(True)
                    return HIAI_APP_ERROR, None
                self.first = False
            else:
                with self.graph.as_default():
                    final_result = self.engine.inference(
                        input_tensor_list=tensorList,
                        ai_model=self.ai_model_desc,
                        ai_config=self.ai_config)
            resTensorList = self.graph.proc(input_nntensorlist=tensorList)
            # print("Inference result: ", resTensorList[0].shape)
            result.append(resTensorList)

        return HIAI_APP_OK, result