Example #1
 def Inference(self, x, y):
     x = x.reshape(
         1024, 1024).copy()  #make sure memory order is the same as np order
     y = y.reshape(
         1024, 3072).copy()  #make sure memory order is the same as np order
     sizes = [1024 * 1024, 1024 * 3072]
     input_tensor_list = []
     input_tensor_list.append(
         hiai.NNTensor(x,
                       height=1024,
                       width=1024,
                       channel=1,
                       name='x',
                       data_type=DataType.FLOAT32_T,
                       size=sizes[0]))
     input_tensor_list.append(
         hiai.NNTensor(y,
                       height=1024,
                       width=3072,
                       channel=1,
                       name='y',
                       data_type=DataType.FLOAT32_T,
                       size=sizes[1]))
     nntensorList = hiai.NNTensorList(
         [input_tensor_list[0], input_tensor_list[1]])
     if not nntensorList:
         print(" matmul nntensorList is null")
     resultList = self.model.Inference(self.graph, nntensorList)
     # pdb.set_trace()
     result = resultList[0]
     return result
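A quick sanity check for the result above, assuming this particular .om model performs a plain 1024x1024 by 1024x3072 matrix multiply and returns a flat float32 buffer (both are assumptions about the model, not stated in the snippet):

import numpy as np

out = np.asarray(result, dtype=np.float32).reshape(1024, 3072)   # flat buffer -> matrix
expected = np.dot(x.reshape(1024, 1024), y.reshape(1024, 3072))  # NumPy reference result
print(np.allclose(out, expected, atol=1e-3))                     # loose tolerance for NPU float32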
Example #2
def main():
    inferenceModel = hiai.AIModelDescription('ShadowGAN', omModelName)
    myGraph = CreateGraph(inferenceModel)
    if myGraph is None:
        exit(0)

    if not osp.exists(outputDir):
        os.makedirs(outputDir)

    # Read images and corresponding masks
    inputImage = ReadImage(osp.join(testDataDir, 'noshadow', 'demo.jpg'),
                           channels=3)
    if inputImage is None:
        print("[ERROR] No input image.")
        DestroyGraph()
        exit(0)
    inputMask = ReadImage(osp.join(testDataDir, 'mask', 'demo.jpg'),
                          channels=1)
    if inputMask is None:
        print("[ERROR] No input mask.")
        DestroyGraph()
        exit(0)

    # Normalize to [-1.0, 1.0]
    inputImage = inputImage / 127.5 - 1.0
    inputMask = 1.0 - inputMask / 127.5

    # Convert HWC format to CHW
    # Note: hiai.NNTensor() only takes NCHW data as input.
    # [https://ascend.huawei.com/doc/Atlas200DK/1.3.0.0/zh/zh-cn_topic_0161025273.html]
    inputImage = inputImage.transpose([2, 0, 1]).copy()
    inputMask = inputMask.transpose([2, 0, 1]).copy()
    inputImageTensor = hiai.NNTensor(inputImage, inputWidth, inputHeight, 3,
                                     'input_image', DataType.FLOAT32_T,
                                     inputWidth * inputHeight * 3)
    inputMaskTensor = hiai.NNTensor(inputMask, inputWidth, inputHeight, 1,
                                    'input_mask', DataType.FLOAT32_T,
                                    inputWidth * inputHeight * 1)
    nntensorList = hiai.NNTensorList([inputImageTensor, inputMaskTensor])

    print("Inference start...")
    startTime = time.time()
    resultList = GraphInference(myGraph, nntensorList)
    if resultList is None:
        print("[ERROR] Inference failed.")
        DestroyGraph()
        exit(0)
    endTime = time.time()
    inferenceTime = endTime - startTime
    SaveResult(resultList, 'demo.jpg')

    DestroyGraph()
    print("Inference finished. Inference time: %.3fms" %
          (inferenceTime * 1000))
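For reference, since the inputs above were mapped into [-1.0, 1.0], turning a model output back into 8-bit pixels is just the inverse mapping. A minimal sketch, assuming `result` is a float array in that range (how SaveResult actually does this is not shown here):

import numpy as np

out = (result + 1.0) * 127.5                   # inverse of x / 127.5 - 1.0
out = np.clip(out, 0, 255).astype(np.uint8)    # clamp to the valid pixel range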
Example #3
def main():
    inferenceModel = hiai.AIModelDescription('faster-rcnn', omFileName)
    print(omFileName)
    print(inferenceModel)
    # we will resize the jpeg to 896*608 to meet faster-rcnn requirement via opencv,
    # so DVPP resizing is not needed
    myGraph = CreateGraphWithoutDVPP(inferenceModel)
    if myGraph is None:
        print "CreateGraph failed"
        return None

    # in this sample demo, the faster-rcnn  model requires 896*608 images
    dvppInWidth = 896
    dvppInHeight = 608

    start = time.time()
    index = 0
    pathDir = os.listdir(srcFileDir)
    for allDir in pathDir:
        child = os.path.join('%s%s' % (srcFileDir, allDir))
        if (not jpegHandler.is_img(child)):
            print('[info] file : ' + child + ' is not an image!')
            continue

        # read the jpeg file, resize it to the required width and height, then convert it to YUV format.
        input_image = jpegHandler.jpeg2yuv(child, dvppInWidth, dvppInHeight)

        inputImageTensor = hiai.NNTensor(input_image, dvppInWidth,
                                         dvppInHeight, 3, 'testImage',
                                         DataType.UINT8_T,
                                         dvppInWidth * dvppInHeight * 3 / 2)
        imageinfo = np.array([896, 608, 3]).astype(np.float32)
        imageinfo = np.reshape(imageinfo, (1, 3))
        infoTensor = hiai.NNTensor(imageinfo, 1, 3, 1, 'testinfo',
                                   DataType.FLOAT32_T, imageinfo.size)

        datalist = [inputImageTensor, infoTensor]
        nntensorList = hiai.NNTensorList(datalist)
        if not nntensorList:
            print "nntensorList is null"
            break
        resultList = GraphInference(myGraph, nntensorList)

        if resultList is None:
            print "graph inference failed"
            continue
        print(resultList[1].shape)
        PostProcess(resultList, dstFileDir, child)
    end = time.time()
    print('cost time ' + str((end - start) * 1000) + 'ms')

    hiai.hiai._global_default_graph_stack.get_default_graph().destroy()

    print('-------------------end')
Example #4
 def Inference(self, input_image):
     inputImageTensor = hiai.NNTensor(input_image, self.width, self.height, 3, 'testImage', DataType.UINT8_T, self.width * self.height * 3)
     imageinfo = np.array([800, 600, 3]).astype(np.float32)
     imageinfo = np.reshape(imageinfo, (1, 3))
     infoTensor = hiai.NNTensor(imageinfo, 1, 3, 1, 'testinfo', DataType.FLOAT32_T, imageinfo.size)
     # print('inputImageTensor is :', inputImageTensor)
     datalist = [inputImageTensor, infoTensor]
     nntensorList = hiai.NNTensorList(datalist)
     # print('nntensorList', nntensorList)
     resultList = self.model.Inference(self.graph, nntensorList)
     # print('inference over')
     return resultList
Example #5
def make_input_tensor(wav_features):

    input_tensor_height = wav_features.shape[2]
    input_tensor_width = wav_features.shape[3]

    #wav_features = np.reshape(wav_features, (0,3,1,2))
    #print 555
    #print wav_features.shape[0]
    #print wav_features.shape[1]
    #print wav_features.shape[2]
    #print wav_features.shape[3]

    #print wav_features.shape
    input_tensor = hiai.NNTensor(wav_features, input_tensor_height, input_tensor_width, 1, "wav_features", DataType.FLOAT32_T, wav_features.size)
    width0 = input_tensor.width
    height0 = input_tensor.height
    channel0 = input_tensor.channel
    #print str(height0)
    #print str(width0)
    #print str(channel0)

    nntensorList = hiai.NNTensorList(input_tensor)

    return nntensorList
    #return wav_features
Example #6
 def Inference(self, input_image):
     inputImageTensor = hiai.NNTensor(input_image, self.height, self.width,
                                      3, 'testImage', DataType.UINT8_T,
                                      self.height * self.width * 3)
     nntensorList = hiai.NNTensorList(inputImageTensor)
     resultList = self.model.Inference(self.graph, nntensorList)
     if not resultList:
         print("Inference fail")
         return None
     # return the inference result
     return resultList
Example #7
    def Inference(self, img, mask):
        input_tensor_list = []
        sizes = [3 * 512 * 512, 512 * 512]
        input_tensor_list.append(
            hiai.NNTensor(img,
                          height=512,
                          width=512,
                          channel=3,
                          name='image',
                          data_type=DataType.FLOAT32_T,
                          size=sizes[0]))
        input_tensor_list.append(
            hiai.NNTensor(mask,
                          height=512,
                          width=512,
                          channel=1,
                          name='mask',
                          data_type=DataType.FLOAT32_T,
                          size=sizes[1]))
        nntensorList = hiai.NNTensorList(
            [input_tensor_list[0], input_tensor_list[1]])
        if not nntensorList:
            print("nntensorList is null")

        resultList = self.model.Inference(self.graph, nntensorList)

        inpainted_512 = resultList[0]
        attention = resultList[1]
        mask_512_new = resultList[2]

        # the output in memory is NCHW, but numpy displays it as NHWC; make the numpy shape consistent with the memory layout
        inpainted_512.shape = (1, 3, 512, 512)
        attention.shape = (1, 1024, 32, 32)
        mask_512_new.shape = (1, 1, 512, 512)

        # transpose to NHWC
        inpainted_512 = inpainted_512.transpose(0, 2, 3, 1)
        attention = attention.transpose(0, 2, 3, 1)
        mask_512_new = mask_512_new.transpose(0, 2, 3, 1)

        return inpainted_512, attention, mask_512_new
Example #8
    def CreateNntensorList(self, input_data):
        '''
        Create NNTensorList instance with input_data

        Args:
            input_data: a numpy array, the data user wants to process

        Returns:
            nntensorList: NNTensorList instance that can be used by graph.proc
        '''
        inputImageTensor = hiai.NNTensor(input_data)
        nntensorList = hiai.NNTensorList(inputImageTensor)
        return nntensorList
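A minimal usage sketch for the helper above, following its docstring; `runner`, `input_array`, and the input shape are hypothetical, and `graph` is assumed to be an already-configured hiai.Graph on the same class:

import numpy as np

input_array = np.zeros((1, 3, 224, 224), dtype=np.float32)       # hypothetical NCHW input
tensor_list = runner.CreateNntensorList(input_array)
result_list = runner.graph.proc(input_nntensorlist=tensor_list)  # graph.proc, as in the docstring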
Example #9
    def Inference(self, input_image):
        if not isinstance(input_image, np.ndarray):
            return False
        strattime = time.time()
        # Image PreProcess
        resized_image = cv2.resize(input_image, (self.width, self.height))
        print('[0] resize cost: ' + str((time.time() - strattime) * 1000) +
              'ms')
        strattime = time.time()
        inputImageTensor = hiai.NNTensor(resized_image)
        print('[1] input image cost: ' +
              str((time.time() - strattime) * 1000) + 'ms')
        strattime = time.time()
        nntensorList = hiai.NNTensorList(inputImageTensor)
        print('[2] get list cost: ' + str((time.time() - strattime) * 1000) +
              'ms')
        strattime = time.time()
        # call the inference interface
        resultList = self.model.Inference(self.graph, nntensorList)
        print('[3] get result list cost: ' +
              str((time.time() - strattime) * 1000) + 'ms')
        strattime = time.time()
        if resultList is not None:
            bboxes = utils.get_result(resultList, self.width,
                                      self.height)  # get the detection results
            # print("bboxes:", bboxes)
            print('[4] get box cost: ' +
                  str((time.time() - strattime) * 1000) + 'ms')
            strattime = time.time()
            # Yolov_resnet18 Inference
            output_image = utils.draw_boxes(resized_image, bboxes)  # draw the boxes on the image
            print('[5] draw box cost: ' +
                  str((time.time() - strattime) * 1000) + 'ms')
            strattime = time.time()
            output_image = cv2.resize(
                output_image, (input_image.shape[1], input_image.shape[0]))
            print('[6] resize cost: ' + str((time.time() - strattime) * 1000) +
                  'ms')
            strattime = time.time()
            img_name = datetime.datetime.now().strftime("%Y-%m-%d%H-%M-%S-%f")
            cv2.imwrite('output_image/' + str(img_name) + '.jpg', output_image)
            print('[7] write cost: ' + str((time.time() - strattime) * 1000) +
                  'ms')
            strattime = time.time()

        else:
            print('no person in this frame.')
            return False

        return True
Example #10
    def Inference(self, nparryList, boxList):
        if not (isinstance(nparryList, list) and isinstance(boxList, list)):
            return False
        # clear the head-pose result of every face from the previous frame
        del self.resultList[:]
        # clear the 68 facial landmarks of every face from the previous frame
        del self.facepointList[:]

        for i in range(len(nparryList)):
            if not isinstance(nparryList[i], np.ndarray):
                print("please check your input format.")
                return False
            else:
                box_width = boxList[i][2] - boxList[i][0]
                box_height = boxList[i][3] - boxList[i][1]
                resized_image = cv2.resize(nparryList[i],
                                           (self.width, self.height))
                inputImageTensor = hiai.NNTensor(resized_image, self.height,
                                                 self.width, 3, 'testImage',
                                                 DataType.UINT8_T,
                                                 self.height * self.width * 3)
                nntensorList = hiai.NNTensorList(inputImageTensor)
                # call the inference interface
                resultList = self.model.Inference(self.graph, nntensorList)
                if resultList is not None:
                    # pitch yaw roll
                    # *50
                    # save the result to self.resultList on every iteration
                    self.resultList.append([
                        resultList[1][0][0][0][0] * 50,
                        resultList[1][0][0][0][1] * 50,
                        resultList[1][0][0][0][2] * 50
                    ])
                    HeadPosePoint = []
                    for j in range(136):
                        if j % 2 == 0:
                            HeadPosePoint.append(
                                (1 + resultList[0][0][0][0][j]) / 2 *
                                box_width + boxList[i][0])
                        else:
                            HeadPosePoint.append(
                                (1 + resultList[0][0][0][0][j]) / 2 *
                                box_height + boxList[i][1])
                    self.facepointList.append(HeadPosePoint)
                else:
                    print('no head pose inferred in this frame.')
                    return False
        # determine the current head pose
        HeadposeInference.head_status_get(self)
        return True
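To make the landmark decoding above concrete: the 136 outputs alternate x and y values that appear to lie in [-1, 1] (inferred from the `(1 + v) / 2` scaling, not stated by the source), and each value is mapped back into the detected face box. A worked instance of that arithmetic with made-up numbers:

box_width, box_left = 200, 40            # hypothetical face box
v = 0.0                                  # one raw x output from the model
x = (1 + v) / 2 * box_width + box_left   # -> 140.0, the horizontal center of the box
# v = -1.0 maps to the left edge (40.0), v = 1.0 to the right edge (240.0)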
Example #11
    def Inference_mult(self, *input_data):
        '''
        Inference interface, processes data with the configured model

        Args:
            input_data: one or more numpy arrays, the data the user wants to process
        
        Returns:
            a list, inference result
        '''
        result_list = []
        for data in input_data:
            inputImageTensor = hiai.NNTensor(data)
            inputNntensorList = hiai.NNTensorList(inputImageTensor)
            result = self.graph.proc(inputNntensorList)
            result_list.append(result)
        return result_list
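A short usage sketch for Inference_mult, assuming `runner` is an instance of the class above and `img_a`, `img_b` are numpy arrays already preprocessed to the model's expected layout (all hypothetical names):

results = runner.Inference_mult(img_a, img_b)   # one entry per input array
first_output = results[0]                       # result of graph.proc for img_a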
Example #12
def main():
    inferenceModel = hiai.AIModelDescription('resnet18', resnet18OmFileName)
    # we will resize the jpeg to 256*224 to meet resnet18 requirement via opencv,
    # so DVPP resizing is not needed
    myGraph = CreateGraphWithoutDVPP(inferenceModel)
    if myGraph is None:
        print "CreateGraph failed"
        return None

    # in this sample demo, the resnet18 model requires 256*224 images
    dvppInWidth = 42
    dvppInHeight = 42

    start = time.time()
    jpegHandler.mkdirown(dstFileDir)

    pathDir = os.listdir(srcFileDir)
    for allDir in pathDir:
        child = os.path.join('%s%s' % (srcFileDir, allDir))
        if (not jpegHandler.is_img(child)):
            print('[info] file : ' + child + ' is not an image!')
            continue

        # read the jpeg file, resize it to the required width and height, then convert it to YUV format.
        input_image = jpegHandler.jpeg2yuv(child, dvppInWidth, dvppInHeight)

        inputImageTensor = hiai.NNTensor(input_image, dvppInWidth,
                                         dvppInHeight, 3, 'testImage',
                                         DataType.UINT8_T,
                                         dvppInWidth * dvppInHeight)
        nntensorList = hiai.NNTensorList(inputImageTensor)

        resultList = GraphInference(myGraph, nntensorList)
        if resultList is None:
            print "graph inference failed"
            continue

        Resnet18PostProcess(resultList, srcFileDir, dstFileDir, allDir)

    end = time.time()
    print('cost time ' + str((end - start) * 1000) + 'ms')

    hiai.hiai._global_default_graph_stack.get_default_graph().destroy()

    print('-------------------end')
Example #13
    def ExcuteInference(self, images):
        result = []
        for i in range(0, len(images)):
            nArray = Yuv2Array(images[i])
            ssd = {"name": "face_detection", "path": self.modelPath}
            nntensor = hiai.NNTensor(nArray)
            tensorList = hiai.NNTensorList(nntensor)

            if self.first == True:

                self.graph = hiai.Graph(hiai.GraphConfig(graph_id=2001))
                with self.graph.as_default():
                    self.engine_config = hiai.EngineConfig(
                        engine_name="HIAIDvppInferenceEngine",
                        side=hiai.HiaiPythonSide.Device,
                        internal_so_name='/lib/libhiai_python_device2.7.so',
                        engine_id=2001)
                    self.engine = hiai.Engine(self.engine_config)
                    self.ai_model_desc = hiai.AIModelDescription(
                        name=ssd['name'], path=ssd['path'])
                    self.ai_config = hiai.AIConfig(
                        hiai.AIConfigItem("Inference", "item_value_2"))
                    final_result = self.engine.inference(
                        input_tensor_list=tensorList,
                        ai_model=self.ai_model_desc,
                        ai_config=self.ai_config)
                ret = copy.deepcopy(self.graph.create_graph())
                if ret != hiai.HiaiPythonStatust.HIAI_PYTHON_OK:
                    print("create graph failed, ret", ret)
                    d_ret = self.graph.destroy()
                    SetExitFlag(True)
                    return HIAI_APP_ERROR, None
                self.first = False
            else:
                with self.graph.as_default():
                    final_result = self.engine.inference(
                        input_tensor_list=tensorList,
                        ai_model=self.ai_model_desc,
                        ai_config=self.ai_config)
            resTensorList = self.graph.proc(input_nntensorlist=tensorList)
            print("Inference result: ", resTensorList[0].shape)
            result.append(resTensorList)
        return HIAI_APP_OK, result
Example #14
 def Inference(self, input_image):
     if not isinstance(input_image, np.ndarray):
         return False
     h, w, c = input_image.shape
     resized_image = cv2.resize(input_image, (self.width, self.height))
     inputImageTensor = hiai.NNTensor(resized_image)
     nntensorList = hiai.NNTensorList(inputImageTensor)
     # clear the face data inferred from the previous frame
     del self.boxList[:]
     del self.nparray[:]
     # call the inference interface
     resultList = self.model.Inference(self.graph, nntensorList)
     # save the inference output to a temporary list,
     # then filter out the valid face box coordinates and store them in self.boxList,
     # and store the faces cropped from the original image (numpy arrays) in self.nparray
     if resultList is not None:
         for i in range(200):
             if (resultList[0][i][0][0][2] > 0.8 and resultList[0][i][0][0][2] <= 1.0):
                 if (resultList[0][i][0][0][3] < 0):
                     resultList[0][i][0][0][3] = 0
                 if (resultList[0][i][0][0][4] < 0):
                     resultList[0][i][0][0][4] = 0
                 rect_width = resultList[0][i][0][0][5] - resultList[0][i][0][0][3]
                 rect_height = resultList[0][i][0][0][6] - resultList[0][i][0][0][4]
                 if (resultList[0][i][0][0][3] != 0 and abs(self.M_left * rect_width) <= resultList[0][i][0][0][3]):
                     resultList[0][i][0][0][3] = resultList[0][i][0][0][3] + self.M_left * rect_width
                 if (resultList[0][i][0][0][4] != 0 and abs(self.M_top * rect_height) <= resultList[0][i][0][0][4]):
                     resultList[0][i][0][0][4] = resultList[0][i][0][0][4] + self.M_top * rect_height
                 resultList[0][i][0][0][5] = resultList[0][i][0][0][5] + self.M_right * rect_width
                 resultList[0][i][0][0][6] = resultList[0][i][0][0][6] + self.M_bottom * rect_height
                 # coordinates of the face bounding box
                 self.boxList.append([resultList[0][i][0][0][3]*w, resultList[0][i][0][0][4]*h,
                                         resultList[0][i][0][0][5]*w, resultList[0][i][0][0][6]*h])
                 self.nparray.append(input_image[int(self.boxList[i][1]):int(self.boxList[i][3]),int(self.boxList[i][0]):int(self.boxList[i][2])])
             else:
                 break
     else:
         print('no face inferred in this frame.')
         return False
     return True
Example #15
def main():
    inferenceModel = hiai.AIModelDescription('segmentationDes', OmFileName)
    myGraph = CreateGraph(inferenceModel)
    if myGraph is None:
        print("CreateGraph failed")
        return None

    start = time.time()
    jpegHandler.mkdirown(dstFileDir)
    pathDir = os.listdir(srcFileDir)
    label_colours = cv.imread(colours, 1).astype(np.uint8)

    #print label_colours
    label_colours_bgr = label_colours[..., ::-1]
    for allDir in pathDir:
        child = os.path.join('%s%s' % (srcFileDir, allDir))
        if (not jpegHandler.is_img(child)):
            print('[info] file : ' + child + ' is not an image!')
            continue

        print('[info] file : ' + child + ' begin process !')

        # read the jpeg file, resize it to the required width and height, then convert it to YUV format.
        input_image = jpegHandler.jpeg2yuv(child, InWidth, InHeight)

        inputImageTensor = hiai.NNTensor(input_image, InWidth, InHeight, 3,
                                         'testImage', DataType.UINT8_T,
                                         InWidth * InHeight * 3 / 2)
        nntensorList = hiai.NNTensorList(inputImageTensor)

        resultList = GraphInference(myGraph, nntensorList)
        if resultList is None:
            print(child + "graph inference failed")
            continue
        resultArray = resultList[0]
        resultArray = resultArray.reshape(19, InWidth, InHeight)

        prediction = resultArray.argmax(axis=0)

        #print prediction
        prediction = np.squeeze(prediction)
        prediction = np.resize(prediction, (3, InHeight, InWidth))
        prediction = prediction.transpose(1, 2, 0).astype(np.uint8)

        prediction_rgb = np.zeros(prediction.shape, dtype=np.uint8)
        cv.LUT(prediction, label_colours_bgr, prediction_rgb)

        input_path_ext = child.split(".")[-1]
        input_image_name = child.split("/")[-1:][0].replace(
            '.' + input_path_ext, '')
        out_path_im = dstFileDir + input_image_name + '_erfnet' + '.' + input_path_ext

        cv.imwrite(out_path_im,
                   prediction_rgb)  # color images for visualization
        print(input_image_name + ' process end ')

    end = time.time()
    print('cost time ' + str((end - start) * 1000) + 'ms')

    hiai.hiai._global_default_graph_stack.get_default_graph().destroy()

    print('-------------------end')
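To spell out the post-processing in the example above: the raw output is reshaped into one score plane per class (19 classes here), argmax over the class axis gives a per-pixel class-index map, and the colour LUT turns that map into an RGB visualization. A tiny worked sketch of the argmax step on dummy data:

import numpy as np

scores = np.zeros((19, 4, 4), dtype=np.float32)   # 19 class planes for a 4x4 image
scores[7, 2, 3] = 5.0                             # class 7 scores highest at pixel (2, 3)
labels = scores.argmax(axis=0)                    # per-pixel class indices, shape (4, 4)
print(labels[2, 3])                               # -> 7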
Example #16
    def ExcuteInference(self, images):
        result = []
        for i in range(0, len(images)):
            nArray = Yuv2Array(images[i])
            ssd = {"name": "object_detection", "path": self.modelPath}
            nntensor = hiai.NNTensor(nArray)
            '''
            gray = cv2.cvtColor(nArray, cv2.COLOR_YUV2GRAY_420)
            blurred = cv2.GaussianBlur(gray, (5, 5), 0)
            thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
            cnts=cnts[0]
            #print("1111111")
            j=0
            for c in cnts:
                M = cv2.moments(c)
                c = c.astype("float")
                c = c.astype("int")
                shape = detect(c)
               # cv2.drawContours(cv_image, [c], -1, (0, 255, 0), 2)
              #  image=cv2.cvtColor(cv_image, cv2.COLOR_YUV2RGB_I420)
              #  picpath='~/sample-objectdetection-python/picpath'+str(i)+'.jpg'
              #  cv2.imwrite(picpath,image)
           # print("111111")

    #BGR=cv2.cvtColor(nArray,cv2.COLOR_YUV2RGB)
    #return BGR 
           # print(cv123_image)
            # img_np = YUVtoRGB(nArray)
            # cv2.imwrite("text4.jpg",img_np)
            '''

            tensorList = hiai.NNTensorList(nntensor)

            if self.first == True:

                self.graph = hiai.Graph(hiai.GraphConfig(graph_id=2001))
                with self.graph.as_default():
                    self.engine_config = hiai.EngineConfig(
                        engine_name="HIAIDvppInferenceEngine",
                        side=hiai.HiaiPythonSide.Device,
                        internal_so_name='/lib/libhiai_python_device2.7.so',
                        engine_id=2001)
                    self.engine = hiai.Engine(self.engine_config)
                    self.ai_model_desc = hiai.AIModelDescription(
                        name=ssd['name'], path=ssd['path'])
                    self.ai_config = hiai.AIConfig(
                        hiai.AIConfigItem("Inference", "item_value_2"))
                    final_result = self.engine.inference(
                        input_tensor_list=tensorList,
                        ai_model=self.ai_model_desc,
                        ai_config=self.ai_config)
                ret = copy.deepcopy(self.graph.create_graph())
                if ret != hiai.HiaiPythonStatust.HIAI_PYTHON_OK:
                    print("create graph failed, ret", ret)
                    d_ret = self.graph.destroy()
                    SetExitFlag(True)
                    return HIAI_APP_ERROR, None
                self.first = False
            else:
                with self.graph.as_default():
                    final_result = self.engine.inference(
                        input_tensor_list=tensorList,
                        ai_model=self.ai_model_desc,
                        ai_config=self.ai_config)
            resTensorList = self.graph.proc(input_nntensorlist=tensorList)
            # print("Inference result: ", resTensorList[0].shape)
            result.append(resTensorList)

        return HIAI_APP_OK, result