Example #1
 def crop_and_paste_get_roi(self, image, width, height,
                            crop_and_paste_width, crop_and_paste_height):
     """
     :param image: input image
     :param width: input image width
     :param height: input image height
     :param crop_and_paste_width: width of the cropped-and-pasted output
     :param crop_and_paste_height: height of the cropped-and-pasted output
     :return: the cropped-and-pasted picture as an AclImage
     """
     print('[Dvpp] vpc crop and paste stage:')
     input_desc = self._gen_input_pic_desc(image)
     stride_width = utils.align_up16(crop_and_paste_width)
     stride_height = utils.align_up2(crop_and_paste_height)
     out_buffer_size = utils.yuv420sp_size(stride_width, stride_height)
     out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size)
     output_desc = \
         self._gen_output_pic_desc(crop_and_paste_width, crop_and_paste_height, out_buffer, out_buffer_size)
     self._crop_config = acl.media.dvpp_create_roi_config(
         0, (width >> 1 << 1) - 1, 0, (height >> 1 << 1) - 1)
     self._paste_config = acl.media.dvpp_create_roi_config(
         0, crop_and_paste_width - 1, 0, crop_and_paste_height - 1)
     ret = acl.media.dvpp_vpc_crop_and_paste_async(self._dvpp_channel_desc,
                                                   input_desc, output_desc,
                                                   self._crop_config,
                                                   self._paste_config,
                                                   self._stream)
     utils.check_ret("acl.media.dvpp_vpc_crop_and_paste_async", ret)
     ret = acl.rt.synchronize_stream(self._stream)
     utils.check_ret("acl.rt.synchronize_stream", ret)
     print('[Dvpp] vpc crop and paste stage success')
     stride_width = utils.align_up16(crop_and_paste_width)
     stride_height = utils.align_up2(crop_and_paste_height)
     return AclImage(out_buffer, stride_width, stride_height,
                     out_buffer_size, constants.MEMORY_DVPP)
Example #2
def main():    
    """
    main
    """
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)
    
    acl_resource = AclResource()
    acl_resource.init()

    crowdcount = CrowdCount(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT)   
    ret = crowdcount.init()
    
    if not os.path.isdir(os.path.join(SRC_PATH, "../outputs")):
        os.mkdir(os.path.join(SRC_PATH, "../outputs"))
        
    image_dir = sys.argv[1]
    images_list = [os.path.join(image_dir, img)
                   for img in os.listdir(image_dir)
                   if os.path.splitext(img)[1] in constants.IMG_EXT]

    for image_file in images_list:        
        image = AclImage(image_file)            
        crop_and_paste_image = crowdcount.pre_process(image)
        print("pre process end")
        result = crowdcount.inference([crop_and_paste_image])              
        result_img_encode = crowdcount.post_process(result, image_file)      
    return result_img_encode
Example #3
def main():
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)

    acl_resource = AclResource()
    acl_resource.init()
    model = Model(MODEL_PATH)
    dvpp = Dvpp(acl_resource)

    image_dir = sys.argv[1]
    images_list = [
        os.path.join(image_dir, img) for img in os.listdir(image_dir)
        if os.path.splitext(img)[1] in const.IMG_EXT
    ]

    #Create a directory to store the inference results
    if not os.path.isdir(os.path.join(SRC_PATH, "../outputs")):
        os.mkdir(os.path.join(SRC_PATH, "../outputs"))

    image_info = construct_image_info()
    for image_file in images_list:
        image = AclImage(image_file)
        resized_image = pre_process(image, dvpp)
        print("pre process end")

        result = model.execute([
            resized_image,
        ])
        post_process(result, image_file)

        print("process " + image_file + " end")
Example #4
    def jpegd(self, image):
        """
        Decode a jpeg picture into a yuv picture
        """
        # Create conversion output image desc
        output_desc, out_buffer = self._gen_jpegd_out_pic_desc(image)
        ret = acl.media.dvpp_jpeg_decode_async(self._dvpp_channel_desc,
                                               image.data(),
                                               image.size,
                                               output_desc,
                                               self._stream)
        if ret != constants.ACL_ERROR_NONE:
            log_error("dvpp_jpeg_decode_async failed ret={}".format(ret))
            return None

        ret = acl.rt.synchronize_stream(self._stream)
        if ret != constants.ACL_ERROR_NONE:
            log_error("dvpp_jpeg_decode_async failed ret={}".format(ret))
            return None

        # Return the decoded AclImage instance
        stride_width = utils.align_up128(image.width)
        stride_height = utils.align_up16(image.height)
        stride_size = utils.yuv420sp_size(stride_width, stride_height)
        return AclImage(out_buffer, stride_width,
                        stride_height, stride_size, constants.MEMORY_DVPP)
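Because the jpeg decode output is padded to the DVPP stride alignment used above (width to a multiple of 128, height to a multiple of 16), the returned AclImage can be larger than the nominal JPEG dimensions. A quick illustration with hypothetical sizes, using the helpers sketched after Example #1:

# A hypothetical 1920x1080 JPEG decoded by the method above:
stride_width = utils.align_up128(1920)    # 1920 (already a multiple of 128)
stride_height = utils.align_up16(1080)    # 1088
stride_size = utils.yuv420sp_size(stride_width, stride_height)  # 3133440 bytes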
Example #5
def main():
    """
    Run inference on every picture in the directory passed as a command-line argument
    """
    
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)
    
    acl_resource = AclResource()
    acl_resource.init()
    model = Model(MODEL_PATH)
    dvpp = Dvpp(acl_resource)
    
    #Collect all pictures from the image directory passed as a parameter
    image_dir = sys.argv[1]
    images_list = [os.path.join(image_dir, img)
                   for img in os.listdir(image_dir)
                   if os.path.splitext(img)[1] in const.IMG_EXT]
    #Create a directory to store the inference results
    if not os.path.isdir('../outputs'):
        os.mkdir('../outputs')

    image_info = construct_image_info()

    for image_file in images_list:
        #read picture
        image = AclImage(image_file)
        #preprocess image
        resized_image = pre_process(image, dvpp)
        print("pre process end")
        #run inference on the picture
        result = model.execute([resized_image, image_info])
        #post-process the inference results
        post_process(result, image, image_file)
Example #6
    def crop_and_paste(
            self,
            image,
            width,
            height,
            crop_and_paste_width,
            crop_and_paste_height):
        """
        crop_and_paste
        """
        print('[Dvpp] vpc crop and paste stage:')
        input_desc = self._gen_input_pic_desc(image)
        stride_width = utils.align_up16(crop_and_paste_width)
        stride_height = utils.align_up2(crop_and_paste_height)
        out_buffer_size = utils.yuv420sp_size(stride_width, stride_height)
        out_buffer, ret = acl.media.dvpp_malloc(out_buffer_size)
        output_desc = self._gen_output_pic_desc(
            crop_and_paste_width,
            crop_and_paste_height,
            out_buffer,
            out_buffer_size)
        self._crop_config = acl.media.dvpp_create_roi_config(
            0, (width >> 1 << 1) - 1, 0, (height >> 1 << 1) - 1)
        # compute the paste area so that the source aspect ratio is preserved:
        rx = float(width) / float(crop_and_paste_width)
        ry = float(height) / float(crop_and_paste_height)
        if rx > ry:
            dx = 0
            r = rx
            dy = int((crop_and_paste_height - height / r) / 2)
        else:
            dy = 0
            r = ry
            dx = int((crop_and_paste_width - width / r) / 2)
        pasteRightOffset = int(crop_and_paste_width - 2 * dx)
        pasteBottomOffset = int(crop_and_paste_height - 2 * dy)
        if (pasteRightOffset % 2) == 0:
            pasteRightOffset = pasteRightOffset - 1
        if (pasteBottomOffset % 2) == 0:
            pasteBottomOffset = pasteBottomOffset - 1
        self._paste_config = acl.media.dvpp_create_roi_config(
            0, pasteRightOffset, 0, pasteBottomOffset)
        ret = acl.media.dvpp_vpc_crop_and_paste_async(self._dvpp_channel_desc,
                                                      input_desc,
                                                      output_desc,
                                                      self._crop_config,
                                                      self._paste_config,
                                                      self._stream)
        utils.check_ret("acl.media.dvpp_vpc_crop_and_paste_async", ret)
        ret = acl.rt.synchronize_stream(self._stream)
        utils.check_ret("acl.rt.synchronize_stream", ret)
        print('[Dvpp] vpc crop and paste stage success')
        stride_width = crop_and_paste_width - 2 * dx
        stride_height = crop_and_paste_height - 2 * dy
        #stride_width = utils.align_up16(crop_and_paste_width)
        #stride_height = utils.align_up2(crop_and_paste_height)

        return AclImage(out_buffer, stride_width,
                        stride_height, out_buffer_size, constants.MEMORY_DVPP)
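To make the paste-area arithmetic above concrete, here is the same computation traced with hypothetical sizes (a 1920x1080 source pasted into a 416x416 output buffer):

# Worked example of the paste-area math in crop_and_paste (hypothetical sizes).
width, height = 1920, 1080                                # source picture
crop_and_paste_width, crop_and_paste_height = 416, 416    # output buffer

rx = float(width) / float(crop_and_paste_width)           # about 4.62
ry = float(height) / float(crop_and_paste_height)         # about 2.60
if rx > ry:
    dx, r = 0, rx
    dy = int((crop_and_paste_height - height / r) / 2)    # 91
else:
    dy, r = 0, ry
    dx = int((crop_and_paste_width - width / r) / 2)

pasteRightOffset = int(crop_and_paste_width - 2 * dx)     # 416 -> 415 (forced odd)
pasteBottomOffset = int(crop_and_paste_height - 2 * dy)   # 234 -> 233 (forced odd)
# The source is scaled into the top-left 416x234 region of the 416x416
# buffer, preserving its aspect ratio; the returned AclImage accordingly
# reports 416x234 (the paste size minus 2*dx and 2*dy) as its dimensions.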
Example #7
def execute(model_path):

    ## Initialization ##
    #initialize acl runtime 
    acl_resource = AclResource()
    acl_resource.init()

    ## Prepare Model ##
    # parameters for model path and model inputs
    model_parameters = {
        'model_dir': model_path,
        'width': 368, # model input width      
        'height': 368, # model input height
    }
    # prepare model instance: init (load model from file into memory)
    # model_processor: preprocessing + model inference + postprocessing
    model_processor = ModelProcessor(acl_resource, model_parameters)
    
    ## Get Input ##
    # Initialize Camera
    cap = Camera(id = 0, fps = 10)

    ## Set Output ##
    # open the presenter channel
    chan = presenteragent.presenter_channel.open_channel(BODYPOSE_CONF)
    if chan == None:
        print("Open presenter channel failed")
        return



    while True:
        ## Read one frame from Camera ## 
        img_original = cap.read()
        if not img_original:
            print('Error: Camera read failed')
            break
        # Camera Input (YUV) to RGB Image
        image_byte = img_original.tobytes()
        image_array = np.frombuffer(image_byte, dtype=np.uint8)
        img_original = YUVtoRGB(image_array)
        img_original = cv2.flip(img_original,1)

        ## Model Prediction ##
        # model_processor.predict: processing + model inference + postprocessing
        # canvas: the picture overlayed with human body joints and limbs
        canvas = model_processor.predict(img_original)
        
        ## Present Result ##
        # convert to jpeg image for presenter server display
        _,jpeg_image = cv2.imencode('.jpg',canvas)
        # construct AclImage object for presenter server
        jpeg_image = AclImage(jpeg_image, img_original.shape[0], img_original.shape[1], jpeg_image.size)
        # send to presenter server
        chan.send_detection_data(img_original.shape[0], img_original.shape[1], jpeg_image, [])

    # release the resources
    cap.release()
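YUVtoRGB is referenced but not defined in this snippet. A minimal sketch, assuming the camera delivers 1280x720 yuv420sp (NV21) frames; the sample's actual helper and exact colour format may differ:

import cv2
import numpy as np

def YUVtoRGB(byte_array, width=1280, height=720):
    # Interpret the flat uint8 buffer as one packed yuv420sp frame and
    # let OpenCV perform the colour-space conversion.
    yuv = byte_array.reshape(height * 3 // 2, width)
    return cv2.cvtColor(yuv, cv2.COLOR_YUV2RGB_NV21)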
Example #8
    def read(self):
        """Read frame from camera"""
        frame_data = CameraOutputC()
        ret = libatlas.ReadCameraFrame(self._id, byref(frame_data))
        if (ret != CAMERA_OK):
            log_error("Read camera %d failed" % (self._id))
            return None

        return AclImage(addressof(frame_data.data.contents), self._width,
                        self._height, self._size, const.MEMORY_DVPP)
Example #9
 def _get_pic_desc_data(self, pic_desc, user_data):
     pic_data = acl.media.dvpp_get_pic_desc_data(pic_desc)
     pic_data_size = acl.media.dvpp_get_pic_desc_size(pic_desc)
     ret_code = acl.media.dvpp_get_pic_desc_ret_code(pic_desc)
     if ret_code:
         channel_id, frame_id = user_data
         acl_log.log_error("Decode channel %d frame %d failed, error %d"
                           % (channel_id, frame_id, ret_code))
         acl.media.dvpp_free(pic_data)
     else:
         image = AclImage(pic_data, self._width, self._height,
                          pic_data_size, const.MEMORY_DVPP)
         self._frame_queue.put(image)
     acl.media.dvpp_destroy_pic_desc(pic_desc)
Example #10
    def jpegd(self, image):
        #decode a jpeg picture into a yuv picture
        #create the output picture desc for the conversion
        output_desc, out_buffer = self._gen_jpegd_out_pic_desc(image)
        ret = acl.media.dvpp_jpeg_decode_async(self._dvpp_channel_desc,
                                               image.data(), image.size,
                                               output_desc, self._stream)
        if ret != ACL_ERROR_NONE:
            print("dvpp_jpeg_decode_async failed ret={}".format(ret))
            return None

        ret = acl.rt.synchronize_stream(self._stream)
        if ret != ACL_ERROR_NONE:
            print("dvpp_jpeg_decode_async failed ret={}".format(ret))
            return None

        width, height, size = self._stride_yuv_size(image.width, image.height)
        return AclImage(out_buffer, width, height, size)
Example #11
 def resize(self, image, resize_width, resize_height):
     """
     Scale a yuv420sp picture to the specified size
     """
     # Generate input picture desc
     input_desc = self._gen_input_pic_desc(image)
     # Calculate the image size after scaling
     stride_width = utils.align_up16(resize_width)
     stride_height = utils.align_up2(resize_height)
     output_size = utils.yuv420sp_size(stride_width, stride_height)
     # Allocate memory for the resized picture
     out_buffer, ret = acl.media.dvpp_malloc(output_size)
     if ret != constants.ACL_ERROR_NONE:
         log_error("Dvpp malloc failed, error: ", ret)
         return None
     # Create output image
     output_desc = self._gen_output_pic_desc(resize_width, resize_height,
                                             out_buffer, output_size)
     if output_desc is None:
         log_error("Gen resize output desc failed")
         return None
     # Call the dvpp asynchronous resize interface to resize the picture
     ret = acl.media.dvpp_vpc_resize_async(self._dvpp_channel_desc,
                                           input_desc,
                                           output_desc,
                                           self._resize_config,
                                           self._stream)
     if ret != constants.ACL_ERROR_NONE:
         log_error("Vpc resize async failed, error: ", ret)
         return None
     # Wait for the resize operation to complete
     ret = acl.rt.synchronize_stream(self._stream)
     if ret != constants.ACL_ERROR_NONE:
         log_error("Resize synchronize stream failed, error: ", ret)
         return None
     # Release the resources requested for scaling
     acl.media.dvpp_destroy_pic_desc(input_desc)
     acl.media.dvpp_destroy_pic_desc(output_desc)
     return AclImage(out_buffer, stride_width,
                     stride_height, output_size, constants.MEMORY_DVPP)
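In the main functions above, this resize is typically combined with jpegd in a small pre_process helper. A sketch under the assumption that MODEL_WIDTH and MODEL_HEIGHT are the model's input dimensions (names taken from the surrounding examples); the actual sample helpers may differ:

def pre_process(image, dvpp):
    # Decode the JPEG file into yuv420sp on the device ...
    yuv_image = dvpp.jpegd(image)
    # ... then scale it to the model input size.
    resized_image = dvpp.resize(yuv_image, MODEL_WIDTH, MODEL_HEIGHT)
    if resized_image is None:
        raise RuntimeError("resize image failed")
    return resized_image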
Example #12
def main():
    # check param
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)

    # get all pictures
    image_dir = sys.argv[1]
    images_list = [
        os.path.join(image_dir, img) for img in os.listdir(image_dir)
        if os.path.splitext(img)[1] in const.IMG_EXT
    ]

    acl_resource = AclResource()
    acl_resource.init()

    # instantiate the Cartoonization object
    cartoonization = Cartoonization(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT)

    # init
    ret = cartoonization.init()
    utils.check_ret("Cartoonization.init ", ret)

    # create dir to save result
    if not os.path.isdir('../outputs'):
        os.mkdir('../outputs')

    for image_file in images_list:
        # read image
        image = AclImage(image_file)
        # preprocess
        crop_and_paste_image = cartoonization.pre_process(image)
        print("[Sample] pre process end")
        # inference
        result = cartoonization.inference([
            crop_and_paste_image,
        ])
        # postprocess
        cartoonization.post_process(result, image_file, image)
Example #13
    def jpege(self, image):
        """
        Convert yuv420sp pictures to jpeg pictures
        """
        # create input image
        input_desc = self._gen_input_pic_desc(image)
        # Predict the memory size required for conversion
        output_size, ret = acl.media.dvpp_jpeg_predict_enc_size(
            input_desc, self._jpege_config)
        if (ret != constants.ACL_ERROR_NONE):
            log_error("Predict jpege output size failed")
            return None
        # Request memory required for conversion
        output_buffer, ret = acl.media.dvpp_malloc(output_size)
        if (ret != constants.ACL_ERROR_NONE):
            log_error("Malloc jpege output memory failed")
            return None
        output_size_array = np.array([output_size], dtype=np.int32)
        output_size_ptr = acl.util.numpy_to_ptr(output_size_array)

        # Call jpege asynchronous interface to convert pictures
        ret = acl.media.dvpp_jpeg_encode_async(self._dvpp_channel_desc,
                                               input_desc, output_buffer,
                                               output_size_ptr,
                                               self._jpege_config,
                                               self._stream)
        if (ret != constants.ACL_ERROR_NONE):
            log_error("Jpege failed, ret ", ret)
            return None
        # Wait for the conversion to complete
        ret = acl.rt.synchronize_stream(self._stream)
        if (ret != constants.ACL_ERROR_NONE):
            print("Jpege synchronize stream, failed, ret ", ret)
            return None
        # Release resources
        acl.media.dvpp_destroy_pic_desc(input_desc)
        return AclImage(
            output_buffer, image.width, image.height, int(
                output_size_array[0]), constants.MEMORY_DVPP)
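The size argument of dvpp_jpeg_encode_async is an in/out parameter: the capacity predicted by dvpp_jpeg_predict_enc_size goes in, and the actual encoded length is written back once the stream has been synchronized. That is why the code above wraps it in a one-element numpy array instead of passing the integer directly; the pattern in isolation:

import numpy as np
import acl

output_size = 102400  # placeholder: capacity predicted by dvpp_jpeg_predict_enc_size
output_size_array = np.array([output_size], dtype=np.int32)
output_size_ptr = acl.util.numpy_to_ptr(output_size_array)
# ... acl.media.dvpp_jpeg_encode_async(..., output_size_ptr, ...) writes the
# actual encoded length back through this pointer once the stream has been
# synchronized with acl.rt.synchronize_stream(...) ...
encoded_size = int(output_size_array[0])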
Example #14
 def resize(self, image, resize_width, resize_height):
     # resize a yuv420sp image to the specified size
     # generate input image desc
     input_desc = self._gen_input_pic_desc(image)
     # calculate resized image size
     stride_width = align_up16(resize_width)
     stride_height = align_up2(resize_height)
     output_size = yuv420sp_size(stride_width, stride_height)
     # allocate memory for resized image
     out_buffer, ret = acl.media.dvpp_malloc(output_size)
     if ret != ACL_ERROR_NONE:
         print("Dvpp malloc failed, error: ", ret)
         return None
     #create output desc
     output_desc = self._gen_output_pic_desc(resize_width, resize_height,
                                             out_buffer, output_size)
     if output_desc == None:
         print("Gen resize output desc failed")
         return None
     # call DVPP asynchronous resize interface to resize image
     ret = acl.media.dvpp_vpc_resize_async(self._dvpp_channel_desc,
                                           input_desc,
                                           output_desc,
                                           self._resize_config,
                                           self._stream)
     if ret != ACL_ERROR_NONE:
         print("Vpc resize async failed, error: ", ret)
         return None
     # wait for resize to complete
     ret = acl.rt.synchronize_stream(self._stream)
     if ret != ACL_ERROR_NONE:
         print("Resize synchronize stream failed, error: ", ret)
         return None
     # release allocated memory for resize
     acl.media.dvpp_destroy_pic_desc(input_desc)
     acl.media.dvpp_destroy_pic_desc(output_desc)
     return AclImage(out_buffer, stride_width,
                     stride_height, output_size, MEMORY_DVPP)
Example #15
 def resize(self, image, resize_width, resize_height):
     #scale a yuv420sp picture to the specified size
     #first generate the input picture desc
     input_desc = self._gen_input_pic_desc(image)
     #calculate the picture size after resizing
     stride_width = align_up16(resize_width)
     stride_height = align_up2(resize_height)
     output_size = yuv420sp_size(stride_width, stride_height)
     #allocate memory for the resized picture
     out_buffer, ret = acl.media.dvpp_malloc(output_size)
     if ret != ACL_ERROR_NONE:
         print("Dvpp malloc failed, error: ", ret)
         return None
     #create the output desc
     output_desc = self._gen_output_pic_desc(resize_width, resize_height,
                                             out_buffer, output_size)
     if output_desc == None:
         print("Gen resize output desc failed")
         return None
     #call the dvpp asynchronous resize interface to resize the picture
     ret = acl.media.dvpp_vpc_resize_async(self._dvpp_channel_desc,
                                           input_desc, output_desc,
                                           self._resize_config,
                                           self._stream)
     if ret != ACL_ERROR_NONE:
         print("Vpc resize async failed, error: ", ret)
         return None
     #wait for the resize operation to complete
     ret = acl.rt.synchronize_stream(self._stream)
     if ret != ACL_ERROR_NONE:
         print("Resize synchronize stream failed, error: ", ret)
         return None
     #release the resources allocated for resizing
     acl.media.dvpp_destroy_pic_desc(input_desc)
     acl.media.dvpp_destroy_pic_desc(output_desc)
     return AclImage(out_buffer, stride_width, stride_height, output_size,
                     MEMORY_DVPP)
Example #16
    def jpege(self, image):
        # convert yuv420sp image to jpeg image
        # create input image desc
        input_desc = self._gen_input_pic_desc(image)
        # predict memory size for conversion 
        output_size, ret = acl.media.dvpp_jpeg_predict_enc_size(
            input_desc, self._jpege_config)
        if (ret != ACL_ERROR_NONE):
            print("Predict jpege output size failed")
            return None
        # allocate memory for conversion 
        output_buffer, ret = acl.media.dvpp_malloc(output_size)
        if (ret != ACL_ERROR_NONE):
            print("Malloc jpege output memory failed")
            return None
        # the output size is both an input and an output parameter, so it must be passed as a pointer
        output_size_array = np.array([output_size], dtype=np.int32)
        output_size_ptr = acl.util.numpy_to_ptr(output_size_array)

        # call the jpege asynchronous interface to encode the image
        ret = acl.media.dvpp_jpeg_encode_async(self._dvpp_channel_desc,
                                               input_desc, output_buffer,
                                               output_size_ptr,
                                               self._jpege_config,
                                               self._stream)
        if (ret != ACL_ERROR_NONE):
            print("Jpege failed, ret ", ret)
            return None
        # wait for conversion to complete
        ret = acl.rt.synchronize_stream(self._stream)
        if (ret != ACL_ERROR_NONE):
            print("Jpege synchronize stream, failed, ret ", ret)
            return None
        # release resource 
        acl.media.dvpp_destroy_pic_desc(input_desc)
        return AclImage(output_buffer, image.width, 
                        image.height, int(output_size_array[0]), MEMORY_DVPP)
Example #17
    def jpege(self, image):
        #convert a yuv420sp picture to a jpeg picture
        #create the input picture desc
        input_desc = self._gen_input_pic_desc(image)
        #predict the memory size required for the conversion
        output_size, ret = acl.media.dvpp_jpeg_predict_enc_size(
            input_desc, self._jpege_config)
        if (ret != ACL_ERROR_NONE):
            print("Predict jpege output size failed")
            return None
        #allocate the memory required for the conversion
        output_buffer, ret = acl.media.dvpp_malloc(output_size)
        if (ret != ACL_ERROR_NONE):
            print("Malloc jpege output memory failed")
            return None
        #the output size is both an input and an output parameter, so it must be passed as a pointer
        output_size_array = np.array([output_size], dtype=np.int32)
        output_size_ptr = acl.util.numpy_to_ptr(output_size_array)

        #call the jpege asynchronous interface to encode the picture
        ret = acl.media.dvpp_jpeg_encode_async(self._dvpp_channel_desc,
                                               input_desc, output_buffer,
                                               output_size_ptr,
                                               self._jpege_config,
                                               self._stream)
        if (ret != ACL_ERROR_NONE):
            print("Jpege failed, ret ", ret)
            return None
        #wait for the conversion to complete
        ret = acl.rt.synchronize_stream(self._stream)
        if (ret != ACL_ERROR_NONE):
            print("Jpege synchronize stream, failed, ret ", ret)
            return None
        #release resources
        acl.media.dvpp_destroy_pic_desc(input_desc)
        return AclImage(output_buffer, image.width, image.height,
                        int(output_size_array[0]), MEMORY_DVPP)
Example #18
def main():
    """
    main
    """
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)

    acl_resource = AclResource()
    acl_resource.init()

    gesture = Gesture(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT)

    ret = gesture.init()
    utils.check_ret("Gesture.init ", ret)

    image_dir = sys.argv[1]
    images_list = [
        os.path.join(image_dir, img) for img in os.listdir(image_dir)
        if os.path.splitext(img)[1] in const.IMG_EXT
    ]

    if not os.path.isdir(os.path.join(SRC_PATH, "../outputs")):
        os.mkdir(os.path.join(SRC_PATH, "../outputs"))

    for image_file in images_list:

        image = AclImage(image_file)

        resized_image = gesture.pre_process(image)
        print("pre process end")

        result = gesture.inference([
            resized_image,
        ])

        gesture.post_process(result, image_file)
Example #19
def main():
    """
    main
    """
    image_dir = os.path.join(currentPath, "data")

    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)

    acl_resource = AclResource()
    acl_resource.init()

    classify = Classify(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT)
    ret = classify.init()
    utils.check_ret("Classify init ", ret)

    images_list = [
        os.path.join(image_dir, img) for img in os.listdir(image_dir)
        if os.path.splitext(img)[1] in const.IMG_EXT
    ]

    for image_file in images_list:
        print('=== ' + os.path.basename(image_file) + '===')

        # read image
        image = AclImage(image_file)

        # Preprocess the picture
        resized_image = classify.pre_process(image)

        # Inference
        result = classify.inference([
            resized_image,
        ])

        # Post-processing
        classify.post_process(result, image_file)
Example #20
        self.callback_run_flag = False
        while self._is_thread_exit == False:
            time.sleep(0.01)
        ret = acl.util.stop_thread(self._cb_thread_id)
        log_info("[INFO] stop_thread", ret)


if __name__ == '__main__':
    ret = acl.init("")
    utils.check_ret("acl.init", ret)
    ret = acl.rt.set_device(DEVICE_ID)
    utils.check_ret("acl.rt.set_device", ret)
    run_mode, ret = acl.rt.get_run_mode()
    utils.check_ret("acl.rt.get_run_mode", ret)
    venc_handel = AclVenc()

    venc_cnt = 16
    while venc_cnt:
        # load file
        image = AclImage(VENC_FILE_PATH, 1280, 720)
        image = image.copy_to_dvpp()
        venc_handel.process(image)
        venc_cnt -= 1

    log_info("process end")
    venc_handel.finish()
    ret = acl.rt.reset_device(DEVICE_ID)
    utils.check_ret("acl.rt.reset_device", ret)
    ret = acl.finalize()
    utils.check_ret("acl.finalize", ret)
Example #21
def execute(model_path, frames_input_src, output_dir, is_presenter_server):

    ## Initialization ##
    #initialize acl runtime
    acl_resource = AclResource()
    acl_resource.init()

    ## Prepare Model ##
    # parameters for model path and model inputs
    model_parameters = {
        'model_dir': model_path,
        'width': 368,  # model input width      
        'height': 368,  # model input height
    }
    # prepare model instance: init (load model from file into memory)
    # model_processor: preprocessing + model inference + postprocessing
    model_processor = ModelProcessor(acl_resource, model_parameters)

    ## Get Input ##
    # Read the video input using OpenCV
    cap = cv2.VideoCapture(frames_input_src)

    ## Set Output ##
    if is_presenter_server:
        # if using presenter server, then open the presenter channel
        chan = presenteragent.presenter_channel.open_channel(BODYPOSE_CONF)
        if chan == None:
            print("Open presenter channel failed")
            return
    else:
        # if saving result as video file (mp4), then set the output video writer using opencv
        video_output_path = '{}/demo-{}-{}.mp4'.format(
            output_dir, os.path.basename(frames_input_src),
            str(random.randint(1, 100001)))
        video_writer = cv2.VideoWriter(video_output_path, 0x7634706d, 25,
                                       (1280, 720))
        if video_writer == None:
            print('Error: cannot get video writer from openCV')

    while (cap.isOpened()):
        ## Read one frame of the input video ##
        ret, img_original = cap.read()

        if not ret:
            print('Cannot read more, Reach the end of video')
            break

        ## Model Prediction ##
        # model_processor.predict: processing + model inference + postprocessing
        # canvas: the picture overlayed with human body joints and limbs
        canvas = model_processor.predict(img_original)

        ## Present Result ##
        if is_presenter_server:
            # convert to jpeg image for presenter server display
            _, jpeg_image = cv2.imencode('.jpg', canvas)
            # construct AclImage object for presenter server
            jpeg_image = AclImage(jpeg_image, img_original.shape[0],
                                  img_original.shape[1], jpeg_image.size)
            # send to presenter server
            chan.send_detection_data(img_original.shape[0],
                                     img_original.shape[1], jpeg_image, [])

        else:
            # save to video
            video_writer.write(canvas)

    # release the resources
    cap.release()
    if not is_presenter_server:
        video_writer.release()
Example #22
def main():
    """main"""
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)

    acl_resource = AclResource()
    acl_resource.init()
    model = Model(MODEL_PATH)
    dvpp = Dvpp(acl_resource)

    image_dir = sys.argv[1]
    images_list = [
        os.path.join(image_dir, img) for img in os.listdir(image_dir)
        if os.path.splitext(img)[1] in const.IMG_EXT
    ]

    # Create a directory to save inference results
    if not os.path.isdir('./outputs'):
        os.mkdir('./outputs')

    # Create a directory to save the intermediate results of the large image detection
    if not os.path.isdir('./bigpic'):
        os.mkdir('./bigpic')

    # Create a directory to save the results of the big picture cropping inference
    outCrop = os.path.join('./bigpic', 'output')
    if not os.path.isdir(outCrop):
        os.mkdir(outCrop)

    # Create a directory to save the pieces cropped from the large pictures
    cropImg = os.path.join('./bigpic', 'cropimg')
    if not os.path.isdir(cropImg):
        os.mkdir(cropImg)

    image_info = construct_image_info()

    for image_file in images_list:
        imagename = get_file_name(image_file)
        tempfile = os.path.splitext(imagename)[0]

        imgdic = {}
        imgdic['name'] = imagename
        obj_res = []

        img = cv2.imread(image_file, -1)
        (height, width, depth) = img.shape
        if width > 1000 and height > 1000:
            # Create a directory to save the divided pictures of each big picture
            crop_target = os.path.join(cropImg, tempfile)
            if not os.path.isdir(crop_target):
                os.mkdir(crop_target)

            # Create a directory to save the inference results of each large image
            out_target = os.path.join(outCrop, tempfile)
            if not os.path.isdir(out_target):
                os.mkdir(out_target)

            # Large image clipping function
            crop_picture(image_file, crop_target)
            cropimg_list = [
                os.path.join(crop_target, imgs)
                for imgs in os.listdir(crop_target)
                if os.path.splitext(imgs)[1] in const.IMG_EXT
            ]

            # After the crop function finishes, the tiles cut from the
            # big picture have been saved into the crop_target folder
            for cropimg_file in cropimg_list:
                print("the crop filename is :\t", cropimg_file)
                image = AclImage(cropimg_file)
                resized_image = pre_process(image, dvpp)
                result = model.execute([resized_image, image_info])
                resdic = post_process_big(result, image, cropimg_file,
                                          out_target)
                obj_res.extend(resdic)

            imgdic['object_result'] = obj_res

            merge_picture(out_target, tempfile)

        # Read in the picture, if the picture size is less than 1000x1000,
        # it will be read in and processed normally
        else:
            print("detect the small picture")
            image = AclImage(image_file)
            resized_image = pre_process(image, dvpp)
            print("pre process end")
            result = model.execute([resized_image, image_info])
            resdic = post_process(result, image, image_file)
            obj_res.extend(resdic)
            imgdic['object_result'] = obj_res
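crop_picture and merge_picture are used above but not shown on this page. Purely as an illustration, a tiling helper along these lines could split an oversized picture into fixed-size patches; the real sample's tile size, file naming and overlap handling may differ:

import os
import cv2

def crop_picture(image_file, crop_target, tile_size=1000):
    # Split a large picture into tile_size x tile_size patches and save
    # each patch into crop_target (illustrative sketch only).
    img = cv2.imread(image_file, -1)
    height, width = img.shape[:2]
    base = os.path.splitext(os.path.basename(image_file))[0]
    for row, top in enumerate(range(0, height, tile_size)):
        for col, left in enumerate(range(0, width, tile_size)):
            tile = img[top:top + tile_size, left:left + tile_size]
            out_name = "{}_{}_{}.jpg".format(base, row, col)
            cv2.imwrite(os.path.join(crop_target, out_name), tile)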
Example #23
def execute(model_path):

    ## Initialization ##
    #initialize acl runtime 
    acl_resource = AclResource()
    acl_resource.init()

    # load offline model for face detection
    model_face = Model(acl_resource, MODEL_PATH_FACE)
    model_head_pose = Model(acl_resource, MODEL_PATH_HEAD_POSE)
    
    ## Prepare Model ##
    # parameters for model path and model inputs
    model_parameters = {
        'model_dir': model_path,
        'width': 368, # model input width      
        'height': 368, # model input height
    }
    # prepare model instance: init (load model from file into memory)
    # model_processor: preprocessing + model inference + postprocessing
    model_processor = ModelProcessor(acl_resource, model_parameters)
    last_five_frame_result = [] 

    # Initialize Camera
    cap = Camera(id = 0, fps = 10)

    # Read reference images
    img_left1 = cv2.imread(LEFT1_PATH)
    img_left2 = cv2.imread(LEFT2_PATH)
    img_right1 = cv2.imread(RIGHT1_PATH)
    img_right2 = cv2.imread(RIGHT2_PATH)
    img_stop = cv2.imread(STOP_PATH)
    # Get reference output
    canvas_left1,joint_list_left1 = model_processor.predict(img_left1)
    canvas_left2,joint_list_left2 = model_processor.predict(img_left2)
    canvas_right1,joint_list_right1 = model_processor.predict(img_right1)
    canvas_right2,joint_list_right2 = model_processor.predict(img_right2)
    canvas_stop,joint_list_stop = model_processor.predict(img_stop)
    # Get angles from reference images
    angle_left1=getangle(joint_list_left1)
    angle_left2=getangle(joint_list_left2)
    angle_right1=getangle(joint_list_right1)
    angle_right2=getangle(joint_list_right2)
    angle_stop=getangle(joint_list_stop)
    # Initialize count
    countleft=0
    countright=0
    countstop=0

    ## Presenter Server Output ##
    chan = presenteragent.presenter_channel.open_channel(BODYPOSE_CONF)
    if chan == None:
        print("Open presenter channel failed")
        return

    predict = StateMachine()

    while True:
        ## Read one frame of the input video ## 
        img_original = cap.read()

        if not img_original:
            print('Error: Camera read failed')
            break

        ## HEAD POSE BEGIN ##
        # Camera Input (YUV) to RGB Image
        image_byte = img_original.tobytes()
        image_array = np.frombuffer(image_byte, dtype=np.uint8) 
        img_original = YUVtoRGB(image_array)
        img_original = cv2.flip(img_original, -1)
        
        # Make copy of image for head model processing and body model processing 
        img_bodypose = copy.deepcopy(img_original)
        img_headpose = copy.deepcopy(img_original)
        
        ## Model Prediction ##
        # model_processor.predict: processing + model inference + postprocessing
        # canvas: the picture overlayed with human body joints and limbs
        # img_bodypose is modified with skeleton
        canvas, joint_list_input = model_processor.predict(img_bodypose)
        
        angle_input=getangle(joint_list_input)


        dif5=abs(angle_input-angle_left1)
        dif6=abs(angle_input-angle_left2)
        dif7=abs(angle_input-angle_right1)
        dif8=abs(angle_input-angle_right2)
        dif9=abs(angle_input-angle_stop)
        
        result = "invalid"
        # last_five_result = "invalid"
        if all( i < 25 for i in dif5):
            result = "left1"
        elif all( i < 25 for i in dif6):
            result = "left2"
        elif all( i < 25 for i in dif7):
            result = "right1"
        elif all( i < 25 for i in dif8):
            result = "right2"
        elif all( i < 25 for i in dif9):
            result = "stop"            
        
        font                   = cv2.FONT_HERSHEY_SIMPLEX
        bottomLeftCornerOfText = (10, 100)
        fontScale              = 1
        fontColor              = (255,255,255)
        lineType               = 2

        cv2.putText(img_bodypose, result, 
            bottomLeftCornerOfText, 
            font, 
            fontScale,
            fontColor,
            lineType)
    

        ## FACE DETECTION MODEL BEGIN ##
        input_image = PreProcessing_face(img_headpose)

        face_flag = False
        try:
            resultList_face  = model_face.execute([input_image]).copy()
            # draw bounding box on img_bodypose
            xmin, ymin, xmax, ymax = PostProcessing_face(img_bodypose, resultList_face)
            bbox_list = [xmin, ymin, xmax, ymax]
            face_flag = True
        except:
            print('No face detected')
        # FACE DETECTION MODEL END ##

        ## HEADPOSE BEGIN ##
        head_status_string = "No output"
        if face_flag is True:
            input_image = PreProcessing_head(img_headpose, bbox_list)
            try:
                resultList_head = model_head_pose.execute([input_image]).copy()
                # draw headpose points on image
                facepointList, head_status_string, canvas = PostProcessing_head(resultList_head, bbox_list, img_bodypose)
                print('Headpose:', head_status_string)
            except Exception as e:
                print('No head pose estimation output')

        headpose_result = head_status_string
        ## HEADPOSE END ##

        predict.staterunner(result,headpose_result)
        ## Present Result ##
        # convert to jpeg image for presenter server display
        _,jpeg_image = cv2.imencode('.jpg', img_bodypose)
        # construct AclImage object for presenter server
        jpeg_image = AclImage(jpeg_image, img_original.shape[0], img_original.shape[1], jpeg_image.size)
        # send to presenter server
        chan.send_detection_data(img_original.shape[0], img_original.shape[1], jpeg_image, [])

    cap.release()