Example #1
def main():
    """
    acl resource initialization
    """

    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    #ACL resource initialization
    acl_resource = AclResource()
    acl_resource.init()

    model = Model(model_path)
    images_list = [
        os.path.join(INPUT_DIR, img) for img in os.listdir(INPUT_DIR)
        if os.path.splitext(img)[1] in IMG_EXT
    ]

    for pic in images_list:

        orig_shape, orig_l, l_data = preprocess(pic)
        result_list = model.execute([
            l_data,
        ])
        postprocess(result_list, pic, orig_shape, orig_l)
        break  # process only the first image
    print("Execute end")
Example #2
def main(image_dirs, masks_dirs):

    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)

    #acl init
    acl_resource = AclResource()
    stream = acl_resource.init()
    #load model
    model = Model(MODEL_PATH)
    matmul_om = Model(MODEL_MATMUL_PATH)

    paths_img, paths_mask = get_imgs_masks_file_list(image_dirs, masks_dirs)
    for i in range(len(paths_img)):
        print('==========')
        raw_img, raw_mask = readimages(paths_img[i], paths_mask[i])
        print("file: %s, shape=%s" % (paths_img[i], raw_img.shape))

        img_large, mask_large, img_512, mask_512 = pre_process(raw_img, raw_mask)

        inpainted_512, attention, mask_512_new = inference(model, [img_512, mask_512])

        # post-processing
        res_raw_size = post_process(matmul_om, raw_img, img_large, mask_large,
                                    inpainted_512[0], img_512, mask_512_new[0],
                                    attention[0])
        filename = os.path.join(OUTPUT_DIR, 'outpaint_' + os.path.basename(paths_img[i]))
        cv2.imwrite(filename, res_raw_size)

    print("Execute end")
Example #3
def main():
    """
    main
    """
    #create output directory
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)

    #acl init
    acl_resource = AclResource()
    acl_resource.init()

    #load model
    model = Model(MODEL_PATH)
    src_dir = os.listdir(INPUT_DIR)

    #infer picture
    for pic in src_dir:
        if not pic.lower().endswith(('.bmp', '.dib', '.png', '.jpg', 
                                    '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff')):
            print('%s is not a picture, ignore this file and continue' % pic)
            continue
        #read picture
        pic_path = os.path.join(INPUT_DIR, pic)
        bgr_img = cv2.imread(pic_path).astype(np.float32)

        #get pic data
        orig_shape, rgb_img = preprocess(bgr_img)

        #inference
        result_list = model.execute([rgb_img, ])    

        #postprocess
        postprocess(result_list, orig_shape, bgr_img, pic)
    print("Execute end")
Example #4
    def __init__(self,
                 device_id,
                 model_path,
                 model_input_width,
                 model_input_height):
        self.device_id = device_id      # int
        self.model_path = model_path    # string
        self.model_id = None            # pointer
        self.context = None             # pointer

        self.input_data = None
        self.output_data = None
        self.model_desc = None          # pointer when using
        self.load_input_dataset = None
        self.load_output_dataset = None
        self.init_resource()

        self._model_input_width = model_input_width
        self._model_input_height = model_input_height

        self.model_process = Model(self.context,
                                   self.stream,
                                   self.model_path)

        self.dvpp_process = Dvpp(self.stream,
                                 model_input_width,
                                 model_input_height)

        self.sing_op = SingleOp(self.stream)
Example #5
def main():
    acl_resource = AclResource()
    acl_resource.init()

    detect = VggSsd(acl_resource, MODEL_WIDTH, MODEL_HEIGHT)
    model = Model(MODEL_PATH)

    chan = presenteragent.presenter_channel.open_channel(MASK_DETEC_CONF)
    if chan is None:
        print("Open presenter channel failed")
        return

    lenofUrl = len(sys.argv)

    if lenofUrl <= 1:
        print("[ERROR] Please input mp4/Rtsp URL")
        exit()
    elif lenofUrl >= 3:
        print("[ERROR] param input Error")
        exit()
    URL = sys.argv[1]
    URL1 = re.match('rtsp://', URL)
    URL2 = re.search(r'\.mp4', URL)

    if URL1 is None and URL2 is None:
        print("[ERROR] should input correct URL")
        exit()
    cap = video.AclVideo(URL)

    while True:
        # Read a frame
        ret, image = cap.read()

        if ret != 0:
            print("read None image, break")
            break

        #pre process
        model_input = detect.pre_process(image)
        if model_input is None:
            print("Pre process image failed")
            break

        # inference
        result = model.execute(model_input)
        if result is None:
            print("execute mode failed")
            break

        # post process
        jpeg_image, detection_list = detect.post_process(result, image)
        if jpeg_image is None:
            print("The jpeg image for present is None")
            break

        chan.send_detection_data(CAMERA_FRAME_WIDTH, CAMERA_FRAME_HEIGHT,
                                 jpeg_image, detection_list)
Example #6
    def __init__(self, acl_resource, model_path, model_width, model_height):
        self.total_buffer = None
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height

        self._model = Model(model_path)
        self._dvpp = Dvpp(acl_resource)
        print("The App arg is __init__")
Example #7
def main():
    """
    acl resource initialization
    """
    acl_resource = AclResource()
    acl_resource.init()

    #load model
    model = Model(model_path)
    chan = presenteragent.presenter_channel.open_channel(COLORIZATION_CONF)
    if chan is None:
        print("Open presenter channel failed")
        return

    lenofUrl = len(sys.argv)

    if lenofUrl <= 1:
        print("[ERROR] Please input mp4/Rtsp URL")
        exit()
    elif lenofUrl >= 3:
        print("[ERROR] param input Error")
        exit()
    URL = sys.argv[1]
    URL1 = re.match('rtsp://', URL)
    URL2 = re.search(r'\.mp4', URL)

    if URL1 is None and URL2 is None:
        print("[ERROR] should input correct URL")
        exit()

    cap = cv.VideoCapture(URL)

    #Get the total number of frames
    frames_num = cap.get(cv.CAP_PROP_FRAME_COUNT)
    currentFrames = 0

    while True:
        #read image
        ret, frame = cap.read()

        if not ret:
            print("read None image, break")
            break

        if currentFrames == frames_num - 1:
            currentFrames = 0
            cap.set(cv.CAP_PROP_POS_FRAMES, 0)  # rewind to the first frame

        currentFrames += 1

        #Gets the L channel value
        orig_shape, orig_l, l_data = preprocess(frame)
        result_list = model.execute([l_data,])
        result_jpeg = postprocess(result_list, orig_shape, orig_l)
        chan.send_image(orig_shape[0], orig_shape[1], result_jpeg)
Example #8
def main():
    """
    main
    """
    if (len(sys.argv) != 2):
        print("Please input video path")
        exit(1)

    #acl init
    acl_resource = AclResource()
    acl_resource.init()
    #load model
    model = Model(MODEL_PATH)

    #open video
    video_path = sys.argv[1]
    print("open video ", video_path)
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    Width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    Height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    lf.set_img_size((Width, Height))

    #create output directory
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    output_Video = os.path.basename(video_path)
    output_Video = os.path.join(OUTPUT_DIR, output_Video)

    fourcc = cv2.VideoWriter_fourcc(
        *'mp4v')  # DIVX, XVID, MJPG, X264, WMV1, WMV2
    outVideo = cv2.VideoWriter(output_Video, fourcc, fps, (Width, Height))

    # Read until video is completed
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            #preprocess
            orig_shape, rgb_img, framecv = preprocess(frame)
            #inference
            result_list = model.execute([
                rgb_img,
            ])
            #postprocess
            frame = postprocess(result_list, framecv, orig_shape)
            outVideo.write(frame)
        # Break the loop
        else:
            break
    cap.release()
    outVideo.release()
    print("Execute end")
Example #9
    def __init__(self, acl_resource, params):
        self._acl_resource = acl_resource
        self.params = params
        self._model_width = params['width']
        self._model_height = params['height']

        assert 'model_dir' in params and params[
            'model_dir'] is not None, 'Review your param: model_dir'
        assert os.path.exists(
            params['model_dir']), "Model directory doesn't exist {}".format(
                params['model_dir'])

        # load model from path, and get model ready for inference
        self.model = Model(acl_resource, params['model_dir'])
Example #10
 def __init__(self, device_id, model_path, vdec_out_path, model_input_width,
              model_input_height):
     self.device_id = device_id  # int
     self.model_path = model_path  # string
     self.context = None  # pointer
     self.stream = None
     self.model_input_width = model_input_width
     self.model_input_height = model_input_height
     self.init_resource()
     self.model_process = Model(self.context, self.stream, model_path)
     self.vdec_process = Vdec(self.context, self.stream, vdec_out_path)
     self.dvpp_process = Dvpp(self.stream, model_input_width,
                              model_input_height)
     self.model_input_width = model_input_width
     self.model_input_height = model_input_height
     self.vdec_out_path = vdec_out_path
Example #11
    def init(self):
        self._init_resource()
        self._dvpp = Dvpp(self.stream, self.run_mode)

        ret = self._dvpp.init_resource()
        if ret != SUCCESS:
            print("Init dvpp failed")
            return FAILED

        self._model = Model(self.run_mode, self._model_path)
        ret = self._model.init_resource()
        if ret != SUCCESS:
            print("Init model failed")
            return FAILED

        return SUCCESS
Example #12
class Classify(object):
    """
    define gesture class
    """
    def __init__(self, acl_resource, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._dvpp = Dvpp(acl_resource)
        self._model = Model(model_path)

    def __del__(self):
        if self._dvpp:
            del self._dvpp
        print("[Sample] class Samle release source success")

    def pre_process(self, image):
        """
        pre_process
        """
        yuv_image = self._dvpp.jpegd(image)
        resized_image = self._dvpp.resize(yuv_image, self._model_width,
                                          self._model_height)
        print("resize yuv end")
        return resized_image

    def inference(self, resized_image):
        """
        inference
        """
        return self._model.execute([
            resized_image,
        ])

    def post_process(self, infer_output, image_file):
        """
        post_process
        """
        print("post process")
        data = infer_output[0]
        vals = data.flatten()
        top_k = vals.argsort()[-1:-6:-1]
        print("images:{}".format(image_file))
        print("======== top5 inference results: =============")
        for n in top_k:
            object_class = image_net_classes.get_image_net_class(n)
            print("label:%d  confidence: %f, class: %s" %
                  (n, vals[n], object_class))

        #using pillow, the category with the highest confidence is written on the image and saved locally
        if len(top_k):
            object_class = image_net_classes.get_image_net_class(top_k[0])
            output_path = os.path.join(os.path.join(SRC_PATH, "../outputs"),
                                       os.path.basename(image_file))
            origin_img = Image.open(image_file)
            draw = ImageDraw.Draw(origin_img)
            font = ImageFont.load_default()
            draw.text((10, 50), object_class, font=font, fill=255)
            origin_img.save(output_path)
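A hedged usage sketch for the Classify class above; the AclImage helper, model path, input size, and test image are assumptions borrowed from similar samples, not part of the original snippet:

# hypothetical usage of the Classify class above (names and paths are assumed)
acl_resource = AclResource()
acl_resource.init()

classify = Classify(acl_resource, "./model/gesture.om", 224, 224)  # assumed model path and input size
image = AclImage("./data/gesture_test.jpg")                        # assumed AclImage wrapper and test image
resized = classify.pre_process(image)
result = classify.inference(resized)
classify.post_process(result, "./data/gesture_test.jpg")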
Example #13
def CreateGraphWithoutDVPP(model_name):
    acl_resource = AclResource()
    acl_resource.init()

    MODEL_PATH = model_name + ".om"
    model = Model(acl_resource, MODEL_PATH)

    return model
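A minimal usage sketch for this helper; the model name, input shape, and dtype are assumptions:

# hypothetical usage of CreateGraphWithoutDVPP (model name and input shape are assumed)
import numpy as np

model = CreateGraphWithoutDVPP("resnet50")
dummy_input = np.zeros((1, 3, 224, 224), dtype=np.float32)  # assumed NCHW input shape
outputs = model.execute([dummy_input])
print("number of output tensors:", len(outputs))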
Example #14
class Classify(object):
    """
    Classify
    """
    def __init__(self, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._model = Model(model_path)

    @display_time
    def pre_process(self, image):
        """
        pre_process
        """
        bgr_img = cv2.imread(image).astype(np.float32)
        bgr_img = bgr_img / 255.0
        resized_image = cv2.resize(bgr_img, (299, 299))
        return resized_image

    @display_time
    def inference(self, resized_image):
        """
        inference
        """
        return self._model.execute([
            resized_image,
        ])

    @display_time
    def post_process(self, infer_output, image_file):
        """
        post_process
        """
        print("post process")
        data = infer_output[0]
        vals = data.flatten()
        top_k = vals.argsort()[-1:-6:-1]
        print("images:{}".format(image_file))
        print("======== top5 inference results: =============")
        for n in top_k:
            object_class = get_image_net_class(n)
            print("label:%d  confidence: %f, class: %s" %
                  (n, vals[n], object_class))

        #using OpenCV, the category with the highest confidence is written on the image and saved locally
        if len(top_k):
            object_class = get_image_net_class(top_k[0])
            object_value = vals[top_k[0]]
            output_path = os.path.join(os.path.join(SRC_PATH, "../outputs"),
                                       os.path.basename(image_file))
            origin_img = cv2.imread(image_file)
            font = cv2.FONT_HERSHEY_SIMPLEX
            origin_img = cv2.putText(origin_img, object_class, (10, 100), font,
                                     3, (255, 255, 255), 3)
            origin_img = cv2.putText(origin_img, str(object_value), (10, 200),
                                     font, 2, (255, 255, 255), 3)
            cv2.imwrite(output_path, origin_img)
Example #15
class ModelProcessor:
    def __init__(self, acl_resource, params):
        self._acl_resource = acl_resource
        self.params = params
        self._model_width = params['width']
        self._model_height = params['height']

        assert 'model_dir' in params and params[
            'model_dir'] is not None, 'Review your param: model_dir'
        assert os.path.exists(
            params['model_dir']), "Model directory doesn't exist {}".format(
                params['model_dir'])

        # load model from path, and get model ready for inference
        self.model = Model(acl_resource, params['model_dir'])

    def predict(self, img_original):

        #preprocess image to get 'model_input'
        model_input = self.preprocess(img_original)

        # execute model inference
        infer_output = self.model.execute([model_input])

        # postprocessing:
        category = self.post_process(infer_output)

        return category

    def preprocess(self, img_original):
        '''
        preprocessing: resize image to model required size, and normalize value
        '''
        scaled_img_data = cv2.resize(img_original,
                                     (self._model_width, self._model_height))
        normalized_img = scaled_img_data - np.array([123, 117, 104])
        # the converted Caffe model needs NCHW input; the original image is NHWC, so transpose it (use .copy() to make the memory layout contiguous)
        preprocessed_img = np.asarray(normalized_img,
                                      dtype=np.float16).transpose([2, 0,
                                                                   1]).copy()

        return preprocessed_img

    def post_process(self, infer_output):
        print("post process")
        data = infer_output[0]
        vals = data.flatten()
        top_k = vals.argsort()[-1:-6:-1]
        print("======== top5 inference results: =============")
        for n in top_k:
            object_class = get_image_net_class(n)
            print("label:%d  confidence: %f, class: %s" %
                  (n, vals[n], object_class))

        object_class = get_image_net_class(top_k[0])
        return object_class
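A short usage sketch for this classification ModelProcessor, with hypothetical parameter values and image path:

# hypothetical usage of the ModelProcessor above (parameter values are assumed)
acl_resource = AclResource()
acl_resource.init()

params = {'width': 224, 'height': 224, 'model_dir': './model/googlenet.om'}  # assumed values
processor = ModelProcessor(acl_resource, params)
img = cv2.imread('./data/dog.jpg')  # hypothetical test image (BGR, as read by OpenCV)
print("predicted class:", processor.predict(img))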
Example #16
def main():
    """
    acl resource initialization
    """

    acl_resource = AclResource()
    acl_resource.init()

    model = Model(model_path)

    with codecs.open(dict_path, 'r', 'utf-8') as reader:
        for line in reader:
            token = line.strip()
            token_dict[token] = len(token_dict)

    with open(sample_path, "r") as f:
        text = f.read()

    with open(label_path, "r", encoding="utf-8") as f:
        label_dict = json.loads(f.read())

    X1, X2 = preprocess(text)

    X1 = np.ascontiguousarray(X1, dtype='float32')
    X2 = np.ascontiguousarray(X2, dtype='float32')

    X1 = np.expand_dims(X1, 0)
    X2 = np.expand_dims(X2, 0)
    s_time = time.time()

    result_list = model.execute([X1, X2])
    e_time = time.time()
    print(result_list)
    y = postprocess(result_list)

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    save_to_file(os.path.join(output_dir, 'prediction_label.txt'), label_dict[str(y)])
    print("Original text: %s" % text)
    print("Prediction label: %s" % label_dict[str(y)])

    print("Cost time:", e_time - s_time)
    print("Execute end")
Example #17
class ModelProcessor:
    def __init__(self, acl_resource, params):
        self._acl_resource = acl_resource
        self.params = params
        self._model_width = params['width']
        self._model_height = params['height']

        assert 'model_dir' in params and params[
            'model_dir'] is not None, 'Review your param: model_dir'
        assert os.path.exists(
            params['model_dir']), "Model directory doesn't exist {}".format(
                params['model_dir'])

        # load model from path, and get model ready for inference
        self.model = Model(acl_resource, params['model_dir'])

    def predict(self, img_original):

        #preprocess image to get 'model_input'
        model_input = self.preprocess(img_original)

        # execute model inference
        result = self.model.execute([model_input])

        # postprocessing: use the heatmaps (the second output of the model) to get the joints and limbs of the human body
        # Note: the model has multiple outputs; here we use a simplified method that only uses the heatmap for body joints,
        #       and the heatmap has shape [1,14]: each value corresponds to the position of one of the 14 joints.
        #       The value is the index into the 92*92 heatmap (flattened to one dimension).
        heatmaps = result[1]

        # calculate the scale of the original image over the heatmap; note: img_original.shape[0] is the height
        scale = np.array([
            img_original.shape[1] / heatmap_width,
            img_original.shape[0] / heatmap_height
        ])

        canvas, joint_list = decode_pose(heatmaps[0], scale, img_original)
        return canvas, joint_list

    def preprocess(self, img_original):
        '''
        preprocessing: resize image to model required size, and normalize value between [0,1]
        '''
        scaled_img_data = cv2.resize(img_original,
                                     (self._model_width, self._model_height))
        preprocessed_img = np.asarray(scaled_img_data, dtype=np.float32) / 255.

        return preprocessed_img
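A brief usage sketch for this pose-estimation ModelProcessor; the model path, input size, and test image are assumptions:

# hypothetical usage of the pose ModelProcessor above (values are assumed)
acl_resource = AclResource()
acl_resource.init()

params = {'width': 368, 'height': 368, 'model_dir': './model/body_pose.om'}  # assumed values
pose_processor = ModelProcessor(acl_resource, params)
frame = cv2.imread('./data/person.jpg')  # hypothetical test image
canvas, joint_list = pose_processor.predict(frame)
cv2.imwrite('./outputs/pose_person.jpg', canvas)  # canvas has the detected joints drawn on it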
Example #18
    print('Cosine similarity between faces: ', cos_sim)


if __name__ == '__main__':

    model_name = 'sphereface'
    img_file1 = cv2.imread('face1.jpeg')
    img_file2 = cv2.imread('test_human.jpg')

    # initialize acl resource
    acl_resource = AclResource()
    acl_resource.init()

    # load offline model
    MODEL_PATH = model_name + ".om"
    print("MODEL_PATH:", MODEL_PATH)
    model = Model(acl_resource, MODEL_PATH)

    # load image file
    input_image1 = PreProcessing(img_file1)
    print(input_image1.shape)
    input_image2 = PreProcessing(img_file2)
    print(input_image2.shape)

    # om model inference
    resultList1 = model.execute([input_image1])[0].copy()
    resultList2 = model.execute([input_image2])[0].copy()

    #postprocessing to compare results
    PostProcessing(resultList1, resultList2)
Example #19
if __name__ == '__main__':

    model_name = 'mask_detection'
    # input image: change this to the image you want to use
    img_file = 'test_img/mask_1.jpg'

    # initialize graph and resource
    acl_resource = AclResource()
    acl_resource.init()

    # model path
    MODEL_PATH = model_name + ".om"
    print("MODEL_PATH:", MODEL_PATH)
    # create mask detection model
    model = Model(acl_resource, MODEL_PATH)

    # original bgr image for plotting
    ori_img = cv2.imread(img_file)
    # image preprocessing: "data" is the preprocesses image for inference, "orig" is the original image
    data, orig = PreProcessing(img_file)

    # om model inference
    resultList = model.execute([data])

    # image postprocessing: decode the model output into bbox information and use NMS to select bboxes; returns detected bbox info: coordinates, confidence score and category
    result_return = post_process(resultList, orig)

    print("result = ", result_return)

    # plot bbox/label on the image and save
Example #20
def main():
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    if not os.path.exists(OUTPUT_TXT_DIR):
        os.mkdir(OUTPUT_TXT_DIR)
    #ACL resource initialization
    acl_resource = AclResource()
    acl_resource.init()
    #load model
    model = Model(acl_resource, MODEL_PATH)
    src_dir = os.listdir(INPUT_DIR)
    print("src_dir = ", src_dir)
    #read images one by one from the data directory and run inference

    t_pre = 0
    t_for = 0
    t_post = 0

    for pic in src_dir:
        #read image
        pic_path = os.path.join(INPUT_DIR, pic)
        bgr_img = cv.imread(pic_path)
        #preprocess
        t1 = time.time()
        #data, w, h= preprocess(pic_path)
        data, w, h = preprocess_cv2(bgr_img)
        t2 = time.time()
        t_pre += (t2-t1)
        #run model inference
        result_list = model.execute([data,])
        t3 = time.time()
        t_for += (t3-t2)
        #process inference results
        result_return = post_process(result_list, w, h)
        t4 = time.time()
        t_post += (t4-t3)
        print("result = ", result_return)
        print("preprocess cost:", t2-t1)
        print("forward cost:", t3-t2)
        print("proprocess cost:", t4-t3)
        for i in range(len(result_return['detection_classes'])):
            box = result_return['detection_boxes'][i]
            class_name = result_return['detection_classes'][i]
            confidence = result_return['detection_scores'][i]
            cv.rectangle(bgr_img, (int(box[1]), int(box[0])), (int(box[3]), int(box[2])), colors[i%6], 2)
            p3 = (max(int(box[1]), 15), max(int(box[0]), 15))
            out_label = class_name            
            cv.putText(bgr_img, out_label, p3, cv.FONT_ITALIC, 0.6, colors[i%6], 1)
        output_file = os.path.join(OUTPUT_DIR, "out_" + pic)
        print("output:%s" % output_file)
        cv.imwrite(output_file, bgr_img)

        pic_name = pic.split('.')[0]
        predict_result_path = os.path.join(OUTPUT_TXT_DIR, str(pic_name)+'.txt')
        with open(predict_result_path, 'w') as f:
            for i in range(len(result_return['detection_classes'])):
                box = result_return['detection_boxes'][i]
                class_name = result_return['detection_classes'][i]
                confidence = result_return['detection_scores'][i]
                box = list(map(int, box))
                box = list(map(str, box))
                confidence = '%.4f' % confidence
                bbox_mess = ' '.join([class_name, confidence, box[1], box[0], box[3], box[2]]) + '\n'
                f.write(bbox_mess)
    num = len(src_dir)
    print("avg preprocess cost:", t_pre/num)
    print("avg forward cost:", t_for/num)
    print("avg proprocess cost:", t_post/num)
    total = t_pre/num + t_for/num +  t_post/num
    print("avg total cost:", total)
    print("avg FPS:", 1/(total))
    print("Execute end")
Example #21
    return facepointList, head_status_string


if __name__ == '__main__':

    model_name_head_pose = 'head_pose_estimation'
    model_name_face_det = 'face_detection'
    img_file = 'face1.jpeg'

    # initialize acl resource
    acl_resource = AclResource()
    acl_resource.init()

    # load offline model for face detection
    MODEL_PATH = model_name_face_det + ".om"
    model_face = Model(acl_resource, MODEL_PATH)

    #load offline model for head pose estimation
    MODEL_PATH = model_name_head_pose + ".om"
    model_head_pose = Model(acl_resource, MODEL_PATH)

    # load image file
    image = cv2.imread(img_file)

    # preprocessing face detection
    input_image = PreProcessing_face(image)

    #face model infer and post processing
    try:
        resultList_face = model_face.execute([input_image]).copy()
        xmin, ymin, xmax, ymax = PostProcessing_face(image, resultList_face)
Example #22
class Classify(object):
    def __init__(self, model_path, model_width, model_height):
        self.device_id = 0
        self.context = None
        self.stream = None
        self._model = None
        self.run_mode = None
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._dvpp = None

    def __del__(self):
        if self._model:
            del self._model
        if self._dvpp:
            del self._dvpp
        if self.stream:
            acl.rt.destroy_stream(self.stream)
        if self.context:
            acl.rt.destroy_context(self.context)
        acl.rt.reset_device(self.device_id)
        acl.finalize()
        print("[Sample]  Sample release source success")

    def _init_resource(self):
        print("[Sample] init resource stage:")

        ret = acl.init()
        check_ret("acl.rt.set_device", ret)
        ret = acl.rt.set_device(self.device_id)
        check_ret("acl.rt.set_device", ret)

        self.context, ret = acl.rt.create_context(self.device_id)
        check_ret("acl.rt.create_context", ret)

        self.stream, ret = acl.rt.create_stream()
        check_ret("acl.rt.create_stream", ret)

        self.run_mode, ret = acl.rt.get_run_mode()
        check_ret("acl.rt.get_run_mode", ret)
        print("Init resource stage success")

    def init(self):
        self._init_resource()
        self._dvpp = Dvpp(self.stream, self.run_mode)

        ret = self._dvpp.init_resource()
        if ret != SUCCESS:
            print("Init dvpp failed")
            return FAILED

        self._model = Model(self.run_mode, self._model_path)
        ret = self._model.init_resource()
        if ret != SUCCESS:
            print("Init model failed")
            return FAILED

        return SUCCESS

    def pre_process(self, image):
        yuv_image = self._dvpp.jpegd(image)
        print("decode jpeg end")
        resized_image = self._dvpp.resize(yuv_image, self._model_width,
                                          self._model_height)
        print("resize yuv end")
        return resized_image

    def inference(self, resized_image):
        return self._model.execute(resized_image.data(), resized_image.size)

    def post_process(self, infer_output, image_file):
        print("post process")
        data = infer_output[0]
        vals = data.flatten()
        top_k = vals.argsort()[-1:-6:-1]
        object_class = get_image_net_class(top_k[0])
        output_path = os.path.join(os.path.join(SRC_PATH, "../outputs/"),
                                   'out_' + os.path.basename(image_file))
        origin_img = Image.open(image_file)
        draw = ImageDraw.Draw(origin_img)
        font = ImageFont.load_default()
        # note: the default bitmap font has a fixed size, so this assignment does not change the rendered text size
        font.size = 50
        draw.text((10, 50), object_class, font=font, fill=255)
        origin_img.save(output_path)
        object_class = get_image_net_class(top_k[0])
        return object_class
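A minimal usage sketch for this Classify class; the AclImage helper, model path, input size, and image path are assumptions:

# hypothetical usage of the Classify class above (paths and sizes are assumed)
classify = Classify("./model/resnet50.om", 224, 224)
if classify.init() != SUCCESS:
    print("Classify init failed")
else:
    image = AclImage("./data/dog1_1024_683.jpg")  # assumed AclImage wrapper and test image
    resized = classify.pre_process(image)
    result = classify.inference(resized)
    print("predicted class:", classify.post_process(result, "./data/dog1_1024_683.jpg"))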
Example #23
 def __init__(self, model_path, model_width, model_height):
     self._model_path = model_path
     self._model_width = model_width
     self._model_height = model_height
     self._model = Model(model_path)
Example #24
class Sample(object):
    def __init__(self, device_id, model_path, vdec_out_path, model_input_width,
                 model_input_height):
        self.device_id = device_id  # int
        self.model_path = model_path  # string
        self.context = None  # pointer
        self.stream = None
        self.model_input_width = model_input_width
        self.model_input_height = model_input_height
        self.init_resource()
        self.model_process = Model(self.context, self.stream, model_path)
        self.vdec_process = Vdec(self.context, self.stream, vdec_out_path)
        self.dvpp_process = Dvpp(self.stream, model_input_width,
                                 model_input_height)
        self.model_input_width = model_input_width
        self.model_input_height = model_input_height
        self.vdec_out_path = vdec_out_path

    def init_resource(self):
        print("init resource stage:")
        acl.init()
        ret = acl.rt.set_device(self.device_id)
        check_ret("acl.rt.set_device", ret)

        self.context, ret = acl.rt.create_context(self.device_id)
        check_ret("acl.rt.create_context", ret)

        self.stream, ret = acl.rt.create_stream()
        check_ret("acl.rt.create_stream", ret)
        print("init resource stage success")

    def release_resource(self):
        print('[Sample] release resource stage:')
        if self.dvpp_process:
            del self.dvpp_process

        if self.model_process:
            del self.model_process

        if self.vdec_process:
            del self.vdec_process

        if self.stream:
            ret = acl.rt.destroy_stream(self.stream)
            check_ret("acl.rt.destroy_stream", ret)

        if self.context:
            ret = acl.rt.destroy_context(self.context)
            check_ret("acl.rt.destroy_context", ret)

        ret = acl.rt.reset_device(self.device_id)
        check_ret("acl.rt.reset_device", ret)
        ret = acl.finalize()
        check_ret("acl.finalize", ret)
        print('[Sample] release resource stage success')

    def _transfer_to_device(self, img):
        img_device = img["buffer"]
        img_buffer_size = img["size"]
        '''
        if the buffer is not in device, need to copy to device, but here, the data is from vdec, no need to copy.
        '''
        return img_device, img_buffer_size

    def forward(self, temp):
        _, input_width, input_height, _ = temp

        # VDEC process. Note: the input is an H.264 file; the VDEC output data size must be computed from the width and height aligned (strided) to 16 and 2 respectively.
        self.vdec_process.run(temp)

        images_buffer = self.vdec_process.get_image_buffer()
        if images_buffer:
            for img_buffer in images_buffer:
                img_device, img_buffer_size = \
                    self._transfer_to_device(img_buffer)

                print("vdec output, img_buffer_size = ", img_buffer_size)
                # vpc process, parameters:vdec output buffer and size, original picture width and height.
                dvpp_output_buffer, dvpp_output_size = \
                    self.dvpp_process.run(img_device,
                                          img_buffer_size,
                                          input_width,
                                          input_height)

                ret = acl.media.dvpp_free(img_device)
                check_ret("acl.media.dvpp_free", ret)

                self.model_process.run(dvpp_output_buffer, dvpp_output_size)
Example #25
class Cartoonization(object):
    def __init__(self, model_path, model_width, model_height):
        self.device_id = 0
        self.context = None
        self.stream = None
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._dvpp = None
        self._model = None  # set in init(); defined here so __del__ is safe if init() is never called

    def __del__(self):
        if self._model:
            del self._model
        if self._dvpp:
            del self._dvpp
        if self.stream:
            acl.rt.destroy_stream(self.stream)
        if self.context:
            acl.rt.destroy_context(self.context)
        acl.rt.reset_device(self.device_id)
        acl.finalize()
        print("[Sample] class Samle release source success")

    def _init_resource(self):
        print("[Sample] init resource stage:")
        ret = acl.init()
        check_ret("acl.rt.set_device", ret)

        ret = acl.rt.set_device(self.device_id)
        check_ret("acl.rt.set_device", ret)

        self.context, ret = acl.rt.create_context(self.device_id)
        check_ret("acl.rt.create_context", ret)

        self.stream, ret = acl.rt.create_stream()
        check_ret("acl.rt.create_stream", ret)

        self.run_mode, ret = acl.rt.get_run_mode()
        check_ret("acl.rt.get_run_mode", ret)

        print("[Sample] Init resource stage success")

    def init(self):
        # init acl resource
        self._init_resource() 
        self._dvpp = Dvpp(self.stream, self.run_mode)

        # init dvpp
        ret = self._dvpp.init_resource()
        if ret != SUCCESS:
            print("Init dvpp failed")
            return FAILED
        
        # load model
        self._model = Model(self.run_mode, self._model_path)
        ret = self._model.init_resource()
        if ret != SUCCESS:
            print("Init model failed")
            return FAILED
        return SUCCESS

    def pre_process(self, image):
        yuv_image = self._dvpp.jpegd(image)
        crop_and_paste_image = \
            self._dvpp.crop_and_paste(yuv_image, image.width, image.height, self._model_width, self._model_height)
        print("[Sample] crop_and_paste yuv end")
        return crop_and_paste_image

    def inference(self, resized_image):
        return self._model.execute(resized_image.data(), resized_image.size)

    def post_process(self, infer_output, image_file, origin_image):
        print("[Sample] post process")
        data = ((np.squeeze(infer_output[0]) + 1) * 127.5)
        img = cv2.cvtColor(data, cv2.COLOR_RGB2BGR)
        img = cv2.resize(img, (origin_image.width, origin_image.height))
        output_path = os.path.join("../outputs", os.path.basename(image_file))
        cv2.imwrite(output_path, img)
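A hedged usage sketch for the Cartoonization class; the AclImage helper, model path, input size, and image paths are assumptions:

# hypothetical usage of the Cartoonization class above (paths and sizes are assumed)
cartoon = Cartoonization("./model/cartoonization.om", 256, 256)
if cartoon.init() == SUCCESS:
    origin_image = AclImage("./data/scenery.jpg")  # assumed AclImage wrapper and input image
    crop_image = cartoon.pre_process(origin_image)
    result = cartoon.inference(crop_image)
    cartoon.post_process(result, "./data/scenery.jpg", origin_image)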
Example #26
class Classify(object):
    def __init__(self, model_path, model_width, model_height):
        self.device_id = 0
        self.context = None
        self.stream = None
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._dvpp = None
        self._model = None  # set in init(); defined here so __del__ is safe if init() is never called

    def __del__(self):
        if self._model:
            del self._model
        if self._dvpp:
            del self._dvpp
        if self.stream:
            acl.rt.destroy_stream(self.stream)
        if self.context:
            acl.rt.destroy_context(self.context)
        acl.rt.reset_device(self.device_id)
        acl.finalize()
        print("[Sample] class Samle release source success")

    def destroy(self):
        self.__del__()

    def _init_resource(self):
        print("[Sample] init resource stage:")
        #ret = acl.init()
        #check_ret("acl.rt.set_device", ret)

        ret = acl.rt.set_device(self.device_id)
        check_ret("acl.rt.set_device", ret)

        self.context, ret = acl.rt.create_context(self.device_id)
        check_ret("acl.rt.create_context", ret)

        self.stream, ret = acl.rt.create_stream()
        check_ret("acl.rt.create_stream", ret)

        self.run_mode, ret = acl.rt.get_run_mode()
        check_ret("acl.rt.get_run_mode", ret)

        print("Init resource stage success")

    def init(self):

        self._init_resource()
        self._dvpp = Dvpp(self.stream, self.run_mode)

        ret = self._dvpp.init_resource()
        if ret != SUCCESS:
            print("Init dvpp failed")
            return FAILED

        self._model = Model(self.run_mode, self._model_path)
        ret = self._model.init_resource()
        if ret != SUCCESS:
            print("Init model failed")
            return FAILED

        return SUCCESS

    def pre_process(self, image):
        yuv_image = self._dvpp.jpegd(image)
        print("decode jpeg end")
        resized_image = self._dvpp.resize(yuv_image, self._model_width,
                                          self._model_height)
        print("resize yuv end")
        return resized_image

    def inference(self, resized_image):
        return self._model.execute(resized_image.data(), resized_image.size)

    def post_process(self, infer_output, image_file):
        print("post process")
        data = infer_output[0]
        vals = data.flatten()
        top_k = vals.argsort()[-1:-6:-1]
        print("images:{}".format(image_file))
        print("======== top5 inference results: =============")
        for n in top_k:
            object_class = get_image_net_class(n)
            print("label:%d  confidence: %f, class: %s" %
                  (n, vals[n], object_class))
        object_class = get_image_net_class(top_k[0])

        return object_class
Example #27
 def __init__(self, acl_resource, model_path, model_width, model_height):
     self._model_path = model_path
     self._model_width = model_width
     self._model_height = model_height
     self._dvpp = Dvpp(acl_resource)
     self._model = Model(model_path)
Example #28
    return image_new


if __name__ == '__main__':

    # model name, input image file, and confidence score for bboxes
    model_name = 'vgg_ssd'
    img_file = 'test_img/ship.jpg'
    conf_thres = 0.85
    # initialize graph and resource
    acl_resource = AclResource()
    acl_resource.init()

    # create model
    MODEL_PATH = model_name + ".om"
    print("MODEL_PATH:", MODEL_PATH)
    model = Model(acl_resource, MODEL_PATH)

    # load image file
    image = cv2.imread(img_file)

    #image preprocessing
    input_image = PreProcessing(image)

    # model inference
    resultList = model.execute([input_image])

    # postprocessing: select bboxes with scores higher than confidence score(adjustable), plot bboxes/labels on the image and save to output dir
    PostProcessing(resultList, conf_thres)
Example #29
def main():
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    if not os.path.exists(OUTPUT_TXT_DIR):
        os.mkdir(OUTPUT_TXT_DIR)
    #ACL resource initialization
    acl_resource = AclResource()
    acl_resource.init()
    #load model
    model = Model(acl_resource, MODEL_PATH)
    src_dir = os.listdir(INPUT_DIR)
    print("src_dir = ", src_dir)
    #read images one by one from the data directory and run inference
    for pic in src_dir:
        #read image
        pic_path = os.path.join(INPUT_DIR, pic)
        pic_name = pic.split('.')[0]
        print(pic_name)
        bgr_img = cv2.imread(pic_path)

        t1 = time.time()
        img, ratio = letterbox(bgr_img,
                               new_shape=(320, 640))  # resize to (320,640,3)
        img = img[:, :, ::-1]  #bgr to rgb
        img = img.transpose(2, 0, 1)  #(3,320,640)
        img = np.ascontiguousarray(img)
        img = img.astype(np.float32)
        img = img / 255.0

        data = np.concatenate((img[:, ::2, ::2], img[:, 1::2, ::2],
                               img[:, ::2, 1::2], img[:, 1::2, 1::2]),
                              axis=0)  #[12,160,320]
        t2 = time.time()
        result_list = model.execute([
            data,
        ])
        t3 = time.time()
        post = yolov5_post(result_list)  #[1,25200,12]
        result_return = non_max_suppression(post,
                                            conf_thres=conf_threshold,
                                            iou_thres=iou_threshold)
        if len(result_return['detection_classes']):
            det = np.array(result_return['detection_boxes'])[:, :4]
            bbox = scale_coords((320, 640), det, bgr_img.shape, ratio)
        t4 = time.time()
        print("result = ", result_return)
        print("preprocess cost:", t2 - t1)
        print("forward cost:", t3 - t2)
        print("postprocess cost:", t4 - t3)
        print("total cost:", t4 - t1)
        print("FPS:", 1 / (t4 - t1))

        for i in range(len(result_return['detection_classes'])):
            box = bbox[i]
            class_name = result_return['detection_classes'][i]
            confidence = result_return['detection_scores'][i]
            cv2.rectangle(bgr_img, (int(box[0]), int(box[1])),
                          (int(box[2]), int(box[3])), colors[i % 6])
            p3 = (max(int(box[0]), 15), max(int(box[1]), 15))
            out_label = class_name
            cv2.putText(bgr_img, out_label, p3, cv2.FONT_ITALIC, 0.6,
                        colors[i % 6], 1)
        output_file = os.path.join(OUTPUT_DIR, "out_" + pic)
        print("output:%s" % output_file)
        cv2.imwrite(output_file, bgr_img)

        pic_name = pic.split('.')[0]
        predict_result_path = os.path.join(OUTPUT_TXT_DIR,
                                           str(pic_name) + '.txt')
        with open(predict_result_path, 'w') as f:
            for i in range(len(result_return['detection_classes'])):
                box = bbox[i]
                class_name = result_return['detection_classes'][i]
                confidence = result_return['detection_scores'][i]
                box = list(map(int, box))
                box = list(map(str, box))
                confidence = '%.4f' % confidence
                bbox_mess = ' '.join([
                    class_name, confidence, box[0], box[1], box[2], box[3]
                ]) + '\n'
                f.write(bbox_mess)
    print("Execute end")
Example #30
class Sample(object):
    """
    Sample entry point
    """
    def __init__(self,
                 device_id,
                 model_path,
                 model_input_width,
                 model_input_height):
        self.device_id = device_id      # int
        self.model_path = model_path    # string
        self.model_id = None            # pointer
        self.context = None             # pointer

        self.input_data = None
        self.output_data = None
        self.model_desc = None          # pointer when using
        self.load_input_dataset = None
        self.load_output_dataset = None
        self.init_resource()

        self._model_input_width = model_input_width
        self._model_input_height = model_input_height

        self.model_process = Model(self.context,
                                   self.stream,
                                   self.model_path)

        self.dvpp_process = Dvpp(self.stream,
                                 model_input_width,
                                 model_input_height)

        self.sing_op = SingleOp(self.stream)

    def release_resource(self):
        if self.model_process:
            del self.model_process

        if self.dvpp_process:
            del self.dvpp_process

        if self.sing_op:
            del self.sing_op

        if self.stream:
            acl.rt.destroy_stream(self.stream)

        if self.context:
            acl.rt.destroy_context(self.context)
        acl.rt.reset_device(self.device_id)
        acl.finalize()
        print("[Sample] class Samle release source success")

    def init_resource(self):
        print("[Sample] init resource stage:")
        acl.init()
        ret = acl.rt.set_device(self.device_id)
        check_ret("acl.rt.set_device", ret)

        self.context, ret = acl.rt.create_context(self.device_id)
        check_ret("acl.rt.create_context", ret)

        self.stream, ret = acl.rt.create_stream()
        check_ret("acl.rt.create_stream", ret)
        print("[Sample] init resource stage success")

    def _transfer_to_device(self, img_path, dtype=np.uint8):
        img = np.fromfile(img_path, dtype=dtype)
        if "bytes_to_ptr" in dir(acl.util):
            bytes_data = img.tobytes()
            img_ptr = acl.util.bytes_to_ptr(bytes_data)
        else:
            img_ptr = acl.util.numpy_to_ptr(img)
        img_buffer_size = img.itemsize * img.size
        img_device, ret = acl.media.dvpp_malloc(img_buffer_size)
        check_ret("acl.media.dvpp_malloc", ret)
        ret = acl.rt.memcpy(img_device,
                            img_buffer_size,
                            img_ptr,
                            img_buffer_size,
                            ACL_MEMCPY_HOST_TO_DEVICE)
        check_ret("acl.rt.memcpy", ret)

        return img_device, img_buffer_size

    def forward(self, img_dict):
        img_path, _ = img_dict["path"], img_dict["dtype"]
        # copy images to device
        with Image.open(img_path) as image_file:
            width, height = image_file.size
            print("[Sample] width:{} height:{}".format(width, height))
            print("[Sample] image:{}".format(img_path))
        img_device, img_buffer_size = \
            self._transfer_to_device(img_path, img_dict["dtype"])

        # decode and resize
        dvpp_output_buffer, dvpp_output_size = \
            self.dvpp_process.run(img_device,
                                  img_buffer_size,
                                  width,
                                  height)
        self.model_process.run(
            dvpp_output_buffer,
            dvpp_output_size)
        self.sing_op.run(self.model_process.get_result())
        if img_device:
            acl.media.dvpp_free(img_device)
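A short usage sketch for the Sample class above, with a hypothetical device id, model path, input size, and image:

# hypothetical usage of the Sample class above (paths and sizes are assumed)
sample = Sample(device_id=0,
                model_path="./model/resnet50.om",
                model_input_width=224,
                model_input_height=224)
sample.forward({"path": "./data/dog.jpg", "dtype": np.uint8})  # JPEG is decoded and resized by DVPP
sample.release_resource()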