Example No. 1
def main():
    """main"""
    #acl init
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)
    acl_resource = AclLiteResource()
    acl_resource.init()
    model = AclLiteModel(MODEL_PATH)
    dvpp = AclLiteImageProc(acl_resource)

    # Get the image directory from the command-line argument and infer each image in it
    image_dir = sys.argv[1]
    images_list = [os.path.join(image_dir, img)
                   for img in os.listdir(image_dir)
                   if os.path.splitext(img)[1] in const.IMG_EXT]

    if not os.path.isdir(os.path.join(SRC_PATH, "../out")):
        os.mkdir(os.path.join(SRC_PATH, "../out"))

    #infer picture
    for pic in images_list:
        #get pic data
        orig_shape, l_data = preprocess(pic)
        #inference
        result_list = model.execute([l_data])    
        #postprocess
        postprocess(result_list, pic, orig_shape, pic)
    print("Execute end")
Example No. 2
def main():
    """
    acl resource initialization
    """
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    #ACL resource initialization
    acl_resource = AclLiteResource()
    acl_resource.init()

    model = AclLiteModel(model_path)
    images_list = [
        os.path.join(INPUT_DIR, img) for img in os.listdir(INPUT_DIR)
        if os.path.splitext(img)[1] in IMG_EXT
    ]

    for pic in images_list:

        orig_shape, orig_l, l_data = preprocess(pic)
        result_list = model.execute([
            l_data,
        ])
        postprocess(result_list, pic, orig_shape, orig_l)
        break
    print("Execute end")
Example No. 3
def main():
    """
    Program execution with picture directory parameters
    """
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)

    acl_resource = AclLiteResource()
    acl_resource.init()
    model = AclLiteModel(MODEL_PATH)
    dvpp = AclLiteImageProc(acl_resource)

    # Get the image directory from the command-line argument and infer each image in it
    image_dir = sys.argv[1]
    images_list = [
        os.path.join(image_dir, img) for img in os.listdir(image_dir)
        if os.path.splitext(img)[1] in const.IMG_EXT
    ]
    #Create a directory to store the inference results
    if not os.path.isdir('../out'):
        os.mkdir('../out')

    image_info = construct_image_info()

    for image_file in images_list:
        #read picture
        image = AclLiteImage(image_file)
        #preprocess image
        resized_image = pre_process(image, dvpp)
        print("pre process end")
        #run inference
        result = model.execute([resized_image, image_info])
        #process inference results
        post_process(result, image, image_file)
Example No. 4
def main():
    """
    Program execution
    """
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)

    acl_resource = AclLiteResource()  # ACL initialize
    acl_resource.init()

    model = AclLiteModel(model_path)  # load model

    src_dir = os.listdir(INPUT_DIR)
    for pic in src_dir:
        if not pic.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg',
                                     '.pbm', '.pgm', '.ppm', '.tif', '.tiff')):
            print('%s is not a picture file, skip it and continue' % pic)
            continue
        pic_path = os.path.join(INPUT_DIR, pic)
        RGB_image, o_h, o_w = pre_process(pic_path)  # preprocess

        start_time = time.time()
        result_list = model.execute([
            RGB_image,
        ])  # inferring
        end_time = time.time()
        print('pic:{}'.format(pic))
        print('pic_size:{}x{}'.format(o_h, o_w))
        print('time:{}ms'.format(int((end_time - start_time) * 1000)))
        print('\n')
        post_process(result_list, pic, o_h, o_w)  # postprocess
    print('task over')
Example No. 5
def main():
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)

    acl_resource = AclLiteResource()
    acl_resource.init()
    model = AclLiteModel(MODEL_PATH)
    dvpp = AclLiteImageProc(acl_resource)

    image_dir = sys.argv[1]
    images_list = [
        os.path.join(image_dir, img) for img in os.listdir(image_dir)
        if os.path.splitext(img)[1] in const.IMG_EXT
    ]

    #Create a directory to store the inference results
    if not os.path.isdir(os.path.join(SRC_PATH, "../out")):
        os.mkdir(os.path.join(SRC_PATH, "../out"))

    image_info = construct_image_info()
    for image_file in images_list:
        image = AclLiteImage(image_file)
        resized_image = pre_process(image, dvpp)
        print("pre process end")

        result = model.execute([
            resized_image,
        ])
        post_process(result, image_file)

        print("process " + image_file + " end")
Example No. 6
def preandinfer(image_queue, images_list):
    acl_start = time.time()
    #print('Start preandinfer ')
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    acl_resource = AclLiteResource()
    acl_resource.init()
    dvpp_ = AclLiteImageProc()
    model = AclLiteModel(model_path)
    print('------------------------------acl processing time',
          time.time() - acl_start)
    for pic in images_list:
        image = AclLiteImage(pic)
        image_dvpp = image.copy_to_dvpp()
        yuv_image = dvpp_.jpegd(image_dvpp)
        resized_image = dvpp_.resize(yuv_image, MODEL_WIDTH, MODEL_HEIGHT)
        result_list = model.execute([
            resized_image,
        ])
        data = ProcData(result_list, pic, OUTPUT_DIR)
        image_queue.put(data)
    post_num = 6
    while (post_num):
        post_num -= 1
        data = "Post process thread exit"
        image_queue.put(data)
    print('End preandinfer')
    print('------------------------------preandinfer time',
          time.time() - acl_start)
Example No. 7
def main():
    """
    main
    """
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)

    acl_resource = AclLiteResource()
    acl_resource.init()
    model = AclLiteModel(MODEL_PATH)

    data_dir = sys.argv[1]
    data_list = [
        os.path.join(data_dir, testdata) for testdata in os.listdir(data_dir)
        if os.path.splitext(testdata)[1] in ['.bin']
    ]

    #Create a directory to store the inference results
    if not os.path.isdir(os.path.join(SRC_PATH, "../out")):
        os.mkdir(os.path.join(SRC_PATH, "../out"))

    for data_file in data_list:
        data_raw = np.fromfile(data_file, dtype=np.float32)
        input_data = data_raw.reshape(16, MODEL_WIDTH, MODEL_HEIGHT, 3).copy()
        result = model.execute([
            input_data,
        ])
        post_process(result, data_file)

    print("process  end")
Example No. 8
def main():
    """
    acl resource initialization
    """
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    #ACL resource initialization
    acl_resource = AclLiteResource()
    acl_resource.init()

    model = AclLiteModel(model_path)
    images_list = [
        os.path.join(INPUT_DIR, img) for img in os.listdir(INPUT_DIR)
        if os.path.splitext(img)[1] in IMG_EXT
    ]

    for pic in images_list:
        print("pic: ", pic)

        bgr_img = cv.imread(pic).astype(np.float32)
        data = preprocess(bgr_img)

        result_list = model.execute([data])

        postprocess(result_list, pic, bgr_img)

    print("Execute end")
Example No. 9
class Classify(object):
    """
    Classify
    """
    def __init__(self, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._model = AclLiteModel(model_path)

    @display_time
    def pre_process(self, image):
        """
        pre_process
        """
        bgr_img = cv2.imread(image).astype(np.float32)
        bgr_img = bgr_img / 255.0
        resized_image = cv2.resize(bgr_img, (299, 299))
        return resized_image

    @display_time
    def inference(self, resized_image):
        """
        inference
        """
        return self._model.execute([
            resized_image,
        ])

    @display_time
    def post_process(self, infer_output, image_file):
        """
        post_process
        """
        print("post process")
        data = infer_output[0]
        vals = data.flatten()
        top_k = vals.argsort()[-1:-6:-1]
        print("images:{}".format(image_file))
        print("======== top5 inference results: =============")
        for n in top_k:
            object_class = get_image_net_class(n)
            print("label:%d  confidence: %f, class: %s" %
                  (n, vals[n], object_class))

        #the category with the highest confidence is written on the image with OpenCV and saved locally
        if len(top_k):
            object_class = get_image_net_class(top_k[0])
            object_value = vals[top_k[0]]
            output_path = os.path.join(os.path.join(SRC_PATH, "../out"),
                                       os.path.basename(image_file))
            origin_img = cv2.imread(image_file)
            font = cv2.FONT_HERSHEY_SIMPLEX
            origin_img = cv2.putText(origin_img, object_class, (10, 100), font,
                                     3, (255, 255, 255), 3)
            origin_img = cv2.putText(origin_img, str(object_value), (10, 200),
                                     font, 2, (255, 255, 255), 3)
            cv2.imwrite(output_path, origin_img)
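The example above only defines the Classify class. Purely as an illustration, a minimal driver patterned after the main() functions in the other examples could look like the sketch below; MODEL_PATH, const.IMG_EXT, the AclLite imports and the ACL setup are assumptions carried over from those examples, not part of this snippet.

def main():
    """Hypothetical driver for the Classify class above (assumed constants)."""
    acl_resource = AclLiteResource()
    acl_resource.init()
    # 299x299 matches the resize done in pre_process; MODEL_PATH is an assumed constant
    classifier = Classify(MODEL_PATH, 299, 299)
    image_dir = sys.argv[1]
    images_list = [os.path.join(image_dir, img)
                   for img in os.listdir(image_dir)
                   if os.path.splitext(img)[1] in const.IMG_EXT]
    for image_file in images_list:
        resized_image = classifier.pre_process(image_file)  # read and resize with OpenCV
        result = classifier.inference(resized_image)        # run the .om model
        classifier.post_process(result, image_file)         # print top-5 and save annotated image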
Example No. 10
class Classify(object):
    """
    Class for image classification
    """
    def __init__(self, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._img_width = 0
        self._img_height = 0
        self._model = None
        self._dvpp = None

    def init(self):
        """
        Initialize
        """
        self._dvpp = AclLiteImageProc()
        # Load model
        self._model = AclLiteModel(self._model_path)

        return const.SUCCESS

    @utils.display_time
    def pre_process(self, image):
        """
        preprocess 
        """
        image_dvpp = image.copy_to_dvpp()
        yuv_image = self._dvpp.jpegd(image_dvpp)
        resized_image = self._dvpp.resize(yuv_image, self._model_width,
                                          self._model_height)
        return resized_image

    @utils.display_time
    def inference(self, input_data):
        """
        model inference
        """
        return self._model.execute(input_data)

    @utils.display_time
    def post_process(self, infer_output, image_file):
        """
        Post-processing, analysis of inference results
        """
        output_path = os.path.join(OUTPUT_DIR, os.path.basename(image_file))
        infer_result = infer_output[0]
        vals = infer_result.flatten()
        pre_index = vals.argsort()[-1]

        origin_img = Image.open(image_file)
        draw = ImageDraw.Draw(origin_img)
        font = ImageFont.truetype(
            "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", size=20)
        draw.text((10, 50), CLS[pre_index], font=font, fill=255)
        origin_img.save(output_path)
Example No. 11
class Cartoonization(object):
    """
    class for Cartoonization
    """
    def __init__(self, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self.device_id = 0
        self._model = None
        self._dvpp = None

    def init(self):
        """
        Initialize
        """
        self._dvpp = AclLiteImageProc()
        # Load model
        self._model = AclLiteModel(self._model_path)

        return const.SUCCESS

    @utils.display_time
    def pre_process(self, image, size=[256, 256]):
        """
        image preprocess
        """
        image_dvpp = image.copy_to_dvpp()
        yuv_image = self._dvpp.jpegd(image_dvpp)
        crop_and_paste_image = self._dvpp.crop_and_paste_get_roi(yuv_image, image.width, image.height, \
                                self._model_width, self._model_height)
        return crop_and_paste_image

    def inference(self, resized_image):
        """
        model inference
        """
        return self._model.execute(resized_image)

    @utils.display_time
    def post_process(self, infer_output, image_file):
        """
        post process
        """
        origin_image = cv2.imread(image_file).astype(np.float32)
        h, w = origin_image.shape[:2]

        image = ((np.squeeze(infer_output[0]) + 1) / 2 * 255)
        image = np.clip(image, 0, 255).astype(np.uint8)
        image = cv2.resize(image, (w, h))

        output_path = os.path.join("../out", os.path.basename(image_file))
        cv2.imwrite(output_path, cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
Example No. 12
class Seg(object):
    """
    Class for portrait segmentation
    """
    def __init__(self, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self.device_id = 0
        self._dvpp = None
        self._model = None

    def init(self):
        """
        Initialize
        """
        # Initialize dvpp
        self._dvpp = AclLiteImageProc()

        # Load model
        self._model = AclLiteModel(self._model_path)

        return const.SUCCESS

    @utils.display_time
    def pre_process(self, image):
        """
        image preprocess
        """
        image_dvpp = image.copy_to_dvpp()
        yuv_image = self._dvpp.jpegd(image_dvpp)
        resized_image = self._dvpp.resize(yuv_image, self._model_width,
                                          self._model_height)
        return resized_image

    @utils.display_time
    def inference(self, input_data):
        """
        model inference
        """
        return self._model.execute(input_data)

    @utils.display_time
    def post_process(self, infer_output, image_name):
        """
        get mask
        """
        data = infer_output[0]
        vals = data.flatten()
        mask = np.clip((vals * 255), 0, 255)
        mask = mask.reshape(224, 224, 2)
        cv2.imwrite(os.path.join(MASK_DIR, image_name), mask[:, :, 0])
        return mask
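For reference, a hypothetical main() for the Seg class, modeled on the drivers elsewhere in this collection; MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT, const.IMG_EXT and MASK_DIR are assumed to be defined as in the original sample.

def main():
    """Hypothetical driver for the Seg class above (assumed constants)."""
    acl_resource = AclLiteResource()
    acl_resource.init()
    seg = Seg(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT)
    seg.init()
    image_dir = sys.argv[1]
    images_list = [os.path.join(image_dir, img)
                   for img in os.listdir(image_dir)
                   if os.path.splitext(img)[1] in const.IMG_EXT]
    for image_file in images_list:
        image = AclLiteImage(image_file)
        resized_image = seg.pre_process(image)                   # DVPP decode + resize
        result = seg.inference([resized_image])                  # model execution
        seg.post_process(result, os.path.basename(image_file))   # write mask to MASK_DIR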
Example No. 13
def main():
    """
    acl resource initialization
    """
    acl_resource = AclLiteResource()
    acl_resource.init()
    #load model
    model = AclLiteModel(model_path)
    chan = presenter_channel.open_channel(COLORIZATION_CONF)
    if chan is None:
        print("Open presenter channel failed")
        return

    lenofUrl = len(sys.argv)

    if lenofUrl <= 1:
        print("[ERROR] Please input mp4/Rtsp URL")
        exit()
    elif lenofUrl >= 3:
        print("[ERROR] param input Error")
        exit()
    URL = sys.argv[1]
    URL1 = re.match('rtsp://', URL)
    URL2 = re.search('.mp4', URL)

    if URL1 is None and URL2 is None:
        print("[ERROR] should input correct URL")
        exit()

    cap = cv.VideoCapture(URL)
    #Gets the total frames
    frames_num = cap.get(cv.CAP_PROP_FRAME_COUNT)
    currentFrames = 0

    while True:
        #read image
        ret, frame = cap.read()
        if ret is not True:
            print("read None image, break")
            break

        if currentFrames == frames_num - 1:
            currentFrames = 0
            cap.set(cv.CAP_PROP_POS_FRAMES, 0)

        currentFrames += 1
        #Gets the L channel value
        orig_shape, orig_l, l_data = preprocess(frame)
        result_list = model.execute([
            l_data,
        ])
        result_jpeg = postprocess(result_list, orig_shape, orig_l)
        chan.send_image(orig_shape[0], orig_shape[1], result_jpeg)
Example No. 14
class Cartoonization(object):
    """
    class for Cartoonization
    """
    def __init__(self, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self.device_id = 0
        self._dvpp = None
        self._model = None

    def init(self):
        """
        Initialize
        """
        # Initialize dvpp
        self._dvpp = AclLiteImageProc()

        # Load model
        self._model = AclLiteModel(self._model_path)

        return const.SUCCESS

    @utils.display_time
    def pre_process(self, image):
        """
        image preprocess
        """
        image_dvpp = image.copy_to_dvpp()
        yuv_image = self._dvpp.jpegd(image_dvpp)
        crop_and_paste_image = self._dvpp.crop_and_paste_get_roi(yuv_image, image.width, image.height, \
                                self._model_width, self._model_height)
        return crop_and_paste_image

    @utils.display_time
    def inference(self, resized_image):
        """
        model inference
        """
        return self._model.execute(resized_image)

    @utils.display_time
    def post_process(self, infer_output, image_file, origin_image):
        """
        post process
        """
        data = ((np.squeeze(infer_output[0]) + 1) * 127.5)
        img = cv2.cvtColor(data, cv2.COLOR_RGB2BGR)
        img = cv2.resize(img, (origin_image.width, origin_image.height))
        output_path = os.path.join("../out", os.path.basename(image_file))
        cv2.imwrite(output_path, img)
Example No. 15
def main():
    acl_resource = AclLiteResource()
    acl_resource.init()

    detect = VggSsd(acl_resource, MODEL_WIDTH, MODEL_HEIGHT)
    model = AclLiteModel(MODEL_PATH)
    chan = presenter_channel.open_channel(MASK_DETEC_CONF)
    if chan is None:
        print("Open presenter channel failed")
        return

    lenofUrl = len(sys.argv)
    if lenofUrl <= 1:
        print("[ERROR] Please input h264/Rtsp URL")
        exit()
    elif lenofUrl >= 3:
        print("[ERROR] param input Error")
        exit()
    URL = sys.argv[1]
    URL1 = re.match('rtsp://', URL)
    URL2 = re.search('.h264', URL)

    if URL1 is None and URL2 is None:
        print("[ERROR] should input correct URL")
        exit()
    cap = video.VideoCapture(URL)

    while True:
        # Read a frame
        ret, image = cap.read()
        if (ret != 0) or (image is None):
            print("read None image, break")
            break
        #pre process
        model_input = detect.pre_process(image)
        if model_input is None:
            print("Pre process image failed")
            break
        # inference
        result = model.execute(model_input)
        if result is None:
            print("execute mode failed")
            break
        # post process
        jpeg_image, detection_list = detect.post_process(result, image)
        if jpeg_image is None:
            print("The jpeg image for present is None")
            break
        chan.send_detection_data(CAMERA_FRAME_WIDTH, CAMERA_FRAME_HEIGHT,
                                 jpeg_image, detection_list)
Example No. 16
class Classify(object):
    """
    Class for image classification
    """
    def __init__(self, model_path):
        self._model_path = model_path
        self._model = None

    def init(self):
        """
        Initialize
        """
        # Load model
        self._model = AclLiteModel(self._model_path)

        return const.SUCCESS

    def pre_process(self, img):
        """
        preprocess 
        """
        img = cv2.resize(img, (56, 56))
        img = img.astype(np.float32) / 255.0
        processed_img = np.expand_dims(img, axis=0)
        return processed_img

    def inference(self, input_data):
        """
        model inference
        """
        return self._model.execute(input_data)

    def post_process(self, infer_output):
        """
        Post-processing, analysis of inference results
        """
        infer_result = infer_output[0]
        vals = infer_result.flatten()
        pre_index = vals.argsort()[-1]
        return pre_index

    def process(self, input_image):
        """
        complete process
        """
        processed_img = self.pre_process(input_image)
        infer_output = self.inference(processed_img)
        result = self.post_process(infer_output)
        return result
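Since this variant bundles the whole pipeline in process(), a hypothetical caller only needs a few lines; MODEL_PATH and the cv2/AclLite imports are assumptions, not part of the original snippet.

def main():
    """Hypothetical driver for the Classify class above (assumed constants)."""
    acl_resource = AclLiteResource()
    acl_resource.init()
    classifier = Classify(MODEL_PATH)
    classifier.init()
    img = cv2.imread(sys.argv[1])          # BGR image; pre_process resizes it to 56x56
    pre_index = classifier.process(img)    # pre_process -> inference -> post_process
    print("predicted class index:", pre_index)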
Example No. 17
class Yolov3(object):
    """yolov3"""
    def __init__(self, acl_resource, model_path, model_width, model_height):
        self._acl_resource = acl_resource
        self._model_width = model_width
        self._model_height = model_height
        self._model = AclLiteModel(model_path)

    def __del__(self):
        if self._model:
            del self._model

    def construct_image_info(self):
        """construct"""
        image_info = np.array([self._model_width, self._model_height,
                       self._model_width, self._model_height],
                       dtype = np.float32)
        return image_info

    def execute(self, data):
        """execute"""
        image_info = self.construct_image_info()
        return self._model.execute([data.resized_image, image_info])       
 
    def post_process(self, infer_output, data):
        """post"""
        print("infer output shape is : ", infer_output[1].shape)
        box_num = int(infer_output[1][0, 0])
        print("box num = ", box_num)
        box_info = infer_output[0].flatten()
        scalex = data.frame_width / self._model_width
        scaley = data.frame_height / self._model_height
        if scalex > scaley:
            scaley =  scalex
        detection_result_list = []
        for n in range(int(box_num)):
            ids = int(box_info[5 * int(box_num) + n])
            label = labels[ids]
            score = box_info[4 * int(box_num)+n]
            lt_x = int(box_info[0 * int(box_num)+n] * scaley)
            lt_y = int(box_info[1 * int(box_num)+n] * scaley)
            rb_x = int(box_info[2 * int(box_num) + n] * scaley)
            rb_y = int(box_info[3 * int(box_num) + n] * scaley)
            print("channel %d inference result: box top left(%d, %d), "
                  "bottom right(%d %d), score %s" % (data.channel, 
                  lt_x, lt_y, rb_x, 
                  rb_y, score))
        return detection_result_list
Example No. 18
class Classify(object):
    def __init__(self, acl_resource, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._dvpp = AclLiteImageProc(acl_resource)
        self._model = AclLiteModel(model_path)

    def __del__(self):
        if self._dvpp:
            del self._dvpp
        print("[Sample] class Samle release source success")

    def pre_process(self, image):
        yuv_image = self._dvpp.jpegd(image)
        resized_image = self._dvpp.resize(yuv_image, self._model_width,
                                          self._model_height)
        print("resize yuv end")
        return resized_image

    def inference(self, resized_image):
        return self._model.execute([
            resized_image,
        ])

    def post_process(self, infer_output, image_file):
        print("post process")
        data = infer_output[0]
        vals = data.flatten()
        top_k = vals.argsort()[-1:-6:-1]
        print("images:{}".format(image_file))
        print("======== top5 inference results: =============")
        for n in top_k:
            object_class = get_image_net_class(n)
            print("label:%d  confidence: %f, class: %s" %
                  (n, vals[n], object_class))

        #using pillow, the category with the highest confidence is written on the image and saved locally
        if len(top_k):
            object_class = get_image_net_class(top_k[0])
            output_path = os.path.join(os.path.join(SRC_PATH, "../out"),
                                       os.path.basename(image_file))
            origin_img = Image.open(image_file)
            draw = ImageDraw.Draw(origin_img)
            font = ImageFont.truetype(
                "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
                size=20)
            draw.text((10, 50), object_class, font=font, fill=255)
            origin_img.save(output_path)
Example No. 19
def main():
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    #ACL resource initialization
    acl_resource = AclLiteResource()
    acl_resource.init()
    #load model
    model = AclLiteModel(MODEL_PATH)
    images_list = [os.path.join(INPUT_DIR, img)
                   for img in os.listdir(INPUT_DIR)
                   if os.path.splitext(img)[1] in const.IMG_EXT]
    #Read images from the data directory one by one for inference
    for pic in images_list:
        #read image
        bgr_img = cv.imread(pic)
        #preprocess
        data, orig = preprocess(pic)
        #Send into model inference
        result_list = model.execute([data,])
        #Process inference results
        result_return = post_process(result_list, orig)
        print("result = ", result_return)
        #Process lane line
        frame_with_lane = preprocess_frame(bgr_img)
        distance = np.zeros(shape=(len(result_return['detection_classes']), 1))

        for i in range(len(result_return['detection_classes'])):
            box = result_return['detection_boxes'][i]
            class_name = result_return['detection_classes'][i]
            confidence = result_return['detection_scores'][i]
            distance[i] = calculate_position(bbox=box, transform_matrix=perspective_transform,
                        warped_size=WARPED_SIZE, pix_per_meter=pixels_per_meter)
            label_dis = '{} {:.2f}m'.format('dis:', distance[i][0])
            cv.putText(frame_with_lane, label_dis, (int(box[1]) + 10, int(box[2]) + 15), 
                        cv.FONT_ITALIC, 0.6, colors[i % 6], 1)

            cv.rectangle(frame_with_lane, (int(box[1]), int(box[0])), (int(box[3]), int(box[2])), colors[i % 6])
            p3 = (max(int(box[1]), 15), max(int(box[0]), 15))
            out_label = class_name
            cv.putText(frame_with_lane, out_label, p3, cv.FONT_ITALIC, 0.6, colors[i % 6], 1)

        output_file = os.path.join(OUTPUT_DIR, "out_" + os.path.basename(pic))
        print("output:%s" % output_file)
        cv.imwrite(output_file, frame_with_lane)
    print("Execute end")
Example No. 20
def main():
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    #ACL resource initialization
    acl_resource = AclLiteResource()
    acl_resource.init()
    #load model
    model = AclLiteModel(MODEL_PATH)
    images_list = [
        os.path.join(INPUT_DIR, img) for img in os.listdir(INPUT_DIR)
        if os.path.splitext(img)[1] in const.IMG_EXT
    ]
    #Read images from the data directory one by one for inference
    for pic in images_list:
        #read image
        bgr_img = cv.imread(pic)
        #preprocess
        data, orig = preprocess(pic)
        #Send into model inference
        result_list = model.execute([
            data,
        ])
        #Process inference results
        result_return = post_process(result_list, orig)
        print("result = ", result_return)

        for i in range(len(result_return['detection_classes'])):
            box = result_return['detection_boxes'][i]
            class_name = result_return['detection_classes'][i]
            confidence = result_return['detection_scores'][i]
            cv.rectangle(bgr_img, (int(box[1]), int(box[0])),
                         (int(box[3]), int(box[2])), colors[i % 6])
            p3 = (max(int(box[1]), 15), max(int(box[0]), 15))
            out_label = class_name
            cv.putText(bgr_img, out_label, p3, cv.FONT_ITALIC, 0.6,
                       colors[i % 6], 1)

        output_file = os.path.join(OUTPUT_DIR, "out_" + os.path.basename(pic))
        print("output:%s" % output_file)
        cv.imwrite(output_file, bgr_img)

    print("Execute end")
Example No. 21
class ModelProcessor(object):
    """acl model wrapper"""
    def __init__(self, acl_resource, params):
        self._acl_resource = acl_resource
        self.params = params
        self._model_width = params['width']
        self._model_height = params['height']

        assert 'model_dir' in params and params['model_dir'] is not None, 'Review your param: model_dir'
        assert os.path.exists(params['model_dir']), "Model directory doesn't exist {}".format(params['model_dir'])
            
        # load model from path, and get model ready for inference
        self.model = AclLiteModel(params['model_dir'])

    def predict(self, img_original):
        """run predict"""
        #preprocess image to get 'model_input'
        model_input = self.preprocess(img_original)

        # execute model inference
        result = self.model.execute([model_input]) 
        # postprocessing: use the heatmaps (the model output) to get the joints and limbs of the human body
        # Note: the model has multiple outputs; here we use a simplified method that only uses the heatmap for body joints.
        #       The heatmap has shape [1, 14]; each value corresponds to the position of one of the 14 joints,
        #       given as an index into the 92*92 heatmap (flattened to one dimension).
        heatmaps = result[0]
        # calculate the scale of original image over heatmap, Note: image_original.shape[0] is height
        scale = np.array([img_original.shape[1] / heatmap_width, img_original.shape[0]/ heatmap_height])

        canvas = decode_pose(heatmaps[0], scale, img_original)

        return canvas

    def preprocess(self, img_original):
        """
        preprocessing: resize image to model required size, and normalize value between [0,1]
        """
        scaled_img_data = cv2.resize(img_original, (self._model_width, self._model_height))
        preprocessed_img = np.asarray(scaled_img_data, dtype=np.float32) / 255.
        
        return preprocessed_img
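As an illustration of how this wrapper might be driven, the sketch below feeds it one image read with OpenCV; the width/height values are placeholders (use whatever input size the .om model actually expects), and MODEL_PATH and OUTPUT_DIR are assumed constants.

def main():
    """Hypothetical driver for ModelProcessor (placeholder parameters)."""
    acl_resource = AclLiteResource()
    acl_resource.init()
    params = {'width': 368, 'height': 368,   # placeholder input size
              'model_dir': MODEL_PATH}       # assumed constant
    model_processor = ModelProcessor(acl_resource, params)
    img = cv2.imread(sys.argv[1])
    canvas = model_processor.predict(img)    # preprocess, infer, decode pose onto a canvas
    cv2.imwrite(os.path.join(OUTPUT_DIR, os.path.basename(sys.argv[1])), canvas)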
Example No. 22
def main():
    """
    acl resource initialization
    """
    acl_resource = AclLiteResource()
    acl_resource.init()
    
    model = AclLiteModel(model_path)

    with codecs.open(dict_path, 'r', 'utf-8') as reader:
        for line in reader:
            token = line.strip()
            token_dict[token] = len(token_dict) 

    with open(sample_path, "r") as f:
        text = f.read() 

    with open(label_path, "r", encoding="utf-8") as f:
        label_dict = json.loads(f.read())
               
    X1, X2 = preprocess(text)
    X1 = np.ascontiguousarray(X1, dtype='float32')
    X2 = np.ascontiguousarray(X2, dtype='float32')

    X1 = np.expand_dims(X1, 0)
    X2 = np.expand_dims(X2, 0)
    s_time = time.time()
    
    result_list = model.execute([X1, X2])
    e_time = time.time()    
    print(result_list)   
    y = postprocess(result_list)

    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    save_to_file(output_dir + 'prediction_label.txt', label_dict[str(y)])
    print("Original text: %s" % text)
    print("Prediction label: %s" % label_dict[str(y)])
    print("Cost time:", e_time - s_time)
    print("Execute end")
Example No. 23
def main():
    """
    Program execution with picture directory parameters
    """
    if (len(sys.argv) != 3):
        print("The App arg is invalid. The style you can choose: \
                xingkong/tangguo/bijiasuo/worksoldiers.eg: python3 main.py ../data xingkong"
              )
        exit(1)

    style_type = sys.argv[2]
    if style_type == "tangguo":
        model_path = '../model/tangguo_fp32_nchw_no_aipp.om'
    elif style_type == "bijiasuo":
        model_path = '../model/bijiasuo_fp32_nchw_no_aipp.om'
    elif style_type == "worksoldiers":
        model_path = '../model/work_soldiers_fp32_nchw_no_aipp.om'
    elif style_type == "xingkong":
        model_path = '../model/xingkong1_fp32_nchw_no_aipp.om'
    else:
        print("Unknown style type: %s" % style_type)
        exit(1)

    acl_resource = AclLiteResource()
    acl_resource.init()
    model = AclLiteModel(model_path)

    image_dir = sys.argv[1]
    images_list = [
        os.path.join(image_dir, img) for img in os.listdir(image_dir)
        if os.path.splitext(img)[1] in const.IMG_EXT
    ]
    if not os.path.isdir('../out'):
        os.mkdir('../out')

    for image_file in images_list:
        orig_shape, rgb_data = pre_process(image_file)
        print("pre process end")
        result_list = model.execute([rgb_data])
        print("Execute end")
        post_process(result_list, orig_shape, image_file)
        print("postprocess end")
Example No. 24
def main():
    """main"""
    #acl init
    if (len(sys.argv) != 3):
        print("The App arg is invalid")
        exit(1)
    acl_resource = AclLiteResource()
    acl_resource.init()
    model = AclLiteModel(MODEL_PATH)
    #x=296
    #y=330
    #x=410
    #y=664
    coordinate = [-1, -1]

    #Read the target coordinate from the command-line arguments
    coordinate = [int(sys.argv[1]), int(sys.argv[2])]

    if not os.path.exists(DATA_PATH):
        os.mkdir(DATA_PATH)

    if not os.path.exists(MASK_PATH):
        os.mkdir(MASK_PATH)

    if not os.path.exists(OUTPUT_PATH):
        os.mkdir(OUTPUT_PATH)

    images_list = [os.path.join(DATA_PATH, img)
                   for img in os.listdir(DATA_PATH)
                   if os.path.splitext(img)[1] in const.IMG_EXT]
    #infer picture
    for pic in images_list:
        #get pic data
        orig_shape, l_data, im_info = preprocess(pic)
        #inference
        result_list = model.execute([l_data, im_info])
        #postprocess
        postprocess(result_list, pic, coordinate, OUTPUT_PATH)
    print("Execute end")
Example No. 25
def main():
    """
    main
    """
    #create output directory
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)

    #acl init
    acl_resource = AclLiteResource()
    acl_resource.init()

    #load model
    model = AclLiteModel(MODEL_PATH)
    src_dir = os.listdir(INPUT_DIR)
    #infer picture
    for pic in src_dir:
        if not pic.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg',
                                     '.pbm', '.pgm', '.ppm', '.tif', '.tiff')):
            print('%s is not a picture file, skip it and continue' % pic)
            continue

        #read picture
        pic_path = os.path.join(INPUT_DIR, pic)
        bgr_img = cv2.imread(pic_path)

        #get pic data
        orig_shape, test_img = preprocess(bgr_img)

        #inference
        result_list = model.execute([
            test_img,
        ])

        #postprocess
        postprocess(result_list, pic)
Example No. 26
class Classify(object):
    """classify"""
    def __init__(self, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._model = AclLiteModel(model_path)

    def __del__(self):
        print("[Sample] class Samle release source success")

    def pre_process(self, image):
        """preprocess"""
        input_image = Image.open(image)
        input_image = input_image.resize((224, 224))
        # hwc
        img = np.array(input_image)
        height = img.shape[0]
        width = img.shape[1]
        h_off = int((height - 224) / 2)
        w_off = int((width - 224) / 2)
        crop_img = img[h_off:height - h_off, w_off:width - w_off, :]
        # rgb to bgr
        print("crop shape = ", crop_img.shape)
        img = crop_img[:, :, ::-1]
        shape = img.shape
        print("img shape = ", shape)
        img = img.astype("float32")
        img[:, :, 0] *= 0.003922
        img[:, :, 1] *= 0.003922
        img[:, :, 2] *= 0.003922
        img[:, :, 0] -= 0.4914
        img[:, :, 0] = img[:, :, 0] / 0.2023
        img[:, :, 1] -= 0.4822
        img[:, :, 1] = img[:, :, 1] / 0.1994
        img[:, :, 2] -= 0.4465
        img[:, :, 2] = img[:, :, 2] / 0.2010
        img = img.reshape([1] + list(shape))
        # nhwc -> nchw
        result = img.transpose([0, 3, 1, 2]).copy()
        return result

    def inference(self, resized_image):
        """inference"""
        return self._model.execute([
            resized_image,
        ])

    def post_process(self, infer_output, image_file):
        """postprocess"""
        print("post process")
        data = infer_output[0]
        print("data shape = ", data.shape)
        vals = data.flatten()
        # softmax over the 10 class scores
        max_val = 0
        total = 0
        for i in range(0, 10):
            if vals[i] > max_val:
                max_val = vals[i]
        for i in range(0, 10):
            vals[i] = np.exp(vals[i] - max_val)
            total += vals[i]
        for i in range(0, 10):
            vals[i] /= total
        print("vals shape = ", vals.shape)
        top_k = vals.argsort()[-1:-6:-1]
        print("images:{}".format(image_file))
        print("======== top5 inference results: =============")
        for n in top_k:
            object_class = get_resnet50_class(n)
            print("label:%d  confidence: %f, class: %s" %
                  (n, vals[n], object_class))

        #using pillow, the category with the highest confidence is written on the image and saved locally
        if len(top_k):
            object_class = get_resnet50_class(top_k[0])
            output_path = os.path.join(os.path.join(SRC_PATH, "../out"),
                                       os.path.basename(image_file))
            origin_img = Image.open(image_file)
            draw = ImageDraw.Draw(origin_img)
            font = ImageFont.truetype(
                "/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf",
                size=20)
            draw.text((10, 50), object_class, font=font, fill=255)
            origin_img.save(output_path)
Example No. 27
class Classify(object):
    def __init__(self, acl_resource, model_path, model_width, model_height):
        self.total_buffer = None
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height

        self._model = AclLiteModel(model_path)
        self._dvpp = AclLiteImageProc(acl_resource)
        print("The App arg is __init__")

    def __del__(self):
        if self.total_buffer:
            acl.rt.free(self.total_buffer)  
        if self._dvpp:
            del self._dvpp
        print("[Sample] class Samle release source success")

    def pre_process(self, image):
        yuv_image = self._dvpp.jpegd(image)
        print("decode jpeg end")
        resized_image = self._dvpp.resize(yuv_image, 
                        self._model_width, self._model_height)
        print("resize yuv end")
        return resized_image
    
    def batch_process(self, resized_image_list, batch):
        resized_img_data_list = []
        resized_img_size = resized_image_list[0].size
        total_size = batch * resized_img_size
        stride = 0
        for resized_image in resized_image_list:
            resized_img_data_list.append(resized_image.data())
        self.total_buffer, ret = acl.rt.malloc(total_size, ACL_MEM_MALLOC_HUGE_FIRST)
        check_ret("acl.rt.malloc", ret)    
        for i in range(len(resized_image_list)):
            ret = acl.rt.memcpy(self.total_buffer + stride, resized_img_size,\
                        resized_img_data_list[i], resized_img_size,\
                        ACL_MEMCPY_DEVICE_TO_DEVICE)
            check_ret("acl.rt.memcpy", ret)
            stride += resized_img_size
        return total_size
    
    def inference(self, resized_image_list, batch):
        total_size = self.batch_process(resized_image_list, batch)
        batch_buffer = {'data': self.total_buffer, 'size':total_size}
        return self._model.execute([batch_buffer, ])
    
    def post_process(self, infer_output, batch_image_files, number_of_images):
        print("post process") 
        datas = infer_output[0]
        
        for number in range(number_of_images):
            data = datas[number]
            vals = data.flatten()
            top_k = vals.argsort()[-1:-6:-1]
            print("images:{}".format(batch_image_files[number]))
            print("======== top5 inference results: =============")
            for n in top_k:
                object_class = get_image_net_class(n)
                print("label:%d  confidence: %f, class: %s" % (n, vals[n], object_class))
            
            #Use Pillow to write the categories with the highest confidence on the image and save them locally
            if len(top_k):
                object_class = get_image_net_class(top_k[0])
                output_path = os.path.join("../out", os.path.basename(batch_image_files[number]))
                origin_img = Image.open(batch_image_files[number])
                draw = ImageDraw.Draw(origin_img)
                font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf", size=20)
                draw.text((10, 50), object_class, font=font, fill=255)
                origin_img.save(output_path)
Example No. 28
class VideoSuperResolution(object):
    """
    video super resolution
    """
    def __init__(self, model_path, model_width, model_height, scale,
                 num_frames, set_file, batch_size, input_dir, input_name,
                 output_dir):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._model = AclLiteModel(model_path)

        self.scale = scale
        self.num_frames = num_frames
        self.set_file = set_file
        self.batch_size = batch_size
        self.input_dir = input_dir
        self.input_name = input_name
        self.output_dir = output_dir

    def __del__(self):
        print("[Sample] class Samle release source success")

    def _get_fps(self):
        probe = ffmpeg.probe(str(os.path.join(self.input_dir,
                                              self.input_name)))
        stream_data = next(
            (stream for stream in probe['streams']
             if stream['codec_type'] == 'video'),
            None,
        )
        frame_rate = eval(stream_data['avg_frame_rate'])
        return frame_rate

    def extract_raw_frames(self):
        """
        extract frames from video
        """
        source_path = os.path.join(self.input_dir, self.input_name)

        target_path = os.path.join(self.input_dir, "images")
        if os.path.exists(target_path):
            shutil.rmtree(target_path)

        os.makedirs(target_path)
        print(source_path)
        vidcap = cv2.VideoCapture(source_path)
        success, image = vidcap.read()

        total_frames = vidcap.get(cv2.CAP_PROP_FRAME_COUNT)

        meta, meta4 = make_reds_dataset.get_videos_meta(
            self._model_width, self._model_height, total_frames)
        make_reds_dataset.split_sets(self.input_dir, meta, meta4)
        count = 0
        while success:
            image = cv2.resize(image, (self._model_width, self._model_height),
                               interpolation=cv2.INTER_AREA)
            cv2.imwrite(os.path.join(target_path, "%.8d.png") % count, image)
            success, image = vidcap.read()
            count += 1

        return target_path

    def build_video(self, output_img_path, output_video_name):
        """
        build video from frames
        """
        output_video_name = os.path.join(self.output_dir, output_video_name)
        if os.path.exists(output_video_name):
            os.remove(output_video_name)

        fps = (self._get_fps()) * FPS_MUL
        print(fps)

        template = os.path.join(output_img_path, '%8d.png')
        ffmpeg.input(str(template), format='image2',
                     framerate=38).output(
                         output_video_name,
                         crf=17,
                         vcodec='libx264',
                         pix_fmt='yuv420p').run(capture_stdout=True)

    def inference(self):
        """
        super resolution inference
        """
        dataloader = build_test_dataloader(batch_size=self.batch_size,
                                           scale=self.scale,
                                           set_file=self.set_file,
                                           num_frames=self.num_frames,
                                           data_config=self.input_dir)

        output_img_dir = os.path.join(self.output_dir, "images")
        if os.path.exists(output_img_dir):
            shutil.rmtree(output_img_dir)
        os.makedirs(output_img_dir)

        ave_time = 0
        max_frame = len(dataloader)
        print("max_frame ", max_frame)
        for i in range(max_frame):
            lr_names, lr = dataloader.get_next()

            sr = self._model.execute([lr])
            sr = np.asarray(sr).squeeze().astype(np.uint8)

            im_name = lr_names[0].split(os.path.sep)
            output_img_path = os.path.join(output_img_dir, *im_name[-1:])

            print('Save high resolution image : ', output_img_path)
            imageio.imwrite(output_img_path, sr)

        print('Save high resolution image complete!')
        return output_img_dir
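A hypothetical end-to-end call sequence for this class, with every constructor argument a placeholder to be replaced by the values the original sample uses:

def main():
    """Hypothetical driver for VideoSuperResolution (placeholder arguments)."""
    acl_resource = AclLiteResource()
    acl_resource.init()
    vsr = VideoSuperResolution(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT,
                               scale=4, num_frames=5, set_file=SET_FILE,
                               batch_size=1, input_dir=INPUT_DIR,
                               input_name='input.mp4', output_dir=OUTPUT_DIR)
    vsr.extract_raw_frames()                        # split the source video into frames
    output_img_dir = vsr.inference()                # run super-resolution on each batch
    vsr.build_video(output_img_dir, 'output.mp4')   # reassemble frames into a video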
Example No. 29
def main():
    if (len(sys.argv) != 2):
        print("Please input video path")
        exit(1)
    #ACL resource initialization
    acl_resource = AclLiteResource()
    acl_resource.init()
    #load model
    model = AclLiteModel(MODEL_PATH)
    #open video
    video_path = sys.argv[1]
    print("open video ", video_path)
    cap = cv.VideoCapture(video_path)
    fps = cap.get(cv.CAP_PROP_FPS)
    Width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
    Height = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))

    lf.set_img_size((Width, Height))

    #create output directory
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    output_Video = os.path.basename(video_path)
    output_Video = os.path.join(OUTPUT_DIR, output_Video)

    fourcc = cv.VideoWriter_fourcc(
        *'mp4v')  # DIVX, XVID, MJPG, X264, WMV1, WMV2
    outVideo = cv.VideoWriter(output_Video, fourcc, fps, (Width, Height))

    # Read until video is completed
    while (cap.isOpened()):
        ret, frame = cap.read()
        if ret == True:
            #preprocess
            data, orig = preprocess(frame)
            #Send into model inference
            result_list = model.execute([
                data,
            ])
            #Process inference results
            result_return = post_process(result_list, orig)
            print("result = ", result_return)
            #Process lane line
            frame_with_lane = preprocess_frame(frame)

            distance = np.zeros(shape=(len(result_return['detection_classes']),
                                       1))
            for i in range(len(result_return['detection_classes'])):
                box = result_return['detection_boxes'][i]
                class_name = result_return['detection_classes'][i]
                confidence = result_return['detection_scores'][i]
                distance[i] = calculate_position(
                    bbox=box,
                    transform_matrix=perspective_transform,
                    warped_size=WARPED_SIZE,
                    pix_per_meter=pixels_per_meter)
                label_dis = '{} {:.2f}m'.format('dis:', distance[i][0])
                cv.putText(frame_with_lane, label_dis,
                           (int(box[1]) + 10, int(box[2]) + 15),
                           cv.FONT_ITALIC, 0.6, colors[i % 6], 1)

                cv.rectangle(frame_with_lane, (int(box[1]), int(box[0])),
                             (int(box[3]), int(box[2])), colors[i % 6])
                p3 = (max(int(box[1]), 15), max(int(box[0]), 15))
                out_label = class_name
                cv.putText(frame_with_lane, out_label, p3, cv.FONT_ITALIC, 0.6,
                           colors[i % 6], 1)

            outVideo.write(frame_with_lane)
        # Break the loop
        else:
            break
    cap.release()
    outVideo.release()
    print("Execute end")
Example No. 30
class EdgeDetection(object):
    """
    Class for edge detection
    """
    def __init__(self, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._img_width = 0
        self._img_height = 0
        self._model = None

    def init(self):
        """
        Initialize
        """
        # Load model
        self._model = AclLiteModel(self._model_path)

        return const.SUCCESS

    @utils.display_time
    def pre_process(self, im):
        """
        image preprocess
        """
        self._img_width = im.size[0]
        self._img_height = im.size[1]
        im = im.resize((512, 512))
        # hwc
        img = np.array(im)
        # rgb to bgr
        img = img[:, :, ::-1]
        img = img.astype("float16")
        result = img.transpose([2, 0, 1]).copy()
        return result 

    @utils.display_time
    def inference(self, input_data):
        """
        model inference
        """
        return self._model.execute(input_data)

    def sigmoid(self, x):
        """
        sigmod function
        """
        return 1. / (1 + np.exp(-x))

    @utils.display_time
    def post_process(self, infer_output, image_name):
        """
        Post-processing, analysis of inference results
        """
        out_size = [512, 256, 128, 64, 63]
        edge = np.zeros((len(out_size), out_size[0], out_size[0]), dtype=np.float64)
        for idx in range(5):
            result = infer_output[idx]
            img = np.array(result)
            img = np.reshape(img, (out_size[idx], out_size[idx]))
            if idx != 0:
                img = Image.fromarray(img)
                img = img.resize((out_size[0], out_size[0]))
                img = np.array(img)
            edge[idx] = img
        final_edge = 0.2009036 * edge[0] + 0.2101715 * edge[1] + \
                        0.22262956 * edge[2] + 0.22857015 * edge[3] + \
                                0.2479302 * edge[4] + 0.00299916

        final_edge = self.sigmoid(final_edge)
        resultimage = Image.fromarray(np.uint8((1 - final_edge)*255))
        resultimage = resultimage.resize((self._img_width, self._img_height))
        resultimage.save('../out/out' + image_name)
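A hypothetical driver for the EdgeDetection class, patterned after the other examples; MODEL_PATH and the ../out directory are assumed to exist as in the original sample.

def main():
    """Hypothetical driver for EdgeDetection (assumed constants)."""
    acl_resource = AclLiteResource()
    acl_resource.init()
    detector = EdgeDetection(MODEL_PATH, 512, 512)  # pre_process resizes to 512x512
    detector.init()
    image_file = sys.argv[1]
    im = Image.open(image_file)                     # PIL image, as pre_process expects
    input_data = detector.pre_process(im)
    result = detector.inference([input_data])
    detector.post_process(result, os.path.basename(image_file))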