Example #1
def main():
    """main"""
    #acl init
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)
    acl_resource = AclResource()
    acl_resource.init()
    model = Model(MODEL_PATH)
    dvpp = Dvpp(acl_resource)

    # Take the picture directory from the command-line argument and run inference on each picture
    image_dir = sys.argv[1]
    images_list = [
        os.path.join(image_dir, img) for img in os.listdir(image_dir)
        if os.path.splitext(img)[1] in const.IMG_EXT
    ]

    if not os.path.isdir(os.path.join(SRC_PATH, "../outputs")):
        os.mkdir(os.path.join(SRC_PATH, "../outputs"))

    #infer picture
    for pic in images_list:
        #get pic data
        orig_shape, l_data = preprocess(pic)

        #inference
        result_list = model.execute([l_data])

        #postprocess
        postprocess(result_list, pic, orig_shape, pic)
    print("Execute end")
Example #2
def main():

    acl_resource = AclResource()
    acl_resource.init()
    
    mnist = input_data.read_data_sets('MNIST_data/', one_hot=True)
    
    #load model
    model = Model(model_path)
    
    images = mnist.test.images
    labels = mnist.test.labels
    total = len(images)
    correct = 0.0
    
    start = time.time()
    for i in range(len(images)):
        result = model.execute([images[i]])
        label = labels[i]
        if np.argmax(result[0]) == np.argmax(label):
            correct += 1.0
        if i % 1000 == 0:
            print(f'infer {i+1} pics...')
    end = time.time()
    print(f'infer finished, acc is {correct/total}, use time {(end-start)*1000}ms')
Example #3
def main():
    """
    Program execution with picture directory parameters
    """
    
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)
    
    acl_resource = AclResource()
    acl_resource.init()
    model = Model(MODEL_PATH)
    dvpp = Dvpp(acl_resource)
    
    # Take the picture directory from the command-line argument and run inference on each picture
    image_dir = sys.argv[1]
    images_list = [os.path.join(image_dir, img)
                   for img in os.listdir(image_dir)
                   if os.path.splitext(img)[1] in const.IMG_EXT]
    #Create a directory to store the inference results
    if not os.path.isdir('../outputs'):
        os.mkdir('../outputs')

    image_info = construct_image_info()

    for image_file in images_list:
        #read picture
        image = AclImage(image_file)
        #preprocess image
        resized_image = pre_process(image, dvpp)
        print("pre process end")
        # run inference
        result = model.execute([resized_image, image_info])
        # process results
        post_process(result, image, image_file)
Example #4
def main():
    """
    main
    """
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)

    acl_resource = AclResource()
    acl_resource.init()
    model = Model(MODEL_PATH)

    data_dir = sys.argv[1]
    data_list = [
        os.path.join(data_dir, testdata) for testdata in os.listdir(data_dir)
        if os.path.splitext(testdata)[1] in ['.bin']
    ]

    #Create a directory to store the inference results
    if not os.path.isdir(os.path.join(SRC_PATH, "../outputs")):
        os.mkdir(os.path.join(SRC_PATH, "../outputs"))

    for data_file in data_list:
        data_raw = np.fromfile(data_file, dtype=np.float32)
        input_data = data_raw.reshape(16, MODEL_WIDTH, MODEL_HEIGHT, 3).copy()
        result = model.execute([
            input_data,
        ])
        post_process(result, data_file)

    print("process  end")
Example #5
def main():
    """
    Program execution
    """
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)

    acl_resource = AclResource()  # ACL initialization
    acl_resource.init()

    model = Model(model_path)  # load model

    src_dir = os.listdir(INPUT_DIR)
    for pic in src_dir:
        if not pic.lower().endswith(('.bmp', '.dib', '.png', '.jpg', '.jpeg', '.pbm', '.pgm', '.ppm', '.tif', '.tiff')):
            print('%s is not a picture, ignore this file and continue' % pic)
            continue
        pic_path = os.path.join(INPUT_DIR, pic)
        RGB_image, o_h, o_w = pre_process(pic_path)  # preprocess

        start_time = time.time()
        result_list = model.execute([RGB_image, ])  # inferring
        end_time = time.time()
        print('pic:{}'.format(pic))
        print('pic_size:{}x{}'.format(o_h, o_w))
        print('time:{}ms'.format(int((end_time - start_time) * 1000)))
        print('\n')
        post_process(result_list, pic, o_h, o_w)  # postprocess
    print('task over')
Example #6
def main():
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)

    acl_resource = AclResource()
    acl_resource.init()
    model = Model(MODEL_PATH)
    dvpp = Dvpp(acl_resource)

    image_dir = sys.argv[1]
    images_list = [
        os.path.join(image_dir, img) for img in os.listdir(image_dir)
        if os.path.splitext(img)[1] in const.IMG_EXT
    ]

    #Create a directory to store the inference results
    if not os.path.isdir(os.path.join(SRC_PATH, "../outputs")):
        os.mkdir(os.path.join(SRC_PATH, "../outputs"))

    image_info = construct_image_info()
    for image_file in images_list:
        image = AclImage(image_file)
        resized_image = pre_process(image, dvpp)
        print("pre process end")

        result = model.execute([
            resized_image,
        ])
        post_process(result, image_file)

        print("process " + image_file + " end")
Example #7
    def init(self):
        """
        Initialize
        """
        # Load model
        self._model = Model(self._model_path)

        return const.SUCCESS
Example #8
    def init(self):
        """
        init
        """
        self._dvpp = Dvpp()
        self._model = Model(self._model_path)

        return constants.SUCCESS
Example #9
    def init(self):
        """
        Initialize
        """
        # Load model
        self._model = Model(self._model_path)

        return constants.SUCCESS
Example #10
    def init(self):
        """
        Initialize
        """
        self._dvpp = Dvpp()
        # Load model
        self._model = Model(self._model_path)

        return const.SUCCESS
Example #11
def main():

    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    #ACL resource initialization
    acl_resource = AclResource()
    acl_resource.init()
    #load model
    model = Model(MODEL_PATH)
    images_list = [
        os.path.join(INPUT_DIR, img) for img in os.listdir(INPUT_DIR)
        if os.path.splitext(img)[1] in const.IMG_EXT
    ]
    #Read images from the data directory one by one for inference
    for pic in images_list:
        #read image
        bgr_img = cv.imread(pic)
        #preprocess
        data, orig = preprocess(pic)
        #data, orig = preprocess(bgr_img)
        #Send into model inference
        result_list = model.execute([
            data,
        ])
        #Process inference results
        result_return = post_process(result_list, orig)

        print("result = ", result_return)

        for i in range(len(result_return['detection_classes'])):
            box = result_return['detection_boxes'][i]
            class_name = result_return['detection_classes'][i]
            confidence = result_return['detection_scores'][i]
            cv.rectangle(bgr_img, (int(box[1]), int(box[0])),
                         (int(box[3]), int(box[2])), colors[i % 6])
            p3 = (max(int(box[1]), 15), max(int(box[0]), 15))
            out_label = class_name
            cv.putText(bgr_img, out_label, p3, cv.FONT_ITALIC, 0.6,
                       colors[i % 6], 1)

        output_file = os.path.join(OUTPUT_DIR, "out_" + os.path.basename(pic))
        print("output:%s" % output_file)
        cv.imwrite(output_file, bgr_img)

    print("Execute end")
Example #12
class VggSsd(object):
    def __init__(self, acl_resource, model_path, model_width, model_height):
        self._acl_resource = acl_resource
        self._model_width = model_width
        self._model_height = model_height
        # Load the offline model
        self._model = Model(model_path)

    def __del__(self):
        if self._model:
            del self._model

    def execute(self, data):
        # Feed the data to the offline model for inference
        return self._model.execute([
            data.resized_image,
        ])

    def post_process(self, infer_output, data):
        # VGG-SSD has two outputs. The first, infer_output[0], is the number of detected objects, with shape (1, 8)
        box_num = int(infer_output[0][0, 0])

        # The second output, infer_output[1], holds the detected object information, with shape (1, 200, 8)
        box_info = infer_output[1][0]
        detection_result_list = []

        for i in range(box_num):
            # Confidence of the detected object
            score = box_info[i, SCORE]
            if score < 0.9:
                break

            detection_item = presenter_datatype.ObjectDetectionResult()
            detection_item.confidence = score
            # The face bounding-box coordinates are normalized; multiply by the image width/height to convert them to pixel coordinates
            detection_item.box.lt.x = int(box_info[i, TOP_LEFT_X] *
                                          data.frame_width)
            detection_item.box.lt.y = int(box_info[i, TOP_LEFT_Y] *
                                          data.frame_height)
            detection_item.box.rb.x = int(box_info[i, BOTTOM_RIGHT_X] *
                                          data.frame_width)
            detection_item.box.rb.y = int(box_info[i, BOTTOM_RIGHT_Y] *
                                          data.frame_height)
            # Format the confidence as a percentage string
            detection_item.result_text = str(
                round(detection_item.confidence * 100, 2)) + "%"
            detection_result_list.append(detection_item)

        self.print_detection_results(detection_result_list, data.channel)

        return detection_result_list

    def print_detection_results(self, results, channel_id):
        for item in results:
            print("channel %d inference result: box top left(%d, %d), "
                  "bottom right(%d %d), score %s" %
                  (channel_id, item.box.lt.x, item.box.lt.y, item.box.rb.x,
                   item.box.rb.y, item.result_text))
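
Note: VggSsd.execute() and post_process() above expect a data object that carries resized_image, frame_width, frame_height and channel attributes. Below is a minimal driver sketch under that assumption, reusing the AclResource, Dvpp and AclImage helpers seen in the other examples; the FrameData container, the model path and the 300x300 input size are illustrative placeholders, not part of the original sample:

class FrameData(object):
    """Hypothetical container with the attributes VggSsd reads."""
    def __init__(self, resized_image, frame_width, frame_height, channel=0):
        self.resized_image = resized_image
        self.frame_width = frame_width
        self.frame_height = frame_height
        self.channel = channel


def detect_one(image_path):
    acl_resource = AclResource()
    acl_resource.init()
    dvpp = Dvpp(acl_resource)
    # placeholder model path and input size
    detector = VggSsd(acl_resource, "../model/vgg_ssd.om", 300, 300)

    image = AclImage(image_path)
    yuv_image = dvpp.jpegd(image.copy_to_dvpp())
    resized_image = dvpp.resize(yuv_image, 300, 300)
    data = FrameData(resized_image, image.width, image.height)

    result = detector.execute(data)
    # returns a list of ObjectDetectionResult items and prints them
    return detector.post_process(result, data)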
Example #13
def main():
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    #ACL resource initialization
    acl_resource = AclResource()
    acl_resource.init()
    #load model
    model = Model(MODEL_PATH)
    images_list = [os.path.join(INPUT_DIR, img)
                   for img in os.listdir(INPUT_DIR)
                   if os.path.splitext(img)[1] in const.IMG_EXT]
    #Read images from the data directory one by one for inference
    for pic in images_list:
        #read image
        bgr_img = cv.imread(pic)
        #preprocess
        data, orig = preprocess(pic)
        #Send into model inference
        result_list = model.execute([data,])
        #Process inference results
        result_return = post_process(result_list, orig)
        print("result = ", result_return)
        #Process lane line
        frame_with_lane = preprocess_frame(bgr_img)
        distance = np.zeros(shape=(len(result_return['detection_classes']), 1))

        for i in range(len(result_return['detection_classes'])):
            box = result_return['detection_boxes'][i]
            class_name = result_return['detection_classes'][i]
            confidence = result_return['detection_scores'][i]
            distance[i] = calculate_position(bbox=box, transform_matrix=perspective_transform,
                        warped_size=WARPED_SIZE, pix_per_meter=pixels_per_meter)
            label_dis = '{} {:.2f}m'.format('dis:', distance[i][0])
            cv.putText(frame_with_lane, label_dis, (int(box[1]) + 10, int(box[2]) + 15), 
                        cv.FONT_ITALIC, 0.6, colors[i % 6], 1)

            cv.rectangle(frame_with_lane, (int(box[1]), int(box[0])), (int(box[3]), int(box[2])), colors[i % 6])
            p3 = (max(int(box[1]), 15), max(int(box[0]), 15))
            out_label = class_name
            cv.putText(frame_with_lane, out_label, p3, cv.FONT_ITALIC, 0.6, colors[i % 6], 1)

        output_file = os.path.join(OUTPUT_DIR, "out_" + os.path.basename(pic))
        print("output:%s" % output_file)
        cv.imwrite(output_file, frame_with_lane)
    print("Execute end")
Example #14
class Classify(object):
    """
    Class for image classification
    """
    def __init__(self, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._img_width = 0
        self._img_height = 0
        self._model = None
        self._dvpp = None

    def init(self):
        """
        Initialize
        """
        self._dvpp = Dvpp()
        # Load model
        self._model = Model(self._model_path)

        return const.SUCCESS

    @utils.display_time
    def pre_process(self, image):
        """
        preprocess 
        """
        image_dvpp = image.copy_to_dvpp()
        yuv_image = self._dvpp.jpegd(image_dvpp)
        resized_image = self._dvpp.resize(yuv_image, self._model_width,
                                          self._model_height)
        return resized_image

    @utils.display_time
    def inference(self, input_data):
        """
        model inference
        """
        return self._model.execute(input_data)

    @utils.display_time
    def post_process(self, infer_output, image_file):
        """
        Post-processing, analysis of inference results
        """
        output_path = os.path.join(OUTPUT_DIR, os.path.basename(image_file))
        infer_result = infer_output[0]
        vals = infer_result.flatten()
        pre_index = vals.argsort()[-1]

        origin_img = Image.open(image_file)
        draw = ImageDraw.Draw(origin_img)
        font = ImageFont.load_default()
        draw.text((10, 50), CLS[pre_index], font=font, fill=255)
        origin_img.save(output_path)
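
A short usage sketch for the Classify class above, assuming the AclResource and AclImage helpers from the surrounding examples are importable and that OUTPUT_DIR and CLS are defined as the class requires; the model path, the 224x224 input size and the image path are placeholders:

def classify_one(image_path):
    acl_resource = AclResource()
    acl_resource.init()

    # placeholder model path and input size
    classifier = Classify("../model/classification.om", 224, 224)
    classifier.init()

    image = AclImage(image_path)
    resized_image = classifier.pre_process(image)
    result = classifier.inference([resized_image])
    # writes the annotated image to OUTPUT_DIR
    classifier.post_process(result, image_path)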
Example #15
def main():
    """main"""
    #Initialize acl
    acl_resource = AclResource()
    acl_resource.init()
    #Create a detection network instance, currently using the vgg_ssd network.
    # When the detection network is replaced, instantiate a new network here
    detect = VggSsd(acl_resource, MODEL_WIDTH, MODEL_HEIGHT)
    #Load offline model
    model = Model(MODEL_PATH)
    #Connect to the presenter server according to the configuration,
    # and end the execution of the application if the connection fails
    chan = presenteragent.presenter_channel.open_channel(FACE_DETEC_CONF)
    if chan is None:
        print("Open presenter channel failed")
        return
    #Open CAMERA0 on the development board
    cap = Camera(0)
    while True:
        #Read a picture from the camera
        image = cap.read()
        if image is None:
            print("Get memory from camera failed")
            break

        #The detection network processes images into model input data
        model_input = detect.pre_process(image)
        if model_input is None:
            print("Pre process image failed")
            break
        #Send data to offline model inference
        result = model.execute(model_input)
        #Parse the inference output with the detection network
        jpeg_image, detection_list = detect.post_process(result, image)
        if jpeg_image is None:
            print("The jpeg image for present is None")
            break

        chan.send_detection_data(CAMERA_FRAME_WIDTH, CAMERA_FRAME_HEIGHT,
                                 jpeg_image, detection_list)
Example #16
class Cartoonization(object):
    """
    class for Cartoonization
    """
    def __init__(self, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self.device_id = 0
        self._dvpp = None
        self._model = None

    def init(self):
        """
        Initialize
        """
        # Initialize dvpp
        self._dvpp = Dvpp()

        # Load model
        self._model = Model(self._model_path)

        return const.SUCCESS

    @utils.display_time
    def pre_process(self, image):
        """
        image preprocess
        """
        image_dvpp = image.copy_to_dvpp()
        yuv_image = self._dvpp.jpegd(image_dvpp)
        crop_and_paste_image = self._dvpp.crop_and_paste_get_roi(yuv_image, image.width, image.height, \
                                self._model_width, self._model_height)
        return crop_and_paste_image

    @utils.display_time
    def inference(self, resized_image):
        """
        model inference
        """
        return self._model.execute(resized_image)

    @utils.display_time
    def post_process(self, infer_output, image_file, origin_image):
        """
        post process
        """
        data = ((np.squeeze(infer_output[0]) + 1) * 127.5)
        img = cv2.cvtColor(data, cv2.COLOR_RGB2BGR)
        img = cv2.resize(img, (origin_image.width, origin_image.height))
        output_path = os.path.join("../outputs", os.path.basename(image_file))
        cv2.imwrite(output_path, img)
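
A similar hedged driver for Cartoonization, again assuming the AclResource and AclImage helpers used elsewhere in these examples; the model path and the 256x256 input size are placeholders. Since post_process reads origin_image.width and origin_image.height, the original AclImage is passed through:

def cartoonize_one(image_path):
    acl_resource = AclResource()
    acl_resource.init()

    cartoon = Cartoonization("../model/cartoonization.om", 256, 256)  # placeholder
    cartoon.init()

    image = AclImage(image_path)
    crop_and_paste_image = cartoon.pre_process(image)
    result = cartoon.inference([crop_and_paste_image])
    # writes the cartoonized image to ../outputs
    cartoon.post_process(result, image_path, image)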
Example #17
class Classify(object):
    """
    Class for image classification
    """
    def __init__(self, model_path):
        self._model_path = model_path
        self._model = None

    def init(self):
        """
        Initialize
        """
        # Load model
        self._model = Model(self._model_path)

        return const.SUCCESS

    @utils.display_time
    def inference(self, input_data):
        """
        model inference
        """
        return self._model.execute(input_data)
Example #18
class Hpa(object):
    """
    Class for HPA (Human Protein Atlas) image classification
    """
    def __init__(self, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._img_width = 0
        self._img_height = 0
        self._model = None

    def init(self):
        """
        Initialize
        """
        # Load model
        self._model = Model(self._model_path)

        return constants.SUCCESS

    def pre_process(self, im):
        """
        image preprocess
        """
        self._img_width = im.size[0]
        self._img_height = im.size[1]
        im = im.resize((224, 224))
        # hwc
        img = np.array(im)
        # rgb to bgr
        img = img[:, :, ::-1]
        img = img.astype("float16")
        result = img.transpose([2, 0, 1]).copy()
        return result

    def inference(self, input_data):
        """
        model inference
        """
        return self._model.execute(input_data)

    def sigmoid(self, x):
        """
        sigmoid function
        """
        return 1. / (1 + np.exp(-x))

    def visualize(self, file_name, pred):
        """
        visualize
        """

        # 1. Map class IDs to label names
        id_2_label = [
            "Mitochondria", "Nucleus", "Endoplasmic reticulum",
            "Nuclear speckles", "Plasma membrane", "Nucleoplasm", "Cytosol",
            "Nucleoli", "Vesicles", "Golgi apparatus"
        ]

        # 2. Read the picture and annotate it with the top-3 predictions
        setFont = ImageFont.truetype('./font.ttf', 20)
        fillColor = "#fff"
        im = Image.open(file_name)
        im = im.resize((512, 512))
        draw = ImageDraw.Draw(im)
        pred = pred.flatten()
        top1 = np.argmax(pred)
        print(id_2_label[top1], pred[top1])
        label = "%s : %.2f%%" % (id_2_label[top1], float(pred[top1]) * 100)
        pred[top1] = 0
        draw.text(xy=(20, 20), text=label, font=setFont, fill=fillColor)

        top2 = np.argmax(pred)
        print(top2, pred.shape)
        label = "%s : %.2f%%" % (id_2_label[top2], float(pred[top2]) * 100)
        pred[top2] = 0
        draw.text(xy=(20, 50), text=label, font=setFont, fill=fillColor)

        top3 = np.argmax(pred)
        label = "%s : %.2f%%" % (id_2_label[top3], float(pred[top3]) * 100)
        pred[top3] = 0
        draw.text(xy=(20, 80), text=label, font=setFont, fill=fillColor)

        # save photo
        im.save("../outputs/out_" + os.path.basename(file_name))

    def post_process(self, result, image_name):
        """
        post_process
        """

        score = np.array(result[0])
        pred = self.sigmoid(score)

        # visualize
        self.visualize(image_name, pred)
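
Hpa.pre_process works on a PIL image (it reads im.size and calls im.resize), so a driver opens the file with PIL rather than AclImage. A minimal sketch, assuming AclResource is importable and ../outputs exists; the model path is a placeholder:

from PIL import Image

def run_hpa(image_path):
    acl_resource = AclResource()
    acl_resource.init()

    hpa = Hpa("../model/hpa.om", 224, 224)  # placeholder path
    hpa.init()

    im = Image.open(image_path)
    input_data = hpa.pre_process(im)      # float16 CHW array
    result = hpa.inference([input_data])
    hpa.post_process(result, image_path)  # saves ../outputs/out_<name>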
Example #19
class EdgeDetection(object):
    """
    Class for edge detection
    """
    def __init__(self, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._img_width = 0
        self._img_height = 0
        self._model = None

    def init(self):
        """
        Initialize
        """
        # Load model
        self._model = Model(self._model_path)

        return const.SUCCESS

    @utils.display_time
    def pre_process(self, im):
        """
        image preprocess
        """
        self._img_width = im.size[0]
        self._img_height = im.size[1]
        im = im.resize((512, 512))
        # hwc
        img = np.array(im)
        # rgb to bgr
        img = img[:, :, ::-1]
        img = img.astype("float16")
        result = img.transpose([2, 0, 1]).copy()
        return result

    @utils.display_time
    def inference(self, input_data):
        """
        model inference
        """
        return self._model.execute(input_data)

    def sigmoid(self, x):
        """
        sigmoid function
        """
        return 1. / (1 + np.exp(-x))

    @utils.display_time
    def post_process(self, infer_output, image_name):
        """
        Post-processing, analysis of inference results
        """
        out_size = [512, 256, 128, 64, 63]
        edge = np.zeros((len(out_size), out_size[0], out_size[0]),
                        dtype=np.float64)
        for idx in range(5):
            result = infer_output[idx]
            img = np.array(result)
            img = np.reshape(img, (out_size[idx], out_size[idx]))
            if idx != 0:
                img = Image.fromarray(img)
                img = img.resize((out_size[0], out_size[0]))
                img = np.array(img)
            edge[idx] = img
        final_edge = 0.2009036 * edge[0] + 0.2101715 * edge[1] + \
                        0.22262956 * edge[2] + 0.22857015 * edge[3] + \
                                0.2479302 * edge[4] + 0.00299916

        final_edge = self.sigmoid(final_edge)
        resultimage = Image.fromarray(np.uint8((1 - final_edge) * 255))
        resultimage = resultimage.resize((self._img_width, self._img_height))
        resultimage.save('../outputs/out_' + image_name)
Example #20
class SingleImageDehaze(object):
    """
    Class for SingleImageDehaze
    """
    def __init__(self, model_path, model_width, model_height):
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._img_width = 0
        self._img_height = 0
        self._model = None

    def init(self):
        """
        Initialize
        """
        # Load model
        self._model = Model(self._model_path)

        return constants.SUCCESS

    def pre_process(self, im):
        """
        image preprocess
        """
        self._img_width = im.size[0]
        self._img_height = im.size[1]
        im = im.resize((512, 512))
        # hwc
        img = np.array(im)
        img = img / 127.5 - 1.

        # rgb to bgr
        img = img[:, :, ::-1]
        img = img.astype("float16")
        return img

    def inference(self, input_data):
        """
        model inference
        """
        return self._model.execute(input_data)

    def sigmoid(self, x):
        """
        sigmoid function
        """
        return 1. / (1 + np.exp(-x))

    def post_process(self, infer_output, image_name):
        """
        Post-processing, analysis of inference results
        """
        resultimage = np.reshape(np.array(infer_output[0]), (512, 512, 3))
        resultimage = resultimage[:, :, ::-1]
        resultimage = np.clip(((resultimage) + 1.) / 2. * 255., 0,
                              255).astype(np.uint8)
        resultimage = Image.fromarray(resultimage)
        resultimage = resultimage.resize((self._img_width, self._img_height))
        resultimage.save('../outputs/out_' + image_name)
Example #21
    def __init__(self, acl_resource, model_path, model_width, model_height):
        self._acl_resource = acl_resource
        self._model_width = model_width
        self._model_height = model_height
        # Load the offline model
        self._model = Model(model_path)
Example #22
def main():
    if (len(sys.argv) != 2):
        print("Please input video path")
        exit(1)

    #ACL resource initialization
    acl_resource = AclResource()
    acl_resource.init()
    #load model
    model = Model(MODEL_PATH)

    #open video
    video_path = sys.argv[1]
    print("open video ", video_path)
    cap = cv.VideoCapture(video_path)
    fps = cap.get(cv.CAP_PROP_FPS)
    Width = int(cap.get(cv.CAP_PROP_FRAME_WIDTH))
    Height = int(cap.get(cv.CAP_PROP_FRAME_HEIGHT))

    lf.set_img_size((Width, Height))

    #create output directory
    if not os.path.exists(OUTPUT_DIR):
        os.mkdir(OUTPUT_DIR)
    output_Video = os.path.basename(video_path)
    output_Video = os.path.join(OUTPUT_DIR, output_Video)

    fourcc = cv.VideoWriter_fourcc(*'mp4v')  # DIVX, XVID, MJPG, X264, WMV1, WMV2
    outVideo = cv.VideoWriter(output_Video, fourcc, fps, (Width, Height))

    # Read until video is completed
    while cap.isOpened():
        ret, frame = cap.read()
        if ret:
            #preprocess
            data, orig = preprocess(frame)
            #Send into model inference
            result_list = model.execute([data,])
            #Process inference results
            result_return = post_process(result_list, orig)
            print("result = ", result_return)
            #Process lane line
            frame_with_lane = preprocess_frame(frame)

            distance = np.zeros(shape=(len(result_return['detection_classes']), 1))
            for i in range(len(result_return['detection_classes'])):
                box = result_return['detection_boxes'][i]
                class_name = result_return['detection_classes'][i]
                confidence = result_return['detection_scores'][i]
                distance[i] = calculate_position(bbox=box, transform_matrix=perspective_transform,
                            warped_size=WARPED_SIZE, pix_per_meter=pixels_per_meter)
                label_dis = '{} {:.2f}m'.format('dis:', distance[i][0])
                cv.putText(frame_with_lane, label_dis, (int(box[1]) + 10, int(box[2]) + 15), 
                            cv.FONT_ITALIC, 0.6, colors[i % 6], 1)

                cv.rectangle(frame_with_lane, (int(box[1]), int(box[0])), (int(box[3]), int(box[2])), colors[i % 6])
                p3 = (max(int(box[1]), 15), max(int(box[0]), 15))
                out_label = class_name
                cv.putText(frame_with_lane, out_label, p3, cv.FONT_ITALIC, 0.6, colors[i % 6], 1)

            outVideo.write(frame_with_lane)
        # Break the loop
        else:
            break
    cap.release()
    outVideo.release()
    print("Execute end")
Example #23
def main(opt):
    acl_resource = AclResource()
    acl_resource.init()

    mot_model = Model('../model/dlav0.om')

    # Create output dir if not exist; default outputs
    result_root = opt.output_root if opt.output_root != '' else '.'
    mkdir_if_missing(result_root)

    video_name = os.path.basename(opt.input_video).replace(' ',
                                                           '_').split('.')[0]

    # setup dataloader, use LoadVideo or LoadImages
    dataloader = LoadVideo(opt.input_video, (1088, 608))
    # result_filename = os.path.join(result_root, 'results.txt')
    frame_rate = dataloader.frame_rate

    # dir for output images; default: outputs/'VideoFileName'
    save_dir = os.path.join(result_root, video_name)
    if save_dir and os.path.exists(save_dir) and opt.rm_prev:
        shutil.rmtree(save_dir)
    mkdir_if_missing(save_dir)

    # initialize tracker
    tracker = JDETracker(opt, mot_model, frame_rate=frame_rate)
    timer = Timer()
    results = []

    # img:  h w c; 608 1088 3
    # img0: c h w; 3 608 1088
    for frame_id, (path, img, img0) in enumerate(dataloader):
        if frame_id % 20 == 0:
            print('Processing frame {} ({:.2f} fps)'.format(
                frame_id, 1. / max(1e-5, timer.average_time)))

        # run tracking, start tracking timer
        timer.tic()

        # list of Tracklet; see multitracker.STrack
        online_targets = tracker.update(np.array([img]), img0)

        # prepare for drawing, get all bbox and id
        online_tlwhs = []
        online_ids = []
        for t in online_targets:
            tlwh = t.tlwh
            tid = t.track_id
            vertical = tlwh[2] / tlwh[3] > 1.6
            if tlwh[2] * tlwh[3] > opt.min_box_area and not vertical:
                online_tlwhs.append(tlwh)
                online_ids.append(tid)
        timer.toc()

        # draw bbox and id
        online_im = vis.plot_tracking(img0,
                                      online_tlwhs,
                                      online_ids,
                                      frame_id=frame_id,
                                      fps=1. / timer.average_time)
        cv2.imwrite(os.path.join(save_dir, '{:05d}.jpg'.format(frame_id)),
                    online_im)
Example #24
class CrowdCount(object):
    """
    crowdCount
    """
    def __init__(self, model_path, model_width, model_height):
        self.device_id = 0
        self.context = None
        self.stream = None
        self.run_mode = None
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._dvpp = None
        self._model = None

    def init(self):    
        """
        init
        """ 
        self._dvpp = Dvpp() 
        self._model = Model(self._model_path)
    
        return constants.SUCCESS

    def pre_process(self, image):
        """
        image preprocess
        """
        image_dvpp = image.copy_to_dvpp()
        yuv_image = self._dvpp.jpegd(image_dvpp)
        crop_and_paste_image = \
            self._dvpp.crop_and_paste(yuv_image, image.width, image.height, self._model_width, self._model_height)
        return crop_and_paste_image

    def inference(self, input_data):
        """
        model inference
        """
        return self._model.execute(input_data)

    def post_process(self, infer_output, image_file):
        """
        Post-processing, analysis of inference results
        """
        orig = cv2.imread(image_file, 1)
        orig = cv2.resize(orig, (1200, 800))
        data = infer_output[0]
        vals = data.flatten()
        res = np.sum(vals, axis=0)
        result = round(res / 1000.0)
        data_2 = data.reshape(800, 1408)
        heatMap = data_2[:800, :1200]
        heatMap = heatMap.astype(np.uint8)
        heatMap = cv2.GaussianBlur(heatMap, (5, 5), cv2.BORDER_DEFAULT)
        cv2.normalize(heatMap, heatMap, 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)
        heatMap = cv2.applyColorMap(heatMap, cv2.COLORMAP_JET)
        add_img = cv2.addWeighted(orig, 1, heatMap, 0.5, 0.0)
        cv2.putText(add_img, str(result), (30, 60), cv2.FONT_HERSHEY_PLAIN, 5, (0, 0, 255), 8)  
        output_path = os.path.join("./outputs", os.path.basename(image_file))
        cv2.imwrite(output_path, add_img)
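
A usage sketch for the CrowdCount class, assuming the AclResource and AclImage helpers and an existing ./outputs directory (post_process writes there); the model path and the 1408x800 input size are placeholders:

def count_one(image_path):
    acl_resource = AclResource()
    acl_resource.init()

    counter = CrowdCount("../model/count_person.om", 1408, 800)  # placeholder
    counter.init()

    image = AclImage(image_path)
    crop_and_paste_image = counter.pre_process(image)
    result = counter.inference([crop_and_paste_image])
    # overlays the heat map and the estimated count, then saves to ./outputs
    counter.post_process(result, image_path)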
Example #25
class Gesture(object):
    """
    define gesture class
    """
    def __init__(self, model_path, model_width, model_height):
        self.device_id = 0
        self.context = None
        self.stream = None
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height
        self._dvpp = None
        self._model = None

    def init(self):
        """
        Initialize
        """
        # Initialize dvpp
        self._dvpp = Dvpp()

        # Load model
        self._model = Model(self._model_path)

        return const.SUCCESS

    def pre_process(self, image):
        """
        pre_process
        """
        image_dvpp = image.copy_to_dvpp()
        yuv_image = self._dvpp.jpegd(image_dvpp)
        print("decode jpeg end")
        resized_image = self._dvpp.resize(yuv_image, self._model_width,
                                          self._model_height)
        print("resize yuv end")
        return resized_image

    def inference(self, resized_image):
        """
        inference
        """
        return self._model.execute(resized_image)

    def post_process(self, infer_output, image_file):
        """
        post_process
        """
        print("post process")
        data = infer_output[0]
        vals = data.flatten()
        top_k = vals.argsort()[-1:-2:-1]
        print("images:{}".format(image_file))
        print("======== top5 inference results: =============")
        for n in top_k:
            object_class = get_gesture_categories(n)
            print("label:%d  confidence: %f, class: %s" %
                  (n, vals[n], object_class))

        if len(top_k):
            object_class = get_gesture_categories(top_k[0])
            output_path = os.path.join(os.path.join(SRC_PATH, "../outputs"),
                                       os.path.basename(image_file))
            origin_img = Image.open(image_file)
            draw = ImageDraw.Draw(origin_img)
            font = ImageFont.load_default()
            draw.text((10, 50), object_class, font=font, fill=255)
            origin_img.save(output_path)
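
A minimal driver sketch for the Gesture class, under the same assumptions about the AclResource and AclImage helpers and the SRC_PATH/../outputs directory that post_process writes to; the model path, input size and image path are placeholders:

def recognize_one(image_path):
    acl_resource = AclResource()
    acl_resource.init()

    gesture = Gesture("../model/gesture.om", 256, 224)  # placeholder path/size
    gesture.init()

    image = AclImage(image_path)
    resized_image = gesture.pre_process(image)
    result = gesture.inference([resized_image])
    # prints the top prediction and saves an annotated copy of the image
    gesture.post_process(result, image_path)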
Example #26
def main():
    """main"""
    if (len(sys.argv) != 2):
        print("The App arg is invalid")
        exit(1)

    acl_resource = AclResource()
    acl_resource.init()
    model = Model(MODEL_PATH)
    dvpp = Dvpp(acl_resource)

    image_dir = sys.argv[1]
    images_list = [
        os.path.join(image_dir, img) for img in os.listdir(image_dir)
        if os.path.splitext(img)[1] in const.IMG_EXT
    ]

    # Create a directory to save inference results
    if not os.path.isdir('./outputs'):
        os.mkdir('./outputs')

    # Create a directory to save the intermediate results of the large image detection
    if not os.path.isdir('./bigpic'):
        os.mkdir('./bigpic')

    # Create a directory to save the results of the big picture cropping inference
    outCrop = os.path.join('./bigpic', 'output')
    if not os.path.isdir(outCrop):
        os.mkdir(outCrop)

    # Create a directory to save the cropped pieces of the large pictures
    cropImg = os.path.join('./bigpic', 'cropimg')
    if not os.path.isdir(cropImg):
        os.mkdir(cropImg)

    image_info = construct_image_info()

    for image_file in images_list:
        imagename = get_file_name(image_file)
        tempfile = os.path.splitext(imagename)[0]

        imgdic = {}
        imgdic['name'] = imagename
        obj_res = []

        img = cv2.imread(image_file, -1)
        (height, width, depth) = img.shape
        if width > 1000 and height > 1000:
            # Create a directory to save the divided pictures of each big picture
            crop_target = os.path.join(cropImg, tempfile)
            if not os.path.isdir(crop_target):
                os.mkdir(crop_target)

            # Create a directory to save the inference results of each large image
            out_target = os.path.join(outCrop, tempfile)
            if not os.path.isdir(out_target):
                os.mkdir(out_target)

            # Large image clipping function
            crop_picture(image_file, crop_target)
            cropimg_list = [
                os.path.join(crop_target, imgs)
                for imgs in os.listdir(crop_target)
                if os.path.splitext(imgs)[1] in const.IMG_EXT
            ]

            # After cropping, the small tiles of the large picture
            # are stored in the crop_target folder
            for cropimg_file in cropimg_list:
                print("the crop filename is :\t", cropimg_file)
                image = AclImage(cropimg_file)
                resized_image = pre_process(image, dvpp)
                result = model.execute([resized_image, image_info])
                resdic = post_process_big(result, image, cropimg_file,
                                          out_target)
                obj_res.extend(resdic)

            imgdic['object_result'] = obj_res

            merge_picture(out_target, tempfile)

        # Read in the picture, if the picture size is less than 1000x1000,
        # it will be read in and processed normally
        else:
            print("detect the small picture")
            image = AclImage(image_file)
            resized_image = pre_process(image, dvpp)
            print("pre process end")
            result = model.execute([resized_image, image_info])
            resdic = post_process(result, image, image_file)
            obj_res.extend(resdic)
            imgdic['object_result'] = obj_res
Example #27
        print("post process cost:", t4 - t3)
        print("FPS:", 1 / (t4 - t1))
        print("*" * 40)

        return inference_result
    else:
        return dict()


if __name__ == "__main__":
    # acl resource init
    acl_resource = AclResource()
    acl_resource.init()

    # load om model
    yolov3_model = Model(MODEL_PATH)

    # send the multicast message
    multicast_group = (LOCAL_IP_ADDRESS, PORT)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    connections = {}

    try:
        # Send data to the multicast group
        print('sending "%s" to %s' % ('EtherSensePing', multicast_group))
        sent = sock.sendto('EtherSensePing'.encode(), multicast_group)

        # defer waiting for a response using Asyncore
        client = EtherSenseClient()
        # print("data shape:", client._image_data.shape)
        asyncore.loop()