Code example #1
File: object_detect.py — Project: tanmana5/samples
def post_process(infer_output, origin_img, image_file):
    """Post-process detection output: draw boxes and build JSON-style results.

    Args:
        infer_output: model outputs. infer_output[1][0, 0] holds the number of
            detected boxes; infer_output[0] holds the box data laid out as six
            planes of box_num values each: x0, y0, x1, y1, score, class index.
        origin_img: decoded image object exposing .width and .height.
        image_file: path to the original image file on disk.

    Returns:
        list of dicts: [{'label', 'score', 'points': {lx, ly, rx, ry}}, ...]
    """
    print("post process")
    print(infer_output[1])
    # box_num indicates the number of targets detected in the picture.
    # Cast to int once: the raw value may be a numpy float, which cannot be
    # used for slicing/reshape, and the loop below indexes with it repeatedly.
    box_num = int(infer_output[1][0, 0])
    print("box num ", box_num)
    box_info = infer_output[0].flatten()
    print("\n")
    print(box_info[0:6 * box_num].reshape(6, box_num))
    # Model coordinates refer to the DVPP-aligned buffer, so scale using the
    # aligned width/height. NOTE(review): assumes align_up128/align_up16 match
    # the decoder's stride rules — confirm against the dvpp jpegd path.
    scalex = utils.align_up128(origin_img.width) / MODEL_WIDTH
    scaley = utils.align_up16(origin_img.height) / MODEL_HEIGHT
    output_path = os.path.join("./outputs", os.path.basename(image_file))
    origin_image = Image.open(image_file)
    draw = ImageDraw.Draw(origin_image)
    font = ImageFont.truetype("SourceHanSansCN-Normal.ttf", size=30)
    print("images:{}".format(image_file))

    # Create a list to save the JSON results
    obj_res = []

    # Inference result output
    print("======== inference results: =============")
    for n in range(box_num):
        # Plane layout: [x0 | y0 | x1 | y1 | score | class], each box_num wide.
        ind = int(box_info[5 * box_num + n])
        label = labels[ind]
        score = box_info[4 * box_num + n]
        top_left_x = box_info[0 * box_num + n] * scalex
        top_left_y = box_info[1 * box_num + n] * scaley
        bottom_right_x = box_info[2 * box_num + n] * scalex
        bottom_right_y = box_info[3 * box_num + n] * scaley
        # Output the target name, category number, coordinates and confidence
        print("%s: class %d, box %d %d %d %d, score %f" %
              (label, ind, top_left_x, top_left_y, bottom_right_x,
               bottom_right_y, score))
        # Mark the detection on the picture (closed rectangle outline)
        draw.line([(top_left_x, top_left_y), (bottom_right_x, top_left_y),
                   (bottom_right_x, bottom_right_y),
                   (top_left_x, bottom_right_y), (top_left_x, top_left_y)],
                  fill=(0, 200, 100),
                  width=2)
        draw.text((top_left_x, top_left_y), label, font=font, fill=255)

        # JSON data for this detection
        obj = {}
        obj['label'] = label
        obj['score'] = score

        points = {}
        points['lx'] = top_left_x
        points['ly'] = top_left_y
        points['rx'] = bottom_right_x
        points['ry'] = bottom_right_y
        obj['points'] = points

        obj_res.append(obj)

    # Save the final annotated picture
    origin_image.save(output_path)
    return obj_res
Code example #2
File: acl_dvpp.py — Project: Ascend/canncamp
    def jpegd(self, image):
        """Decode a JPEG image to a YUV420SP image via DVPP.

        Args:
            image: input JPEG; must expose .data(), .size, .width, .height.

        Returns:
            AclImage wrapping the decoded YUV buffer in DVPP memory, or
            None if decode or stream synchronization fails.
        """
        # Create the conversion output image desc (and its DVPP buffer)
        output_desc, out_buffer = self._gen_jpegd_out_pic_desc(image)
        ret = acl.media.dvpp_jpeg_decode_async(self._dvpp_channel_desc,
                                               image.data(),
                                               image.size,
                                               output_desc,
                                               self._stream)
        if ret != constants.ACL_ERROR_NONE:
            log_error("dvpp_jpeg_decode_async failed ret={}".format(ret))
            return None

        # The decode is asynchronous; wait for the stream to complete.
        ret = acl.rt.synchronize_stream(self._stream)
        if ret != constants.ACL_ERROR_NONE:
            # Fixed: this message previously blamed dvpp_jpeg_decode_async,
            # which made sync failures look like decode failures in the logs.
            log_error("synchronize_stream failed ret={}".format(ret))
            return None

        # Return the decoded AclImage instance with DVPP stride alignment
        # (width aligned up to 128, height aligned up to 16).
        stride_width = utils.align_up128(image.width)
        stride_height = utils.align_up16(image.height)
        stride_size = utils.yuv420sp_size(stride_width, stride_height)
        return AclImage(out_buffer, stride_width,
                        stride_height, stride_size, constants.MEMORY_DVPP)
Code example #3
File: object_detect.py — Project: tanmana5/samples
def post_process_big(infer_output, origin_img, image_file, out_target):
    """Post-process detections on one tile of a large image.

    The tile's filename encodes its grid position as "<row>_<col>.<ext>";
    box coordinates are mapped back into the big picture assuming a tile
    size of 832 pixels.

    Args:
        infer_output: model outputs. infer_output[1][0, 0] holds the number of
            detected boxes; infer_output[0] holds the box data laid out as six
            planes of box_num values each: x0, y0, x1, y1, score, class index.
        origin_img: decoded tile image object exposing .width and .height.
        image_file: path to the tile image file, named "<row>_<col>.<ext>".
        out_target: directory to save the annotated tile into.

    Returns:
        list of dicts with big-picture coordinates:
        [{'label', 'score', 'points': {lx, ly, rx, ry}}, ...]
    """
    print("post process")
    print(infer_output[1])
    # Cast to int once: the raw value may be a numpy float, which cannot be
    # used for slicing/reshape, and the loop below indexes with it repeatedly.
    box_num = int(infer_output[1][0, 0])
    print("box num ", box_num)
    box_info = infer_output[0].flatten()
    print("\n")
    print(box_info[0:6 * box_num].reshape(6, box_num))
    # Model coordinates refer to the DVPP-aligned buffer, so scale using the
    # aligned width/height.
    scalex = utils.align_up128(origin_img.width) / MODEL_WIDTH
    scaley = utils.align_up16(origin_img.height) / MODEL_HEIGHT
    output_path = os.path.join(out_target, os.path.basename(image_file))
    origin_image = Image.open(image_file)
    draw = ImageDraw.Draw(origin_image)
    font = ImageFont.truetype("../SourceHanSansCN-Normal.ttf", size=30)
    print("images:{}".format(image_file))
    print("======== inference results: =============")

    imagename = get_file_name(image_file)

    # Parse the tile's grid position from its name: "<row>_<col>[.<ext>]"
    # (int(...) instead of the C-style "(int)(...)" cast).
    name_parts = imagename.split("_")
    row_num = int(name_parts[0])
    col_num = int(name_parts[1].split(".")[0])

    obj_res = []

    for n in range(box_num):
        # Plane layout: [x0 | y0 | x1 | y1 | score | class], each box_num wide.
        ind = int(box_info[5 * box_num + n])
        label = labels[ind]
        score = box_info[4 * box_num + n]
        top_left_x = box_info[0 * box_num + n] * scalex
        top_left_y = box_info[1 * box_num + n] * scaley
        bottom_right_x = box_info[2 * box_num + n] * scalex
        bottom_right_y = box_info[3 * box_num + n] * scaley
        print("%s: class %d, box %d %d %d %d, score %f" %
              (label, ind, top_left_x, top_left_y, bottom_right_x,
               bottom_right_y, score))
        # Mark the detection on the tile (closed rectangle outline)
        draw.line([(top_left_x, top_left_y), (bottom_right_x, top_left_y),
                   (bottom_right_x, bottom_right_y),
                   (top_left_x, bottom_right_y), (top_left_x, top_left_y)],
                  fill=(0, 200, 100),
                  width=2)
        draw.text((top_left_x, top_left_y), label, font=font, fill=255)

        # Map tile-local coordinates into the big picture; 832 is the tile
        # stride used when the big image was cropped.
        big_lx = top_left_x + (col_num - 1) * 832
        big_ly = top_left_y + (row_num - 1) * 832
        big_rx = bottom_right_x + (col_num - 1) * 832
        big_ry = bottom_right_y + (row_num - 1) * 832

        # JSON data for this detection (big-picture coordinates)
        obj = {}
        obj['label'] = label
        obj['score'] = score

        points = {}
        points['lx'] = big_lx
        points['ly'] = big_ly
        points['rx'] = big_rx
        points['ry'] = big_ry
        obj['points'] = points

        obj_res.append(obj)

    origin_image.save(output_path)
    return obj_res