Code example #1
def testRollerBlock():
    import matplotlib.pyplot as plt
    import platform
    import cv2
    import os
    if "Windows" in platform.system():
        filename = r"D:\chengxu\python\project\digitRecognise\com\huitong\gasMeterv1\data\img\style1\2.jpg"
    elif "Linux" in platform.system():
        filename = r"/home/allen/work/digitRecognise/com/huitong/gasMeterv1/data/img/style1/000.jpg"
    else:
        raise RuntimeError("no test image path configured for this platform")

    style1 = GasmeterStyle1(desImageDepth=1)
    image = cv2.imread(filename)

    ImageTool.showImagePIL(image)

    style1.setImage(image)

    rollerBlackImage = style1.getRollerBlackArea()
    # rollerBlackImage = cv2.cvtColor(rollerBlackImage,cv2.COLOR_BGR2RGB)
    title = str(rollerBlackImage.shape) + os.path.basename(filename)
    # rollerBlackImage = ImageTool.convertImgBGR2Gray(rollerBlackImage)

    # ret,rollerBlackImage = ImageTool.getOTSUGrayImage(rollerBlackImage)

    plt.figure()
    plt.imshow(rollerBlackImage)
    plt.title(title)
    plt.show()
Code example #2
def getPredict(hps, mode, gasmeter_filename, save_file_name):
    """Restore the trained ResNet model from save_file_name and return the digit text predicted for the image in gasmeter_filename."""
    xp = tf.placeholder(
        tf.float32,
        [None, captchaBoxHeight * captchaBoxWidth * gen.ImageDepth])
    yp = tf.placeholder(tf.float32,
                        [None, captchaCharacterLength * CHAR_SET_LEN])
    model = ResNetModel.ResNetModel(hps, xp, yp, mode, captchaBoxHeight,
                                    captchaBoxWidth, gen.ImageDepth)
    model.create_graph(captchaCharacterLength)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, save_file_name)

        # image = Image.open(gasmeter_filename)
        # image = image.resize((captchaBoxWidth,captchaBoxHeight), Image.BICUBIC)
        image = cv2.imread(gasmeter_filename)
        image = ImageTool.imageResize(image, captchaBoxWidth, captchaBoxHeight)

        if gen.ImageDepth == 1:
            image = ImageTool.convertImgBGR2Gray(image)
            # image = ImageTool.convertImgRGB2Gray(image)

        ImageTool.showImagePIL(image)

        images = ImageTool.repeatImage2Tensor(image, hps.batch_nums)

        feed_dict = {xp: images, model.is_training_ph: False}

        outputs = sess.run([model.outputs], feed_dict=feed_dict)
        text = get_predict_text(outputs)
    return text
Code example #3
    def getLCDAreaData(image):
        """
        Get the image data of the LCD area.
        :param image: image object read with cv2
        :return: cropped image of the LCD area
        """
        image = ImageTool.preProcessImage(image)
        blackMask = MaskTool.getBlackMaskBGR()
        image = blackMask.getInterestImageAreaData(image)

        shape = image.shape
        grayImage = ImageTool.convertImgBGR2Gray(image)
        mid = int(shape[0] / 2)
        splitImageBox = (0, mid, shape[1], shape[0])
        splitImageGray = ImageTool.getCropImageByBox(grayImage, splitImageBox)
        splitImage = ImageTool.getCropImageByBox(image, splitImageBox)

        # Show the grayscale image of the cropped lower half that contains the LCD screen
        ImageTool.showImagePIL(splitImageGray, "splitImageGray")

        retval, otsuImage = ImageTool.getOTSUGrayImage(splitImageGray)
        otsuImage = ImageTool.convertImgGray2BGR(otsuImage)

        lower = (250, 250, 250)
        upper = (255, 255, 255)
        lcdBoxCorner = ImageTool.getInterestBoxCornerPointByColor(
            otsuImage, lower, upper)
        lcdBox = LCDLightDetect.getMinBox(lcdBoxCorner)
        # ImageTool.showBoxInImageByBox(splitImage, lcdBox)

        lcdImage = ImageTool.getCropImageByBox(splitImage, lcdBox)
        return lcdImage
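
A minimal usage sketch for getLCDAreaData, assuming it is exposed as a static method of the LCDLightDetect class referenced above and that the import path mirrors the package layout seen in code example #5 (both the class attachment and the path are assumptions, not confirmed by the source):

import cv2
# Assumed module path, modeled on the package layout from code example #5.
from com.huitong.gasMeterv1.framework.tool.LCDLightDetect import LCDLightDetect

image = cv2.imread("gasmeter_sample.jpg")  # any gas-meter photo read with cv2
lcdImage = LCDLightDetect.getLCDAreaData(image)
print(lcdImage.shape)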
Code example #4
def getPredict(hps, mode, gasmeter_filename, save_file_name):
    """Restore the trained ResNet model, crop the black roller (digit) area from gasmeter_filename with GasmeterStyle1, and return the predicted digit text."""
    xp = tf.placeholder(
        tf.float32,
        [None, captchaBoxHeight * captchaBoxWidth * gen.ImageDepth])
    yp = tf.placeholder(tf.float32,
                        [None, captchaCharacterLength * CHAR_SET_LEN])
    model = ResNetModel.ResNetModel(hps, xp, yp, mode, captchaBoxHeight,
                                    captchaBoxWidth, gen.ImageDepth)
    model.create_graph(captchaCharacterLength)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, save_file_name)

        # images = gen.get_batch_gasmeter_digit_area_from_filename(gasmeter_filename, hps.batch_nums)
        image = cv2.imread(gasmeter_filename)
        # gasmeter = GasmeterStyle0(captchaBoxWidth,captchaBoxHeight,desImageDepth=gen.ImageDepth)
        gasmeter = GasmeterStyle1(captchaBoxWidth,
                                  captchaBoxHeight,
                                  desImageDepth=gen.ImageDepth)
        gasmeter.setImage(image)
        image = gasmeter.getRollerBlackArea()

        ImageTool.showImagePIL(image)

        images = ImageTool.repeatImage2Tensor(image, hps.batch_nums)

        feed_dict = {xp: images, model.is_training_ph: False}

        outputs = sess.run([model.outputs], feed_dict=feed_dict)
        text = get_predict_text(outputs)
    return text
Code example #5
File: test.py  Project: crazyCodeLove/digitRecognise
def fun8():
    from com.huitong.gasMeterv1.framework.tool.GenDigitsImage import GenDigitsPicture
    characterLength = 1
    width = 15
    height = 30
    bkgColor = (20, 20, 20)
    fontColor = (200, 200, 200)
    fontSizes = (29,)

    gen = GenDigitsPicture(characterLength, width, height)

    # Generate and show random digit images in an endless loop for manual inspection
    while True:
        text, image = gen.get_text_and_image(backgroundColor=bkgColor,
                                              fontColor=fontColor,
                                              fontSizes=fontSizes)
        ImageTool.showImagePIL(image, text)
Code example #6
def getPredict(hps, mode, save_file_name, gen):
    """Restore the trained ResNet model and loop forever over randomly generated style-1 gas-meter images, showing and printing the true text alongside the predicted text."""
    xp = tf.placeholder(
        tf.float32,
        [None, captchaBoxHeight * captchaBoxWidth * gen.ImageDepth])
    yp = tf.placeholder(tf.float32,
                        [None, captchaCharacterLength * CHAR_SET_LEN])
    model = ResNetModel.ResNetModel(hps, xp, yp, mode, captchaBoxHeight,
                                    captchaBoxWidth, gen.ImageDepth)
    model.create_graph(captchaCharacterLength)

    gen1 = GenImageGasMeterStyle1m1(captchaCharacterLength,
                                    captchaBoxWidth,
                                    captchaBoxHeight,
                                    imageDepth=1)
    gen2 = GenImageGasMeterStyle1m2(captchaCharacterLength,
                                    captchaBoxWidth,
                                    captchaBoxHeight,
                                    imageDepth=1)
    gen3 = GenImageGasMeterStyle1m3(captchaCharacterLength,
                                    captchaBoxWidth,
                                    captchaBoxHeight,
                                    imageDepth=1)

    gens = [gen1, gen2, gen3]

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        saver = tf.train.Saver()
        saver.restore(sess, save_file_name)

        while True:
            gen = random.choice(gens)
            oriText, image = gen.get_text_and_image()
            images = ImageTool.repeatImage2Tensor(image, hps.batch_nums)

            feed_dict = {xp: images, model.is_training_ph: False}

            outputs = sess.run([model.outputs], feed_dict=feed_dict)
            predictText = get_predict_text(outputs)

            title = "text:%s, predict:%s" % (oriText, predictText)

            ImageTool.showImagePIL(image, title)
            print(title)
Code example #7
def test():
    import cv2
    import platform
    import os
    style = GasmeterStyle0(desImageDepth=1)

    if "Linux" in platform.system():
        filename = r'/home/allen/work/digitRecognise/com/huitong/gasMeterv1/data/img/style0/10.jpg'
    elif "Windows" in platform.system():
        filename = r"D:\chengxu\python\project\digitRecognise\com\huitong\gasMeterv1\data\img\style0\10.jpg"
    else:
        raise RuntimeError("no test image path configured for this platform")

    image = cv2.imread(filename)
    title = os.path.basename(filename)
    style.setImage(image)

    image = style.getRollerBlackArea()
    # ret, image = ImageTool.getOTSUGrayImage(image)

    # image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)
    ImageTool.showImagePIL(image, str(image.shape) + title)
Code example #8
    def getLCDBox(self):
        """
        Locate the LCD screen in the lower half of self._image via OTSU
        thresholding, store the resulting box in self._lcdBox, and show the
        detected corner points for inspection.
        """
        shape = self._image.shape
        mid = int(shape[0] / 2)
        splitImageBox = (0, mid, shape[1], shape[0])
        splitImageGray = ImageTool.getCropImageByBox(self._grayImage,
                                                     splitImageBox)
        ImageTool.showImagePIL(splitImageGray)

        splitImage = ImageTool.getCropImageByBox(self._image, splitImageBox)

        retval, otsuImage = ImageTool.getOTSUGrayImage(splitImageGray)
        otsuImage = ImageTool.convertImgGray2BGR(otsuImage)

        lower = (250, 250, 250)
        upper = (255, 255, 255)
        lcdBoxCorner = ImageTool.getInterestBoxCornerPointByColor(
            otsuImage, lower, upper)
        lcdBox = ImageTool.getBoxFromBoxCorner(lcdBoxCorner)
        self._lcdBox = lcdBox

        ImageTool.showBoxInImageByBoxCornerPoint(splitImage, lcdBoxCorner,
                                                 "lcd")
Code example #9
    def lcdLighted(lcdImage):
        """
        Detect whether the LCD area image is lit: return True if it is lit, False otherwise.
        :param lcdImage: image object read with cv2
        """
        lower = (0, 0, 0)
        upper = (50, 50, 50)

        blackBoxCorner = ImageTool.getInterestBoxCornerPointByColor(
            lcdImage, lower, upper)
        # If the detected LCD area still contains a black region, the screen is not lit.
        if blackBoxCorner is not None:
            return False

        lcdImageGray = ImageTool.convertImgBGR2Gray(lcdImage)

        retval, otsuImage = ImageTool.getOTSUGrayImage(lcdImageGray)
        otsuImage = ImageTool.convertImgGray2BGR(otsuImage)

        notLcdBoxCorner = ImageTool.getInterestBoxCornerPointByColor(
            otsuImage, lower, upper)
        ImageTool.showImagePIL(lcdImage, "lcdimage")

        return notLcdBoxCorner is None
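
A minimal end-to-end sketch that feeds the LCD crop from getLCDAreaData (code example #3) into lcdLighted, under the same assumption that both are static methods of LCDLightDetect and that the import path is as guessed above:

import cv2
from com.huitong.gasMeterv1.framework.tool.LCDLightDetect import LCDLightDetect  # assumed path

image = cv2.imread("gasmeter_sample.jpg")        # meter photo read with cv2
lcdImage = LCDLightDetect.getLCDAreaData(image)  # crop the LCD area
print("LCD lit:", LCDLightDetect.lcdLighted(lcdImage))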
Code example #10
File: test.py  Project: crazyCodeLove/digitRecognise
def fun9():
    filename = r"D:\chengxu\python\project\digitRecognise\com\huitong\gasMeterv1\data\img\gasmeterRoller\000041.jpg"
    image = Image.open(filename)
    image = image.resize((128, 64), Image.BICUBIC)  # Image.CUBIC is a removed alias of BICUBIC in current Pillow
    ImageTool.showImagePIL(image, "des")