def normalPressure(image, allinfo):
    """
    :param image: ROI image
    :param allinfo: information (or a list of candidate configs) for this meter
    :return: value
    """
    template = None
    flag = None
    info = None
    if type(allinfo) is list:
        # Try each of the supplied templates in turn; break as soon as one matches.
        for i in range(len(allinfo)):
            info = allinfo[i]
            template, flag = meterFinderBySIFT(image, info)
            # flag == 0 means no match; try the next template.
            if flag == 0:
                continue
            else:
                break
    else:
        template, flag = meterFinderBySIFT(image, allinfo)
        info = copy.deepcopy(allinfo)
    if flag == 0:
        print('template not found!')
    # Call the pointer-reading routine.
    result = scanPointer(template, info)
    result = int(result * 1000) / 1000  # truncate to three decimal places
    return [result]
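# The multi-template loop above reappears verbatim in colorIndicator,
# onoffBattery, switch, and digitPressure below. A minimal sketch of a shared
# helper that could replace it, assuming meterFinderBySIFT keeps its
# (template, flag) return convention; _matchFirstTemplate is a hypothetical
# name, not part of the original module.
def _matchFirstTemplate(image, allinfo):
    """Return (template, flag, info) for the first matching template."""
    if not isinstance(allinfo, list):
        info = copy.deepcopy(allinfo)
        template, flag = meterFinderBySIFT(image, info)
        return template, flag, info
    template, flag, info = None, 0, None
    for info in allinfo:
        template, flag = meterFinderBySIFT(image, info)
        if flag != 0:  # flag == 0 means no match; try the next config
            break
    return template, flag, info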
def colorIndicator(ROI, allinfo):
    res = [0]
    template = None
    flag = None
    info = None
    if type(allinfo) is list:
        for i in range(len(allinfo)):
            info = allinfo[i]
            template, flag = meterFinderBySIFT(ROI, info)
            # flag == 0 means no match; try the next template.
            if flag == 0:
                continue
            else:
                break
    else:
        template, flag = meterFinderBySIFT(ROI, allinfo)
        info = copy.deepcopy(allinfo)
    if flag == 0:
        print('template not found!')
    HSV = cv2.cvtColor(template, cv2.COLOR_BGR2HSV)
    # HSV range tuned for the indicator color on the actual images.
    # color = [np.array([26, 43, 46]), np.array([34, 255, 255])]
    color = [np.array([11, 43, 46]), np.array([34, 255, 255])]
    Lower = color[0]
    Upper = color[1]
    mask = cv2.inRange(HSV, Lower, Upper)
    upmask = mask[int(0.25 * mask.shape[0]):int(0.5 * mask.shape[0]), :]
    # Reduce the 0/255 mask to 0/1 so the sum counts pixels.
    upmask = cv2.bitwise_and(np.ones(upmask.shape, np.uint8), upmask)
    # Fire when more than 20% of the upper band is the indicator color.
    if np.sum(upmask) / (upmask.shape[0] * upmask.shape[1]) > 0.2:
        res = [1]
    return res
def onoffBattery(image, allinfo):
    """
    Detect whether the switch inside the battery panel points up or down.
    :param image:
    :param allinfo:
    :return:
    """
    template = None
    flag = None
    info = None
    if type(allinfo) is list:
        for i in range(len(allinfo)):
            info = allinfo[i]
            template, flag = meterFinderBySIFT(image, info)
            # flag == 0 means no match; try the next template.
            if flag == 0:
                continue
            else:
                break
    else:
        template, flag = meterFinderBySIFT(image, allinfo)
        info = copy.deepcopy(allinfo)
    if flag == 0:
        print('template not found!')
    gray = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    start = [info["startPoint"]["x"], info["startPoint"]["y"]]
    end = [info["endPoint"]["x"], info["endPoint"]["y"]]
    center = [info["centerPoint"]["x"], info["centerPoint"]["y"]]
    # width = info["rectangle"]["width"]
    # height = info["rectangle"]["height"]
    width = 40
    height = 80
    fourth = (start[0] + end[0] - center[0], start[1] + end[1] - center[1])
    pts1 = np.float32([start, center, end, fourth])
    pts2 = np.float32([[0, 0], [width, 0], [width, height], [0, height]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    dst = cv2.warpPerspective(gray, M, (width, height))
    # Otsu binarization (an adaptive variant is kept below for reference).
    ret, thresh = cv2.threshold(dst, 0, 255, cv2.THRESH_OTSU)
    # thresh = cv2.adaptiveThreshold(dst, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 17, 11)
    # Compare the mean intensity of the upper and lower halves; if the upper
    # half is darker, the switch points up.
    upRegion = np.mean(thresh[:40])
    downRegion = np.mean(thresh[40:])
    if ifShow:
        imgShow = np.hstack((dst, thresh))
        cv2.imshow("ret", imgShow)
        cv2.waitKey(0)
        print(upRegion, downRegion)
    return ["on"] if upRegion < downRegion else ["off"]
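# The rectification in onoffBattery maps the calibrated quadrilateral onto a
# fixed 40x80 patch. The fourth corner is inferred from the other three:
# since the diagonals of a parallelogram bisect each other,
# fourth = start + end - center. A quick check with toy points:
#
#   start, center, end = (0, 0), (10, 0), (10, 8)
#   fourth = (start[0] + end[0] - center[0], start[1] + end[1] - center[1])
#   assert fourth == (0, 8)  # completes the rectangle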
def switch(image, allinfo):
    # delete_file.delete_files()
    template = None
    flag = None
    info = None
    if type(allinfo) is list:
        for i in range(len(allinfo)):
            info = allinfo[i]
            template, flag = meterFinderBySIFT(image, info)
            # flag == 0 means no match; try the next template.
            if flag == 0:
                continue
            else:
                break
    else:
        template, flag = meterFinderBySIFT(image, allinfo)
        info = copy.deepcopy(allinfo)
    if flag == 0:
        # print('template not found!')
        pass
    h, w, _ = template.shape
    # Split the template into left and right halves.
    left = template[:, 0:w // 2].copy()
    # Optionally store the crops for inspection:
    # store_num = len(os.listdir("./store"))
    # cv2.imwrite("./store/left_" + str(store_num) + ".jpg", left)
    left = cv2.resize(left, (40, 40))
    left = np.expand_dims(left, 0)
    right = template[:, w // 2:].copy()
    # store_num = len(os.listdir("./store"))
    # cv2.imwrite("./store/right_" + str(store_num) + ".jpg", right)
    right = cv2.resize(right, (40, 40))
    right = np.expand_dims(right, 0)
    # Load the classifier for the left half.
    net = switchNet()
    net.load_state_dict(
        torch.load("Algorithm/OCR/onoff/model/left_switch_net.pth"))
    input = torch.tensor(left, dtype=torch.float32).permute((0, 3, 1, 2))
    out = net(input)
    out = out.detach().numpy()
    # 1 means "on", 0 means "off".
    left_index = np.argmax(out)
    # Load the classifier for the right half.
    net1 = switchNet()
    net1.load_state_dict(
        torch.load("Algorithm/OCR/onoff/model/right_switch_net.pth"))
    input = torch.tensor(right, dtype=torch.float32).permute((0, 3, 1, 2))
    out = net1(input)  # run the right-half model
    out = out.detach().numpy()
    # 1 means "on", 0 means "off".
    right_index = np.argmax(out)
    return [left_index, right_index]
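# switch() reloads both .pth checkpoints on every call. A minimal sketch of a
# module-level cache that avoids the repeated torch.load, assuming the
# switchNet class and checkpoint paths used above; _SWITCH_NETS and
# _loadSwitchNet are hypothetical names, not part of the original module.
_SWITCH_NETS = {}

def _loadSwitchNet(weights_path):
    # Load each checkpoint once and reuse the model on later calls.
    if weights_path not in _SWITCH_NETS:
        net = switchNet()
        net.load_state_dict(torch.load(weights_path))
        net.eval()  # inference mode
        _SWITCH_NETS[weights_path] = net
    return _SWITCH_NETS[weights_path]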
def digitPressure(image, allinfo):
    """
    :param image: image to recognize
    :param allinfo: all candidate JSON configs for this meter
    :return: recognition results
    """
    # Images saved by the previous call must be deleted before this call
    # can store its own.
    del_file(CUR_PATH)
    # delete_file.delete_files()
    template = None
    flag = None
    info = None
    if type(allinfo) is list:
        # Try each of the supplied templates in turn; break as soon as one matches.
        for i in range(len(allinfo)):
            info = allinfo[i]
            # template is the region extracted from the image using this config.
            template, flag = meterFinderBySIFT(image, info)
            # flag == 0 means no match; try the next template.
            if flag == 0:
                continue
            else:
                break
    else:
        template, flag = meterFinderBySIFT(image, allinfo)
        info = copy.deepcopy(allinfo)
    if flag == 0:
        # print('template not found!')
        pass
    # The region to recognize and its JSON config are ready; run digit OCR.
    myRes = rgbRecognize(template, info)
    merge = info["merge"]  # whether the upper and lower digit rows are merged
    decimal = info["decimal"]  # decimal-point positions
    # Format the raw recognition results: keep recognized digits, replace
    # non-leading '?' with a random digit, and convert to float.
    for i in range(len(myRes)):
        temp = ""
        for j, c in enumerate(myRes[i]):
            if c != "?":
                temp += c
            elif j != 0:
                temp += str(random.randint(0, 9))
        myRes[i] = float(temp) if temp != "" else 0.0
    if merge == 'true':
        a = myRes[0] * 10 ** decimal[1] + myRes[1]
        myRes.clear()
        myRes.append(a)
    return myRes
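# A worked example of the merge step above, with made-up values: the upper
# row supplies the high-order digits and the lower row the low-order ones.
#
#   myRes = [12.0, 34.0]; decimal = [0, 2]
#   merged = myRes[0] * 10 ** decimal[1] + myRes[1]  # 12.0 * 100 + 34.0
#   assert merged == 1234.0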
def springStatus(ROI, info):
    # template = info['template']
    # img = meterFinderByTemplate(ROI, template)
    img = meterFinderBySIFT(ROI, info)
    img = boxRectifier(img, info)
    high, width = img.shape[:2]
    i = 0
    y = 2 * high // 5  # sample row at roughly 2/5 of the height
    x = 0
    while width - 6 - i > 0:
        # Compare the three channels of neighboring pixels, scanning from
        # right to left. The offset of 5 guards against an imprecise template
        # crop (so the frame border is not picked up).
        a1, a2 = img[y][width - 5 - i][0].astype(int), img[y][width - 5 - i - 1][0].astype(int)
        b1, b2 = img[y][width - 5 - i][1].astype(int), img[y][width - 5 - i - 1][1].astype(int)
        c1, c2 = img[y][width - 5 - i][2].astype(int), img[y][width - 5 - i - 1][2].astype(int)
        if abs(a1 - a2) < 20 and abs(b1 - b2) < 20 and abs(c1 - c2) < 20:
            # Neighboring pixels are similar; keep scanning.
            i += 1
        else:
            # Dissimilar: record the x coordinate of the current point.
            x = width - 5 - i - 1
            break
    # cv2.circle(img, (x, y), 2, (0, 0, 255), -1)
    # cv2.imshow("ewe", img)
    if x > 2 * width // 3:
        return "on"
    else:
        return "off"
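# A vectorized sketch of the right-to-left scan in springStatus, illustrative
# only and equivalent up to border handling at the leftmost pixels: diff
# adjacent pixels along the sampled row and take the rightmost column where
# any channel jumps by 20 or more.
#
#   row = img[y, :width - 4].astype(int)  # skip the 5-pixel safety border
#   jumps = np.abs(np.diff(row, axis=0)).max(axis=1) >= 20
#   hits = np.nonzero(jumps)[0]
#   x = hits[-1] if hits.size else 0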
def onoffBatteryHardCode(image, info):
    """
    Detect whether the switch inside the battery panel points up or down.
    :param image: image
    :param info: information
    :return:
    """
    meter = meterFinderBySIFT(image, info)
    # print(meter.shape)
    meter = cv2.cvtColor(meter, cv2.COLOR_BGR2GRAY)
    meter = cv2.resize(meter, (200, 200))
    # Crop the central region.
    center = meter[60:120, 85:115]
    # Otsu binarization.
    ret, thresh = cv2.threshold(center, 0, 255, cv2.THRESH_OTSU)
    # Compare the intensity sums of the upper and lower halves of the
    # binarized crop; if the upper half is darker, the switch points up.
    upRegion = np.sum(thresh[:30])
    downRegion = np.sum(thresh[30:])
    if ifShow:
        imgShow = np.hstack((center, thresh))
        cv2.imshow("ret", imgShow)
        cv2.waitKey(0)
        print(upRegion, downRegion)
    return 1 if upRegion < downRegion else 0
def normalPressure(image, info):
    """
    :param image: ROI image
    :param info: information for this meter
    :return: value
    """
    meter = meterFinderBySIFT(image, info)
    result = scanPointer(meter, info)
    result = int(result * 1000) / 1000
    return [result]
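# Note on the truncation above: int(result * 1000) / 1000 truncates toward
# zero rather than rounding, e.g. int(3.1417 * 1000) / 1000 == 3.141 while
# round(3.1417, 3) == 3.142.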
def oilTempreture(image, info):
    """
    :param image: ROI image
    :param info: information for this meter
    :return: value
    """
    start = np.array([info["startPoint"]["x"], info["startPoint"]["y"]])
    end = np.array([info["endPoint"]["x"], info["endPoint"]["y"]])
    center = np.array([info["centerPoint"]["x"], info["centerPoint"]["y"]])
    meter = meterFinderBySIFT(image, info["template"])
    h, w, _ = meter.shape
    fixHeight = 300
    fixWidth = int(meter.shape[1] / meter.shape[0] * fixHeight)
    resizeCoffX = fixWidth / meter.shape[1]
    meter = cv2.resize(meter, (fixWidth, fixHeight))
    start = (start * resizeCoffX).astype(np.int16)
    end = (end * resizeCoffX).astype(np.int16)
    center = (center * resizeCoffX).astype(np.int16)
    # Black out the bottom 40% of the meter.
    meter[int(0.6 * meter.shape[0]):] *= 0
    mask_meter_red = color_detection(meter, red_range)
    point_red = contours_check(mask_meter_red, center)
    degree_red = AngleFactory.calPointerValueByOuterPoint(
        start, end, center, point_red, info["startValue"], info["totalValue"])
    cv2.destroyAllWindows()
    mask_meter_white = color_detection(meter, white_range)
    point_white = contours_check(mask_meter_white, center)
    degree_white = AngleFactory.calPointerValueByOuterPoint(
        start, end, center, point_white, info["startValue"], info["totalValue"])
    if ifShow:
        print("white degree {:.2f}".format(degree_white))
        print("red degree {:.2f}".format(degree_red))
        cv2.circle(meter, (start[0], start[1]), 5, (0, 0, 255), -1)
        cv2.circle(meter, (end[0], end[1]), 5, (0, 0, 255), -1)
        cv2.circle(meter, (center[0], center[1]), 5, (0, 0, 255), -1)
        cv2.circle(meter, (point_white[0], point_white[1]), 5, (255, 255, 255), -1)
        cv2.line(meter, (center[0], center[1]), (point_white[0], point_white[1]),
                 (255, 255, 255), 5)
        cv2.line(meter, (center[0], center[1]), (point_red[0], point_red[1]),
                 (0, 0, 255), 5)
        cv2.circle(meter, (point_red[0], point_red[1]), 5, (0, 0, 255), -1)
        cv2.imshow("meter", meter)
        cv2.waitKey(0)
    return int(degree_red * 100) / 100, int(degree_white * 100) / 100
def colorIndicator(ROI, info):
    res = 0
    image = meterFinderBySIFT(ROI, info)
    HSV = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # color = [np.array([26, 43, 46]), np.array([34, 255, 255])]
    color = [np.array([11, 43, 46]), np.array([34, 255, 255])]
    Lower = color[0]
    Upper = color[1]
    mask = cv2.inRange(HSV, Lower, Upper)
    upmask = mask[int(0.25 * mask.shape[0]):int(0.5 * mask.shape[0]), :]
    # Reduce the 0/255 mask to 0/1 so the sum counts pixels.
    upmask = cv2.bitwise_and(np.ones(upmask.shape, np.uint8), upmask)
    # Fire when more than 20% of the upper band is the indicator color.
    if np.sum(upmask) / (upmask.shape[0] * upmask.shape[1]) > 0.2:
        res = 1
    return res
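# The ones-AND trick above reduces the 0/255 mask to 0/1 before summing. An
# equivalent, slightly more direct formulation using cv2.countNonZero (same
# 20% threshold, illustrative only):
#
#   filled = cv2.countNonZero(upmask)
#   ratio = filled / (upmask.shape[0] * upmask.shape[1])
#   res = 1 if ratio > 0.2 else 0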
def checkFrame(net, image, info):
    """
    Determine the frame type of the image (A, AB, etc.).
    :param image: image
    :param info: info
    :return: appearance order 0/1/2/3
    """
    start = [info["startPoint"]["x"], info["startPoint"]["y"]]
    end = [info["endPoint"]["x"], info["endPoint"]["y"]]
    center = [info["centerPoint"]["x"], info["centerPoint"]["y"]]
    width = info["rectangle"]["width"]
    height = info["rectangle"]["height"]
    widthSplit = info["widthSplit"]
    heightSplit = info["heightSplit"]
    characSplit = info["characSplit"]
    # Compute the outer rectangle of the digit panel and rectify it.
    fourth = (start[0] + end[0] - center[0], start[1] + end[1] - center[1])
    pts1 = np.float32([start, center, end, fourth])
    pts2 = np.float32([[0, 0], [width, 0], [width, height], [0, height]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    template = meterFinderBySIFT(image, info)
    template = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
    template = cv2.equalizeHist(template)
    dst = cv2.warpPerspective(template, M, (width, height))
    imgType = dst[characSplit[1][0]:characSplit[1][1], characSplit[0][0]:characSplit[0][1]]
    imgType = cv2.adaptiveThreshold(imgType, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                    cv2.THRESH_BINARY, 17, 11)
    imgType = cv2.bitwise_not(imgType)
    # orimage = imgType.copy()
    imgType = torch.Tensor(np.array(imgType, dtype=np.float32))
    imgType = torch.unsqueeze(imgType, 0)
    imgType = torch.unsqueeze(imgType, 0)
    type_probe = net.forward(imgType)
    type_probe = type_probe.detach().numpy()
    maxIndex = np.argmax(type_probe)
    # debug
    # cv2.imshow(str(chr(maxIndex + ord('A'))), orimage)
    # type = str(chr(maxIndex + ord('A')))
    # cv2.imwrite(os.path.join("video_result", type + info['name'] + '_' + str(type_probe) + '.jpg'), orimage)
    # cv2.waitKey(0)
    return maxIndex
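# Hypothetical usage of checkFrame; WEIGHTS_PATH is a placeholder, not a file
# shipped with this module:
#
#   net = characterNet()
#   net.load_state_dict(torch.load(WEIGHTS_PATH, map_location='cpu'))
#   frameType = chr(checkFrame(net, image, info) + ord('A'))  # 0 -> 'A', ...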
def onoffBattery(image, info):
    """
    Detect whether the switch inside the battery panel points up or down.
    :param image:
    :param info:
    :return:
    """
    meter = meterFinderBySIFT(image, info)
    gray = cv2.cvtColor(meter, cv2.COLOR_BGR2GRAY)
    start = [info["startPoint"]["x"], info["startPoint"]["y"]]
    end = [info["endPoint"]["x"], info["endPoint"]["y"]]
    center = [info["centerPoint"]["x"], info["centerPoint"]["y"]]
    # width = info["rectangle"]["width"]
    # height = info["rectangle"]["height"]
    width = 40
    height = 80
    fourth = (start[0] + end[0] - center[0], start[1] + end[1] - center[1])
    pts1 = np.float32([start, center, end, fourth])
    pts2 = np.float32([[0, 0], [width, 0], [width, height], [0, height]])
    M = cv2.getPerspectiveTransform(pts1, pts2)
    dst = cv2.warpPerspective(gray, M, (width, height))
    # Otsu binarization (an adaptive variant is kept below for reference).
    ret, thresh = cv2.threshold(dst, 0, 255, cv2.THRESH_OTSU)
    # thresh = cv2.adaptiveThreshold(dst, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 17, 11)
    # Compare the mean intensity of the upper and lower halves; if the upper
    # half is darker, the switch points up.
    upRegion = np.mean(thresh[:40])
    downRegion = np.mean(thresh[40:])
    if ifShow:
        imgShow = np.hstack((dst, thresh))
        cv2.imshow("ret", imgShow)
        cv2.waitKey(0)
        print(upRegion, downRegion)
    return "on" if upRegion < downRegion else "off"
def digitPressure(image, info):
    template = meterFinderBySIFT(image, info)
    # Create the storage directories for debug images.
    if not os.path.exists("storeDigitData"):
        os.mkdir("storeDigitData")
    try:
        os.mkdir("storeDigitData/thresh")
        os.mkdir("storeDigitData/rgb")
    except IOError:
        pass
    for i in range(11):
        try:
            os.mkdir("storeDigitData/thresh/" + str(i))
            os.mkdir("storeDigitData/rgb/" + str(i))
        except IOError:
            continue
    if 'rgb' in info and info['rgb']:
        # rgb as input
        myRes = rgbRecognize(template, info)
    else:
        myRes = bitRecognize(template, info)
    if info["digitType"] == "KWH":
        myRes[0] = myRes[0][:4] + myRes.pop(1)
    # Strip non-digit characters at the head; replace non-leading '?'
    # with a random digit, then convert to float.
    for i in range(len(myRes)):
        temp = ""
        for j, c in enumerate(myRes[i]):
            if c != "?":
                temp += c
            elif j != 0:
                temp += str(random.randint(0, 9))
        myRes[i] = float(temp) if temp != "" else 0.0
    return myRes
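# The '?'-replacement loop above recurs in the other digitPressure variants
# in this file. A minimal sketch of a shared helper with the same semantics
# (a leading '?' is dropped, any other '?' becomes a random digit, an empty
# string maps to 0.0); _cleanDigits is a hypothetical name:
def _cleanDigits(raw):
    cleaned = []
    for s in raw:
        temp = ""
        for j, c in enumerate(s):
            if c != "?":
                temp += c
            elif j != 0:
                temp += str(random.randint(0, 9))
        cleaned.append(float(temp) if temp != "" else 0.0)
    return cleaned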
def contactStatus(image, info):
    """
    :param image: whole image
    :param info: switch configuration
    :return: True means O, False means 1
    """
    template = meterFinderBySIFT(image, info)
    template = cv2.GaussianBlur(template, (3, 3), 0)
    # Extract the detection region.
    dst = boxRectifier(template, info)
    debug = False
    if debug:
        cv2.imshow("image", image)
        cv2.imshow("template", info['template'])
        cv2.waitKey(0)
    # Grayscale matrix of the image.
    image = getMatrix(dst)
    # Edge detection.
    image = cv2.Canny(image, 10, 200)
    return hasCircle(image)
def videoDigit(video, allinfo):
    """
    :param video: VideoCapture input
    :param allinfo: info
    :return:
    """
    # Create and load the character-classification model.
    net = characterNet(is_rgb=True)
    net.load_state_dict(
        torch.load("Algorithm/OCR/character/rbg2.pkl", map_location='cpu'))
    # Grab the video frames (contains a few duplicate frames).
    pictures = getPictures(video)

    def emptyList():
        return []

    imagesDict = defaultdict(emptyList)
    # Fallback in case not all of the A/B/C frame types are recognized: the
    # video was observed to switch frame type every 120 frames, and the
    # second method below exploits that.
    imagesDict2 = {'A': -1, 'B': -1, 'C': -1}
    box = None
    newinfo = None
    newchange = None
    saveframe = []
    for i, frame in enumerate(pictures):
        # res = digitPressure(frame, copy.deepcopy(info))
        for j in range(len(allinfo)):
            eachinfo = copy.deepcopy(allinfo[j])
            template, flag = meterFinderBySIFT(frame, eachinfo)
            # flag == 0 means no match; try the next template.
            if flag == 0:
                continue
            else:
                break
        if flag == 0:
            # print("template not found!")
            pass
        info = copy.deepcopy(eachinfo)
        # Classify which frame type the current frame belongs to.
        index, charimg = checkFrame(i, net, template, eachinfo)
        if index < 3:
            # Get the digit recognition result and store it under its key.
            res = rgbRecognize(template, eachinfo)
            imagesDict[chr(index + ord('A'))] += [res]
        if len(imagesDict['A']) != 0 and imagesDict2['B'] == -1:
            imagesDict2['B'] = (i + 3) % len(pictures)
        if len(imagesDict['B']) != 0 and imagesDict2['C'] == -1:
            imagesDict2['C'] = (i + 3) % len(pictures)
        if len(imagesDict['C']) != 0 and imagesDict2['A'] == -1:
            imagesDict2['A'] = (i + 6) % len(pictures)
    for key, val in imagesDict.items():
        if len(val) == 0:
            res = digitPressure(pictures[imagesDict2[key]], allinfo)
            imagesDict[key] += [res]
    # Post-process the collected results.
    imagesDict = getResult(imagesDict)
    return imagesDict
def readyStatus(img, info):
    template = info['template']
    # match_res = meterFinderByTemplate(img, info['template'])
    match_res = meterFinderBySIFT(img, info)
    image = boxRectifier(match_res, info)
    # cv2.imshow('Image', image)
    # cv2.waitKey(0)
    # if image is dark enough, do gamma correction for enhancing dark details
    if isDark(image):
        max = np.max(image)
        image = np.power(image / float(max), 1 / 3) * max
        image = image.astype(np.uint8)
        # cv2.imshow('Gamma', image)
        # cv2.waitKey(0)
    t_shape = template.shape[:2]
    # image = meterFinderBySIFT(img, info)
    orig = image.copy()
    # minimum probability required to inspect a region
    min_confidence = 0.5
    # load the input image and grab the image dimensions
    (H, W) = image.shape[:2]
    # path to input EAST text detector
    current_path = os.path.dirname(os.path.abspath(__file__)) + os.path.sep
    model_path = os.path.abspath(current_path + "/frozen_east_text_detection.pb")
    # set the new width and height and then determine the ratio in change
    # for both the width and height; EAST needs dimensions that are multiples
    # of 32, and t_shape is (height, width)
    newW = (t_shape[1]) // 32 * 32
    newH = (t_shape[0]) // 32 * 32
    rW = W / float(newW)
    rH = H / float(newH)
    # resize the image and grab the new image dimensions
    image = cv2.resize(image, (newW, newH))
    (H, W) = image.shape[:2]
    # define the two output layer names for the EAST detector model that
    # we are interested in -- the first is the output probabilities and the
    # second can be used to derive the bounding box coordinates of text
    layerNames = [
        "feature_fusion/Conv_7/Sigmoid",
        "feature_fusion/concat_3"]
    # load the pre-trained EAST text detector
    # print("[INFO] loading EAST text detector...")
    net = cv2.dnn.readNet(model_path)
    # construct a blob from the image and then perform a forward pass of
    # the model to obtain the two output layer sets
    blob = cv2.dnn.blobFromImage(image, 1.0, (W, H),
                                 (123.68, 116.78, 103.94), swapRB=True, crop=False)
    start = time.time()
    net.setInput(blob)
    (scores, geometry) = net.forward(layerNames)
    end = time.time()
    # show timing information on text prediction
    # print("[INFO] text detection took {:.6f} seconds".format(end - start))
    # grab the number of rows and columns from the scores volume, then
    # initialize our set of bounding box rectangles and corresponding
    # confidence scores
    (numRows, numCols) = scores.shape[2:4]
    rects = []
    confidences = []
    # loop over the number of rows
    for y in range(0, numRows):
        # extract the scores (probabilities), followed by the geometrical
        # data used to derive potential bounding box coordinates that
        # surround text
        scoresData = scores[0, 0, y]
        xData0 = geometry[0, 0, y]
        xData1 = geometry[0, 1, y]
        xData2 = geometry[0, 2, y]
        xData3 = geometry[0, 3, y]
        anglesData = geometry[0, 4, y]
        # loop over the number of columns
        for x in range(0, numCols):
            # if our score does not have sufficient probability, ignore it
            if scoresData[x] < min_confidence:
                continue
            # compute the offset factor as our resulting feature maps will
            # be 4x smaller than the input image
            (offsetX, offsetY) = (x * 4.0, y * 4.0)
            # extract the rotation angle for the prediction and then
            # compute the sine and cosine
            angle = anglesData[x]
            cos = np.cos(angle)
            sin = np.sin(angle)
            # use the geometry volume to derive the width and height of
            # the bounding box
            h = xData0[x] + xData2[x]
            w = xData1[x] + xData3[x]
            # compute both the starting and ending (x, y)-coordinates for
            # the text prediction bounding box
            endX = int(offsetX + (cos * xData1[x]) + (sin * xData2[x]))
            endY = int(offsetY - (sin * xData1[x]) + (cos * xData2[x]))
            startX = int(endX - w)
            startY = int(endY - h)
            # add the bounding box coordinates and probability score to
            # our respective lists
            rects.append((startX, startY, endX, endY))
            confidences.append(scoresData[x])
    if len(rects) > 0:
        return "on"
    else:
        return "off"
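# readyStatus only checks whether EAST proposed any text box at all. If the
# boxes themselves were needed (e.g. for drawing on the crop), they would
# have to be rescaled from the resized input back to the original size; a
# sketch using the rW/rH ratios computed inside the function:
#
#   for (startX, startY, endX, endY) in rects:
#       sx, sy = int(startX * rW), int(startY * rH)
#       ex, ey = int(endX * rW), int(endY * rH)
#       cv2.rectangle(orig, (sx, sy), (ex, ey), (0, 255, 0), 2)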
def digitPressure(image, info):
    template = meterFinderBySIFT(image, info)
    template = cv2.GaussianBlur(template, (3, 3), 0)
    # Read the calibration info.
    widthSplit = info["widthSplit"]
    heightSplit = info["heightSplit"]
    # Obtain the LCD region from the calibration points.
    dst = boxRectifier(template, info)
    # Grayscale.
    gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
    # Apply a different enhancement for each digit-meter type.
    if info["digitType"] != "TTC":
        Blur = cv2.GaussianBlur(gray, (5, 5), 0)
        Hist = cv2.equalizeHist(Blur)
        thresh = cv2.adaptiveThreshold(Hist, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                       cv2.THRESH_BINARY_INV, 15, 11)
    else:
        thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                       cv2.THRESH_BINARY, 55, 11)
    # Store the debug images.
    if not os.path.exists("storeDigitData"):
        os.mkdir("storeDigitData")
    if not os.path.exists("storeDigitData/digits"):
        os.mkdir("storeDigitData/digits")
    imgNum = int((len(os.listdir("storeDigitData/")) - 1) / 3)
    cv2.imwrite("storeDigitData/" + str(imgNum) + "_dst.bmp", dst)
    cv2.imwrite("storeDigitData/" + str(imgNum) + "_gray.bmp", gray)
    cv2.imwrite("storeDigitData/" + str(imgNum) + "_thresh.bmp", thresh)
    # Initialize the recognition network.
    MyNet = newNet()
    myRes = []
    for i in range(len(heightSplit)):
        split = widthSplit[i]
        myNum = ""
        for j in range(len(split) - 1):
            if "decimal" in info.keys() and j == info["decimal"][i]:
                myNum += "."
                continue
            # Crop the region for a single digit.
            img = thresh[heightSplit[i][0]:heightSplit[i][1], split[j]:split[j + 1]]
            # Enhance with a morphological opening.
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 2))
            img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
            num = MyNet.recognizeNet(img)
            myNum = myNum + num
            # Store the digit crop.
            cv2.imwrite(
                "storeDigitData/digits/{}_{}{}_p{}.bmp".format(imgNum, i, j, num),
                img)
        myRes.append(myNum)
    if info["digitType"] == "KWH":
        myRes[0] = myRes[0][:4] + myRes.pop(1)
    # Strip non-digit characters at the head; replace non-leading '?'
    # with a random digit, then convert to float.
    for i in range(len(myRes)):
        temp = ""
        for j, c in enumerate(myRes[i]):
            if c != "?":
                temp += c
            elif j != 0:
                temp += str(random.randint(0, 9))
        myRes[i] = float(temp) if temp != "" else 0.0
    if ifShow:
        cv2.imshow("rec", dst)
        cv2.imshow("image", image)
        print(myRes)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
    return myRes
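# Illustration of the KWH merge above, with made-up strings: only the first
# four characters of row 0 are kept and row 1 is appended, so
# myRes == ["12345", "67"] becomes ["1234" + "67"] == ["123467"] before the
# '?' cleanup and float conversion run.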