def __loadimg(self):
    '''Load the source image from self.__setting['path'] and convert it to grayscale.'''
    if self.__setting['path'] == '~':
        # '~' means read the image from the clipboard
        img = grabclipboard()
        if isinstance(img, Image.Image):
            self.__image = img
        else:
            stderr.write('Image error: the clipboard does not contain an image\n')
            return 8
    elif self.__setting['pathtype'] == 'local':
        # Read a local image file
        if exists(self.__setting['path']):
            self.__image = Image.open(self.__setting['path'])
        else:
            stderr.write('Image error: image ' + self.__setting['path'] + ' does not exist\n')
            return 1
    else:
        # Read an image from the network
        try:
            f = BytesIO(b'')
            f.write(rq.get(self.__setting['path']).content)
        except Exception:
            stderr.write('Network error: failed to fetch the image\n')
            return 7
        self.__image = Image.open(f)
    self.__image = self.__image.convert('L')  # convert to grayscale
    return 0
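# A minimal sketch (assumption: the enclosing class keeps its options in the
# self.__setting dict read above; the paths below are placeholders) of the three
# source modes __loadimg distinguishes:
example_settings = [
    {'path': '~', 'pathtype': 'local'},                             # '~': grab the image from the clipboard
    {'path': 'C:/pics/sample.png', 'pathtype': 'local'},            # local file (must exist)
    {'path': 'https://example.com/sample.png', 'pathtype': 'url'},  # any other pathtype: fetched with requests
]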
def screen_shot(option):
    i = 1
    while True:
        print('-' * 20, '{}'.format(i), '-' * 20, end='\n')
        if wait(hotkey='shift+win+s') is None:
            if wait(hotkey='ctrl') is None:
                sleep(0.01)
                try:
                    img = grabclipboard()
                    img.save('picture.png')
                except Exception:
                    print('Please take the screenshot again')
                    screen_shot(option)
                i += 1
                alltexts = get_text(option)
                sleep(0.2)
                # w.OpenClipboard()
                # d = w.GetClipboardData(win32con.CF_UNICODETEXT)
                # w.CloseClipboard()
                # OpenClipboard()
                # EmptyClipboard()
                # SetClipboardData(win32con.CF_UNICODETEXT, alltexts)
                # CloseClipboard()
                pyperclip.copy(alltexts)
                his_texts = "-" * 20 + "{}".format(
                    datetime.now().strftime(r'%Y-%m-%d %H:%M:%S')
                ) + "-" * 20 + "\n" + alltexts + "\n\n\n"
                with open("历史剪切板.txt", "a") as f:  # append to the clipboard-history log
                    f.write(his_texts)
                os.remove("picture.png")
def screen_shot(option):
    i = 1
    while True:
        print('-' * 20, '{}'.format(i), '-' * 20, end='\n')
        if wait(hotkey='shift+win+s') is None:
            if wait(hotkey='ctrl') is None:
                sleep(0.01)
                try:
                    img = grabclipboard()
                    img.save('picture.png')
                except Exception:
                    print('Please take the screenshot again')
                    screen_shot(option)
                i += 1
                alltexts = baidu_ocr.get_text(option)
                sleep(0.2)
                # w.OpenClipboard()
                # d = w.GetClipboardData(win32con.CF_UNICODETEXT)
                # w.CloseClipboard()
                # OpenClipboard()
                # EmptyClipboard()
                # SetClipboardData(win32con.CF_UNICODETEXT, alltexts)
                # CloseClipboard()
                pyperclip.copy(alltexts)
def scan_clipboard(capture: bool = typer.Option(False, "-c")):
    """
    Capture a QR code from your screen
    """
    if capture:
        if sys.platform == "darwin":
            try:
                subprocess.run(["screencapture", "-c", "-s"], check=True)
            except subprocess.CalledProcessError:
                typer.echo("Capture cancelled.")
    _decode(grabclipboard())
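# _decode is defined elsewhere in the project; purely as an illustration (this is not
# the project's actual implementation), a decoder of that shape could be built on pyzbar:
from pyzbar.pyzbar import decode as pyzbar_decode

def _decode_sketch(img):
    """Hypothetical helper: echo the payload of every QR/barcode found in a PIL image."""
    if img is None:
        typer.echo("No image on the clipboard.")
        raise typer.Exit(code=1)
    for symbol in pyzbar_decode(img):
        typer.echo(symbol.data.decode("utf-8", errors="replace"))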
def main(murge=True, pic=None):
    client = init_client()
    if pic:
        data = get_file_content(pic)
    else:
        img = grabclipboard()
        img.save('tmp.png', 'PNG')
        data = get_file_content('tmp.png')
    txtlist = ocr_baidu(client, data)
    if murge:
        txtlist = murge_txt(txtlist)
    for x in txtlist:
        print(x)
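# init_client, get_file_content, ocr_baidu and murge_txt are project helpers defined
# elsewhere; as an assumption, the two I/O-related ones are roughly of this shape
# (placeholder credentials, not the real ones):
from aip import AipOcr

def init_client():
    return AipOcr('YOUR_APP_ID', 'YOUR_API_KEY', 'YOUR_SECRET_KEY')

def get_file_content(path):
    # Baidu's AipOcr expects the raw bytes of the image file.
    with open(path, 'rb') as f:
        return f.read()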
def OCR():
    global Click_x, Click_y, Release_x, Release_y
    global STOP
    clearClipboard()
    oneShot()
    GetPointer()
    # sleep(1)
    # A sleep used to be required here: grabclipboard() reads immediately, while the
    # screenshot takes a moment to land on the clipboard. Earlier workarounds were
    # unreliable (a clipboard that already held an image made OCR recognize the previous
    # picture, and comparing the new image against the last one was flaky), so the
    # current approach clears the clipboard first and retries on the exception raised
    # while it is still empty.
    while True:
        try:
            # if it differs from the previous capture
            if STOP or (Click_x == Release_x and Click_y == Release_y):
                return False
            return OCR_Core(grabclipboard())
        except AttributeError:
            pass
        except Exception:
            messagebox.showinfo("An error occurred",
                                "Error details:\n" + traceback.format_exc())
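# clearClipboard() is defined elsewhere in the project; a minimal sketch of one possible
# Windows implementation (assumption, using pywin32) that empties the clipboard so the
# retry loop above only ever picks up the fresh screenshot:
import win32clipboard

def clearClipboard_sketch():
    win32clipboard.OpenClipboard()
    try:
        win32clipboard.EmptyClipboard()
    finally:
        win32clipboard.CloseClipboard()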
def continueGAL(*args):
    oneShot()
    GetPointer()
    while True:
        try:
            # if it differs from the previous capture
            if STOP or (Click_x == Release_x and Click_y == Release_y):
                return False
            OCRText = OCR_Core(grabclipboard())
        except Exception:
            messagebox.showinfo("An error occurred",
                                "Error details:\n" + traceback.format_exc())
        else:
            GALResultText.delete(1.0, "end")
            GALResultText.insert(
                "end",
                eval(TransAll[GALTranslationSetting.get()] + """('''""" + OCRText + """''')"""))
            break
def continueOCR():
    OCRWin.withdraw()
    oneShot()
    GetPointer()
    while True:
        try:
            # if it differs from the previous capture
            if STOP or (Click_x == Release_x and Click_y == Release_y):
                return False
            OCRText = OCR_Core(grabclipboard())
        except Exception:
            messagebox.showinfo("An error occurred",
                                "Error details:\n" + traceback.format_exc())
        else:
            OCRResultText.delete(1.0, "end")
            OCRResultText.insert("end", OCRText)
            OCRWin.deiconify()
            if ToClip.get():
                textToClip()
            break
def ocr(image=None, lang=None, verbose=False):
    from PIL.ImageGrab import grabclipboard
    from mylib.wrapper.tesseract_ocr import TesseractOCRCLIWrapper
    import yaml
    config = yaml.safe_load(
        open(os.path.splitext(__file__)[0] + '.yml').read())[ocr.__name__]['tesseract']
    tess = TesseractOCRCLIWrapper(config['bin'])
    if verbose:
        tess.logger.setLevel('DEBUG')
        logging.set_logger_format(tess.logger, '# %(message)s')
    lang = lang or config.get('lang')
    image = image or grabclipboard()
    if lang:
        tess.lang(*lang)
    try:
        s = '\r\n'.join(tess.img(image, gray=True).txt().strip().splitlines())
        if s.strip():
            ostk.clipboard.clear().set(s)
            print(s)
    except TypeError:
        print('! non-image provided', file=sys.stderr)
        sys.exit(1)
def imageDetect(self):
    APP_ID = '11538030'
    API_KEY = 'qFXoApbGYFpeNUgRrVhHFBz6'
    SECRET_KEY = 'dzA3hD7CgQ6529uYuwlqV448MOYCq7Bf'
    client = AipOcr(APP_ID, API_KEY, SECRET_KEY)
    try:
        image = grabclipboard()
        image.save("image.jpg")
        # image_ = image.tobitmap()
        with open("image.jpg", "rb") as f:
            image = f.read()
        response = client.basicGeneral(image)
        os.remove("image.jpg")
        if self.btn1.isChecked():
            words = ''.join(i["words"] for i in response["words_result"])
        else:
            words = '\n'.join(i["words"] for i in response["words_result"])
        self.textShow.setPlainText(words)
        pyperclip.copy(words)
        if self.isHidden() and self.btn6.isChecked():
            self.sysIcon.showMessage("Copied to the clipboard", words)
    except Exception:
        QMessageBox.about(self, "Error", "Please take the screenshot again")
import pyperclip
import pytesseract
from numpy import array
from PIL.ImageGrab import grabclipboard
from pytesseract import image_to_string as ocr

pytesseract.pytesseract.tesseract_cmd = 'C:/Program Files/Tesseract-OCR/tesseract.exe'

im = grabclipboard()
if im:
    t = ocr(array(im))
    print(t)
    pyperclip.copy(t)  # copy() puts the text on the clipboard; paste() takes no argument
else:
    print("No image was found")
input("")