def getFullURL(self):
    """Build the full request URL for the recognition API.

    Joins protocol, domain and path, then appends the query string
    (visualFeatures, language, details).  Bytes are used throughout so
    urllib3 does not later decode raw image data as unicode.

    @return: the URL; py2 str is returned as-is, py2 unicode is converted,
        py3 bytes are returned unchanged.
    """
    from six import string_types
    url = self.get_url()
    domain = self.get_domain()
    if self.useHttps:
        protocol = b"https:/"
    else:
        protocol = b"http:/"
    # The protocol deliberately ends with a single slash: the join below
    # supplies the second one ("https:/" + "/" + domain + "/" + url).
    fullURL = b"/".join([protocol, domain, url])
    if six.PY2:
        queryString = b"?visualFeatures={2}&language={0}&details={1}".format(
            self._language.decode("utf-8"),
            self._detail,
            self.getFeatures(),
        )
    else:
        queryString = "?visualFeatures={2}&language={0}&details={1}".format(
            self._language,
            self._detail,
            self.getFeatures(),
        )
        queryString = queryString.encode('utf-8')
    fullURL = fullURL + queryString
    # Unicode URL cause urllib3 to decode raw image data as if they were unicode.
    if isinstance(fullURL, string_types):
        if not isinstance(fullURL, str):
            # URL is unicode in py2
            # NOTE(review): calling .decode() on a py2 unicode object triggers
            # an implicit ascii encode first — confirm this branch is reachable
            # and behaves as intended.
            log.io("Decode URL to str")
            return fullURL.decode("utf-8")
        else:
            # is str in py2
            return fullURL
    else:
        # is bytes in py3
        return fullURL
def prepareImageContent(self, image):
    """Serialize an image, shrinking it until it fits the upload limit.

    Repeatedly scales the image down by self.asymptoticResizeFactor while
    the serialized content is still at least self.maxSize bytes.

    @param image: the image to serialize.
    @return: the serialized content (base64-encoded when
        self.uploadBase64EncodeImage is set), or False if the image could
        not be shrunk below the limit.
    """
    imageContent = self.serializeImage(image)
    imageSize = len(imageContent)
    newImage = None
    # NOTE(review): the loop guards test the ORIGINAL image's dimensions,
    # not the resized image's — presumably intentional since resizeFactor
    # shrinks getResizedImage()'s output each pass; confirm.
    while imageSize >= self.maxSize and image.width >= self.minWidth and image.height >= self.minHeight:
        self.imageInfo.resizeFactor = self.imageInfo.resizeFactor * self.asymptoticResizeFactor
        newImage = self.getResizedImage()
        imageContent = self.serializeImage(newImage)
        imageSize = len(imageContent)
        if config.conf["onlineOCRGeneral"]["verboseDebugLogging"]:
            msg = "newWidth\n{0}\nnewHeight\n{1}\nsize\n{2}".format(
                newImage.width, newImage.height, imageSize)
            log.io(msg)
    if imageSize > self.maxSize:
        # Translators: Reported when error occurred during image serialization
        ui.message(_(u"Image content is too big to upload."))
        if config.conf["onlineOCRGeneral"]["verboseDebugLogging"]:
            newImage = self.getResizedImage()
            msg = "newWidth\n{0}\nnewHeight\n{1}\nsize\n{2}".format(
                newImage.width, newImage.height, imageSize)
            log.io(msg)
        return False
    else:
        if self.uploadBase64EncodeImage:
            return base64.standard_b64encode(imageContent)
        else:
            return imageContent
def extract_text(apiResult):
    """Concatenate the parsed text of every result set into one string.

    @param apiResult: decoded OCR API response containing "ParsedResults".
    @return: all parsed texts joined with single spaces.
    """
    texts = [entry["ParsedText"] for entry in apiResult["ParsedResults"]]
    log.io(texts)
    return u" ".join(texts)
def doHTTPRequest(callback, method, url, **kwargs):
    """
    Perform an HTTP request and hand the response body to callback.
    Call this method in a separate thread to avoid blocking.
    @param callback: called with the response data, or None on failure.
    @type callback: callable
    @param method: HTTP verb, e.g. b'GET' or b'POST'.
    @type method: bytes
    @param url: request URL; bytes are decoded to str on py3.
    @type url: bytes
    """
    refreshConnectionPool()
    try:
        # urllib3 on py3 expects a text URL.
        if isinstance(url, bytes) and six.PY3:
            url = url.decode('utf-8')
        r = httpConnectionPool.request(method, url, **kwargs)
    except urllib3.exceptions.TimeoutError as e:
        # Log the exception before announcing it, consistent with the
        # HTTPError branch below (previously it was silently discarded).
        log.error(e)
        # Translators: Message announced when network error occurred
        showMessageInNetworkThread(_(u"Internet connection timeout.Recognition failed."))
        callback(None)
        return
    except urllib3.exceptions.HTTPError as e:
        log.error(e)
        # Translators: Message announced when network error occurred
        showMessageInNetworkThread(_(u"Recognition failed. Internet connection error."))
        callback(None)
        return
    log.io(r.data)
    callback(r.data)
def _speakSpellingGen(text,locale,useCharacterDescriptions):
    """Generator that spells out text one character at a time.

    For each character: optionally substitutes its description (e.g.
    "alpha"), announces capitals per synth config ("cap" prefix, raised
    pitch and/or beep), and yields until the synth reports the character
    was spoken.

    @param text: the text to spell.
    @param locale: locale used to look up character descriptions.
    @param useCharacterDescriptions: whether to speak character
        descriptions instead of bare characters.
    """
    textLength=len(text)
    synth=getSynth()
    synthConfig=config.conf["speech"][synth.name]
    for count,char in enumerate(text):
        uppercase=char.isupper()
        charDesc=None
        if useCharacterDescriptions:
            from characterProcessing import getCharacterDescription
            charDesc=getCharacterDescription(locale,char.lower())
        if charDesc:
            char=charDesc
        else:
            char=processSymbol(char)
        if uppercase and synthConfig["sayCapForCapitals"]:
            char=_("cap %s")%char
        if uppercase and synth.isSupported("pitch") and synthConfig["raisePitchForCapitals"]:
            # Temporarily raise the pitch for capitals, clamped to 0..100.
            oldPitch=synthConfig["pitch"]
            synth.pitch=max(0,min(oldPitch+synthConfig["capPitchChange"],100))
        index=count+1
        log.io("Speaking character %r"%char)
        if len(char) == 1 and synthConfig["useSpellingFunctionality"]:
            synth.speakCharacter(char,index=index)
        else:
            synth.speakText(char,index=index)
        if uppercase and synth.isSupported("pitch") and synthConfig["raisePitchForCapitals"]:
            # Restore the pre-capital pitch.
            synth.pitch=oldPitch
        # Yield until the synth reaches this character's index, unless
        # speech is paused handling applies or the text is one character.
        while textLength>1 and (isPaused or getLastSpeechIndex()!=index):
            yield
        yield
        if uppercase and synthConfig["beepForCapitals"]:
            tones.beep(2000,50)
def recognizeHyperLink(self, link, onResult):
    """
    Setup data for recognition then send request
    @param link: URL of the image to recognize.
    @param onResult: Result callback for result viewer
    @return: None
    """
    # Only one recognition may run at a time.
    if self.networkThread:
        # Translators: Error message
        ui.message(
            _(u"There is another recognition ongoing. Please wait."))
        return
    # NOTE(review): onResult is not stored here, unlike recognize();
    # verify whether self.callback relies on a previously set handler.
    payloads = self.getPayloadForHyperLink(link)
    fullURL = self.getFullURL()
    headers = self.getHTTPHeaders("")
    if config.conf["onlineOCRGeneral"]["verboseDebugLogging"]:
        log.io(type(fullURL))
        msg = u"{0}\n{1}\n{2}".format(
            fullURL,
            headers,
            payloads,
        )
        log.io(msg)
    self.sendRequest(self.callback, fullURL, payloads, headers)
def recognize(self, pixels, imageInfo, onResult):
    """
    Setup data for recognition then send request
    @param pixels: raw pixel data of the captured image.
    @param imageInfo: metadata describing the captured image.
    @param onResult: Result callback for result viewer
    @return: None
    """
    # Only one recognition may run at a time.
    if self.networkThread:
        # Translators: Error message
        ui.message(
            _(u"There is another recognition ongoing. Please wait."))
        return
    # Stash the inputs so the network callback can use them later.
    self.pixels = pixels
    self.onResult = onResult
    self.imageInfo = imageInfo
    imageObject = self.prepareImageObject(pixels, imageInfo)
    if not imageObject:
        return
    # prepareImageContent returns False when the image cannot be shrunk
    # below the upload limit.
    imageContent = self.prepareImageContent(imageObject)
    if not imageContent:
        return
    payloads = self.getPayload(imageContent)
    fullURL = self.getFullURL()
    headers = self.getHTTPHeaders(imageContent)
    if config.conf["onlineOCRGeneral"]["verboseDebugLogging"]:
        log.io(type(fullURL))
        msg = u"{0}\n{1}\n{2}".format(
            fullURL,
            headers,
            payloads,
        )
        log.io(msg)
    self.sendRequest(self.callback, fullURL, payloads, headers)
def executeGesture(self, gesture):
    """Perform the action associated with a gesture.
    @param gesture: The gesture to execute.
    @type gesture: L{InputGesture}
    @raise NoInputGestureAction: If there is no action to perform.
    """
    if watchdog.isAttemptingRecovery:
        # The core is dead, so don't try to perform an action.
        # This lets gestures pass through unhindered where possible,
        # as well as stopping a flood of actions when the core revives.
        raise NoInputGestureAction
    script = gesture.script
    focus = api.getFocusObject()
    if focus.sleepMode is focus.SLEEP_FULL or (focus.sleepMode and not getattr(script, 'allowInSleepMode', False)):
        raise NoInputGestureAction
    # Track whether say-all was running when this gesture (or the first
    # modifier of its shortcut) was pressed, so scripts can resume it.
    wasInSayAll=False
    if gesture.isModifier:
        if not self.lastModifierWasInSayAll:
            wasInSayAll=self.lastModifierWasInSayAll=sayAllHandler.isRunning()
    elif self.lastModifierWasInSayAll:
        wasInSayAll=True
        self.lastModifierWasInSayAll=False
    else:
        wasInSayAll=sayAllHandler.isRunning()
    if wasInSayAll:
        gesture.wasInSayAll=True
    speechEffect = gesture.speechEffectWhenExecuted
    if speechEffect == gesture.SPEECHEFFECT_CANCEL:
        queueHandler.queueFunction(queueHandler.eventQueue, speech.cancelSpeech)
    elif speechEffect in (gesture.SPEECHEFFECT_PAUSE, gesture.SPEECHEFFECT_RESUME):
        queueHandler.queueFunction(queueHandler.eventQueue, speech.pauseSpeech, speechEffect == gesture.SPEECHEFFECT_PAUSE)
    if log.isEnabledFor(log.IO) and not gesture.isModifier:
        log.io("Input: %s" % gesture.logIdentifier)
    # A capture function may consume the gesture entirely.
    if self._captureFunc:
        try:
            if self._captureFunc(gesture) is False:
                return
        except:
            log.error("Error in capture function, disabling", exc_info=True)
            self._captureFunc = None
    if gesture.isModifier:
        raise NoInputGestureAction
    if config.conf["keyboard"]["speakCommandKeys"] and gesture.shouldReportAsCommand:
        queueHandler.queueFunction(queueHandler.eventQueue, speech.speakMessage, gesture.displayName)
    gesture.reportExtra()
    if script:
        scriptHandler.queueScript(script, gesture)
        return
    raise NoInputGestureAction
def executeGesture(self, gesture):
    """Perform the action associated with a gesture.
    @param gesture: The gesture to execute.
    @type gesture: L{InputGesture}
    @raise NoInputGestureAction: If there is no action to perform.
    """
    if watchdog.isAttemptingRecovery:
        # The core is dead, so don't try to perform an action.
        # This lets gestures pass through unhindered where possible,
        # as well as stopping a flood of actions when the core revives.
        raise NoInputGestureAction
    script = gesture.script
    focus = api.getFocusObject()
    if focus.sleepMode is focus.SLEEP_FULL or (
            focus.sleepMode and not getattr(script, 'allowInSleepMode', False)):
        raise NoInputGestureAction
    speechEffect = gesture.speechEffectWhenExecuted
    if speechEffect == gesture.SPEECHEFFECT_CANCEL:
        queueHandler.queueFunction(queueHandler.eventQueue, speech.cancelSpeech)
    elif speechEffect in (gesture.SPEECHEFFECT_PAUSE, gesture.SPEECHEFFECT_RESUME):
        queueHandler.queueFunction(
            queueHandler.eventQueue, speech.pauseSpeech,
            speechEffect == gesture.SPEECHEFFECT_PAUSE)
    if log.isEnabledFor(log.IO) and not gesture.isModifier:
        log.io("Input: %s" % gesture.logIdentifier)
    # Input help mode: describe the gesture instead of executing it,
    # unless the script explicitly bypasses input help.
    if self.isInputHelpActive:
        bypass = getattr(script, "bypassInputHelp", False)
        queueHandler.queueFunction(queueHandler.eventQueue, self._handleInputHelp, gesture, onlyLog=bypass)
        if not bypass:
            return
    if gesture.isModifier:
        raise NoInputGestureAction
    if config.conf["keyboard"][
            "speakCommandKeys"] and gesture.shouldReportAsCommand:
        queueHandler.queueFunction(queueHandler.eventQueue, speech.speakMessage, gesture.displayName)
    gesture.reportExtra()
    if script:
        scriptHandler.queueScript(script, gesture)
        return
    raise NoInputGestureAction
def logTimeSinceInput():
    """Log the time since the last input was received.
    This does nothing if time since input logging is disabled.
    """
    loggingWanted = (
        log.isEnabledFor(log.IO)
        and config.conf["debugLog"]["timeSinceInput"]
        and manager
        and manager._lastInputTime
    )
    if not loggingWanted:
        return
    elapsed = time.time() - manager._lastInputTime
    log.io("%.3f sec since input" % elapsed)
def logTimeSinceInput():
    """Log the time since the last input was received.
    This does nothing if time since input logging is disabled.
    """
    if not log.isEnabledFor(log.IO):
        return
    if not config.conf["debugLog"]["timeSinceInput"]:
        return
    if not manager or not manager._lastInputTime:
        return
    log.io("%.3f sec since input" % (time.time() - manager._lastInputTime))
def _pushNextSpeech(self, doneSpeaking: bool):
    """Pull the next utterance from the highest priority queue and speak it.

    Handles switching between priority queues, exiting/restoring config
    profile triggers (which require the synth to be silent first), and
    tracking the indexes of the utterance being sent.

    @param doneSpeaking: True when called because the synth just finished
        speaking; profile switches only happen in that case.
    """
    queue = self._getNextPriority()
    if not queue:
        # No more speech.
        log._speechManagerDebug("No more speech")
        self._curPriQueue = None
        return
    if not self._curPriQueue:
        # First utterance after no speech.
        self._curPriQueue = queue
    elif queue.priority > self._curPriQueue.priority:
        # Preempted by higher priority speech.
        if self._curPriQueue.enteredProfileTriggers:
            if not doneSpeaking:
                # Wait for the synth to finish speaking.
                # _handleDoneSpeaking will call us again.
                self._shouldPushWhenDoneSpeaking = True
                return
            self._exitProfileTriggers(
                self._curPriQueue.enteredProfileTriggers)
        self._curPriQueue = queue
    elif queue.priority < self._curPriQueue.priority:
        # Resuming a preempted, lower priority queue.
        if queue.enteredProfileTriggers:
            if not doneSpeaking:
                # Wait for the synth to finish speaking.
                # _handleDoneSpeaking will call us again.
                self._shouldPushWhenDoneSpeaking = True
                return
            self._restoreProfileTriggers(queue.enteredProfileTriggers)
        self._curPriQueue = queue
    # Consume any leading profile switch commands before speaking.
    while queue.pendingSequences and isinstance(
            queue.pendingSequences[0][0], ConfigProfileTriggerCommand):
        if not doneSpeaking:
            # Wait for the synth to finish speaking.
            # _handleDoneSpeaking will call us again.
            self._shouldPushWhenDoneSpeaking = True
            return
        self._switchProfile()
    if not queue.pendingSequences:
        # The last commands in this queue were profile switches.
        # Call this method again in case other queues are waiting.
        return self._pushNextSpeech(True)
    seq = self._buildNextUtterance()
    if seq:
        log.io(f"Sending to synth: {seq}")
        # So that we can handle any accidentally skipped indexes.
        for item in seq:
            if isinstance(item, IndexCommand):
                self._indexesSpeaking.append(item.index)
        getSynth().speak(seq)
def nvdaKgsHandleKeyInfoProc(lpKeys):
    """Convert a 3-byte KGS braille display key packet into gesture names
    and dispatch the resulting gesture.

    @param lpKeys: sequence/pointer with at least 3 key bytes.
    @return: True if a gesture was executed, False otherwise.
    """
    keys = (lpKeys[0], lpKeys[1], lpKeys[2])
    log.io("keyInfo %d %d %d" % keys)
    log.io("keyInfo hex %x %x %x" % keys)
    names = []
    routingIndex = None
    if keys[0] == 0:
        # First byte 0: second byte is a bitmask of panel keys.
        if keys[1] & 1: names.append('lf')
        if keys[1] & 2: names.append('bk')
        if keys[1] & 4: names.append('sr')
        if keys[1] & 8: names.append('sl')
        if keys[1] & 16: names.append('func1')
        if keys[1] & 32: names.append('func2')
        if keys[1] & 64: names.append('func3')
        if keys[1] & 128: names.append('func4')
    else:
        # Routing key packet: second byte carries the 1-based cell index.
        tCode = 240
        # NOTE(review): "keys[0] & 1 + tCode" parses as "keys[0] & (1 + tCode)"
        # because + binds tighter than &; confirm these are the intended masks.
        if keys[0] & 1 + tCode: names.append('func1')
        if keys[0] & 2 + tCode: names.append('func2')
        if keys[0] & 4 + tCode: names.append('func3')
        if keys[0] & 8 + tCode: names.append('func4')
        names.append('route')
        routingIndex = keys[1] - 1
    if routingIndex is not None:
        log.io("names %s %d" % ('+'.join(names), routingIndex))
    else:
        log.io("names %s" % '+'.join(names))
    if len(names):
        inputCore.manager.executeGesture(InputGesture(names, routingIndex))
        return True
    return False
def getSignature(ak, sk, url, method='POST'):
    """Compute the sac-auth-v1 authorization header value for the Sogou OCR API.

    The signature is an HMAC-SHA256 over "<prefix>\\nPOST\\napi.ai.sogou.com\\n/pub/ocr\\n"
    keyed by the secret key, base64-encoded and appended to the prefix.

    @param ak: access key identifier.
    @param sk: secret key (str or bytes).
    @param url: unused; kept for interface compatibility.
    @param method: unused; kept for interface compatibility.
    @return: the header value as text.
    """
    import hashlib
    import time
    import hmac
    import base64
    pre = "sac-auth-v1/" + ak + '/' + str(int(time.time())) + "/3600"
    part2 = "\nPOST\napi.ai.sogou.com\n/pub/ocr\n"
    message = pre + part2
    if not isinstance(message, bytes):
        message = message.encode('utf-8')
        pre = pre.encode('utf-8')
    if not isinstance(sk, bytes):
        sk = sk.encode('utf-8')
    signature = hmac.new(sk, message, digestmod=hashlib.sha256).digest()
    headerSig = pre + b'/' + base64.b64encode(signature)
    log.io(headerSig)
    # Decode instead of str(): on py3, str(bytes) yields "b'...'" which
    # would corrupt the authorization header.
    return headerSig.decode('utf-8')
def form_encode(options):
    """
    generate www-form
    @param options: request parameters
    @type options: dict
    @return: the url-encoded form body
    """
    pieces = [
        urlencode({name: options[name]})
        for name in sorted(iterkeys(options))
    ]
    payload = "&".join(pieces)
    if config.conf["onlineOCRGeneral"]["verboseDebugLogging"]:
        log.io(payload)
    return payload
def executeGesture(self, gesture):
    """Perform the action associated with a gesture.
    @param gesture: The gesture to execute.
    @type gesture: L{InputGesture}
    @raise NoInputGestureAction: If there is no action to perform.
    """
    if watchdog.isAttemptingRecovery:
        # The core is dead, so don't try to perform an action.
        # This lets gestures pass through unhindered where possible,
        # as well as stopping a flood of actions when the core revives.
        raise NoInputGestureAction
    script = gesture.script
    focus = api.getFocusObject()
    if focus.sleepMode is focus.SLEEP_FULL or (focus.sleepMode and not getattr(script, "allowInSleepMode", False)):
        raise NoInputGestureAction
    speechEffect = gesture.speechEffectWhenExecuted
    if speechEffect == gesture.SPEECHEFFECT_CANCEL:
        queueHandler.queueFunction(queueHandler.eventQueue, speech.cancelSpeech)
    elif speechEffect in (gesture.SPEECHEFFECT_PAUSE, gesture.SPEECHEFFECT_RESUME):
        queueHandler.queueFunction(
            queueHandler.eventQueue,
            speech.pauseSpeech,
            speechEffect == gesture.SPEECHEFFECT_PAUSE
        )
    if log.isEnabledFor(log.IO) and not gesture.isModifier:
        log.io("Input: %s" % gesture.logIdentifier)
    # Input help mode: describe the gesture instead of executing it,
    # unless the script explicitly bypasses input help.
    if self.isInputHelpActive:
        bypass = getattr(script, "bypassInputHelp", False)
        queueHandler.queueFunction(queueHandler.eventQueue, self._handleInputHelp, gesture, onlyLog=bypass)
        if not bypass:
            return
    if gesture.isModifier:
        raise NoInputGestureAction
    if config.conf["keyboard"]["speakCommandKeys"] and gesture.shouldReportAsCommand:
        queueHandler.queueFunction(queueHandler.eventQueue, speech.speakMessage, gesture.displayName)
    gesture.reportExtra()
    if script:
        scriptHandler.queueScript(script, gesture)
        return
    raise NoInputGestureAction
def convert_to_line_result_format(self, apiResult):
    """Convert an OCR.space style response into the line result structure.

    @param apiResult: decoded API response with "ParsedResults" entries
        carrying a "TextOverlay" of lines and words.
    @return: list of lines, each a list of word dicts with
        x/y/width/height/text keys.
    """
    lineResult = []
    for parsed in apiResult["ParsedResults"]:
        for line in parsed["TextOverlay"]["Lines"]:
            lineResult.append([
                {
                    "x": word["Left"],
                    "y": word["Top"],
                    "width": word["Width"],
                    "height": word["Height"],
                    "text": word["WordText"],
                }
                for word in line['Words']
            ])
    log.io(lineResult)
    return lineResult
def convert_to_line_result_format(self, apiResult):
    """Convert a response whose frames are "x,y" coordinate strings into
    the line result structure.

    Uses frame corner 1 (top-right) and corner 2 (bottom-left) of each
    item to derive the bounding box.

    @param apiResult: decoded API response with a u"result" list.
    @return: list of single-entry lines with x/y/width/height/text keys.
    """
    def parsePoint(text):
        parts = text.split(',')
        return int(parts[0]), int(parts[1])

    lineResult = []
    for item in apiResult[u"result"]:
        right, top = parsePoint(item[u"frame"][1])
        left, bottom = parsePoint(item[u"frame"][2])
        lineResult.append([{
            "x": left,
            "y": top,
            "width": right - left,
            "height": bottom - top,
            "text": item[u"content"],
        }])
    log.io(lineResult)
    return lineResult
def nvdaKgsHandleKeyInfoProc(lpKeys):
    """Convert a 4-byte KGS braille display key packet into gesture names
    and dispatch the resulting gesture.

    Byte 2 is a modifier bitmask; byte 0 selects a key group and byte 1
    is either a bitmask within that group or (group 4) a routing index.

    @param lpKeys: sequence/pointer with at least 4 key bytes.
    @return: True if a gesture was executed, False otherwise.
    """
    keys = (lpKeys[0], lpKeys[1], lpKeys[2], lpKeys[3])
    log.io("keyInfo %d %d %d %d" % keys)
    log.io("keyInfo hex %x %x %x %x" % keys)
    names = set()
    routingIndex = None
    # Modifier bits carried in the third byte.
    modifierBits = (
        (1, 'func1'), (2, 'func4'), (4, 'ctrl'), (8, 'alt'),
        (16, 'select'), (32, 'read'), (64, 'func2'), (128, 'func3'),
    )
    for bit, name in modifierBits:
        if keys[2] & bit:
            names.add(name)
    # Per-group bitmask tables keyed by the first byte.
    groupBits = {
        1: ((1, 'space'), (2, 'dot6'), (4, 'dot5'), (8, 'dot4'),
            (16, 'enter'), (32, 'dot3'), (64, 'dot2'), (128, 'dot1')),
        2: ((1, 'esc'), (2, 'inf'), (4, 'bs'), (8, 'del'),
            (16, 'ins'), (32, 'chng'), (64, 'ok'), (128, 'set')),
        3: ((1, 'upArrow'), (2, 'downArrow'), (4, 'leftArrow'), (8, 'rightArrow')),
        6: ((1, 'bw'), (2, 'fw'), (4, 'ls'), (8, 'rs')),
    }
    if keys[0] == 4:
        # Routing key: second byte holds the 1-based cell number.
        names.add('route')
        routingIndex = keys[1] - 1
    elif keys[0] in groupBits:
        for bit, name in groupBits[keys[0]]:
            if keys[1] & bit:
                names.add(name)
    if routingIndex is not None:
        log.io("names %s %d" % ('+'.join(names), routingIndex))
    else:
        log.io("names %s" % '+'.join(names))
    if len(names):
        inputCore.manager.executeGesture(InputGesture(names, routingIndex))
        return True
    return False
def isValid(self):
    """Validate the proxy settings chosen in the panel by fetching a test page.

    Temporarily applies the new proxy configuration, issues a GET request
    through a refreshed connection pool and reports the outcome.  On
    failure the previous settings are restored.

    @return: True if the proxy works (or no proxy is selected), False otherwise.
    """
    oldProxy = config.conf["onlineOCRGeneral"]["proxyAddress"]
    oldProxyType = config.conf["onlineOCRGeneral"]["proxyType"]
    newProxyType = self.PROXY_TYPES[self.proxyTypeList.GetSelection()][0]
    if newProxyType == u"noProxy":
        # Nothing to verify when no proxy is used.
        return True
    # Translators: Reported when save proxy settings in online ocr panel
    ui.message(_(u"Checking your proxy settings"))
    config.conf["onlineOCRGeneral"]["proxyType"] = newProxyType
    config.conf["onlineOCRGeneral"]["proxyAddress"] = self.proxyAddressTextCtrl.GetValue()
    from .winHttp import httpConnectionPool, refreshConnectionPool
    try:
        refreshConnectionPool()
        log.io(httpConnectionPool)
        r = httpConnectionPool.request(b'GET', b"http://www.example.com")
        msg = u"pool:\n{0}\nHeaders:\n{1}\nResponse:\n{2}".format(
            httpConnectionPool,
            r.headers,
            r.data,
        )
        log.io(msg)
        gui.messageBox(
            # Translators: Reported when proxy verification succeeds in online ocr settings panel
            caption=_(u"Proxy is valid, settings is saved."),
            # Response bodies are dynamic and must not go through gettext;
            # show them verbatim.
            message=str(r.data),
        )
        return True
    except Exception as e:
        # Restore the previous, known-good configuration.
        config.conf["onlineOCRGeneral"]["proxyType"] = oldProxyType
        config.conf["onlineOCRGeneral"]["proxyAddress"] = oldProxy
        refreshConnectionPool()
        gui.messageBox(
            # Translators: Reported when proxy verification fails in online ocr settings panel
            caption=_(u"Proxy is not valid, please check your proxy type and address."),
            # Exception objects must be stringified; gettext requires a str.
            message=str(e),
        )
        return False
def create_payload(self, png_string, app_id="", app_secret="", use_nvda_cn=True):
    """Build the signed request payload for image recognition.

    @param png_string: the serialized image content.
    @param app_id: application identifier (ignored when use_nvda_cn).
    @param app_secret: secret used to sign the request.
    @param use_nvda_cn: use the nvdacn relay (image only) instead of the
        direct API (full parameter set).
    @return: dict of request options including the uppercased signature.
    """
    if use_nvda_cn:
        options = {b"image": png_string}
    else:
        import time
        import random
        options = {
            b"app_id": app_id,
            b"time_stamp": int(time.time()),
            b"nonce_str": str(random.randint(10000, 99999)),
            b"image": png_string,
        }
    options[b"sign"] = self.calculate_signature(options, app_secret).upper()
    log.io(options)
    return options
def beep(hz, length, left=50, right=50):
    """Plays a tone at the given hz, length, and stereo balance.
    @param hz: pitch in hz of the tone
    @type hz: float
    @param length: length of the tone in ms
    @type length: integer
    @param left: volume of the left channel (0 to 100)
    @type left: integer
    @param right: volume of the right channel (0 to 100)
    @type right: integer
    """
    log.io("Beep at pitch %s, for %s ms, left volume %s, right volume %s" % (hz, length, left, right))
    if not player:
        return
    from NVDAHelper import generateBeep
    # First call sizes the wave data, second call fills the buffer.
    requiredSize = generateBeep(None, hz, length, left, right)
    waveBuffer = create_string_buffer(requiredSize)
    generateBeep(waveBuffer, hz, length, left, right)
    player.stop()
    player.feed(waveBuffer.raw)
def speakTypedCharacters(ch):
    """Handle a typed character: buffer it for word echo and speak it.

    Maintains the module-level curWordChars buffer; when a non-alphanumeric
    character ends a word, the buffered word may be spoken.  Characters in
    protected (password) fields are echoed as "*".

    @param ch: the character that was typed.
    """
    global curWordChars;
    if api.isTypingProtected():
        realChar="*"
    else:
        realChar=ch
    if ch.isalnum():
        curWordChars.append(realChar)
    elif ch=="\b":
        # Backspace, so remove the last character from our buffer.
        del curWordChars[-1:]
    elif len(curWordChars)>0:
        # A word separator was typed; flush and optionally speak the word.
        typedWord="".join(curWordChars)
        curWordChars=[]
        if log.isEnabledFor(log.IO):
            log.io("typed word: %s"%typedWord)
        if config.conf["keyboard"]["speakTypedWords"]:
            speakText(typedWord)
    # Only speak printable characters (ordinal >= 32).
    if config.conf["keyboard"]["speakTypedCharacters"] and ord(ch)>=32:
        speakSpelling(realChar)
def convert_to_line_result_format(self, apiResult):
    """Group text detections into lines by their vertical position.

    Walks the detections in order and starts a new line whenever a
    detection's Y coordinate exceeds the largest Y seen so far.
    NOTE(review): assumes detections arrive sorted top-to-bottom —
    confirm against the API's ordering guarantees.

    @param apiResult: decoded API response.
    @return: list of lines, each a list of word dicts with
        x/y/width/height/text keys.
    """
    lineResult = []
    # Seed the comparison with the first detection's Y coordinate.
    height = apiResult["Response"]["TextDetections"][0]["ItemPolygon"]["Y"]
    wordResult = []
    for words in apiResult["Response"]["TextDetections"]:
        newHeight = words["ItemPolygon"]["Y"]
        if newHeight > height:
            # A lower detection starts a new line; flush the current one.
            height = newHeight
            lineResult.append(wordResult)
            wordResult = []
        wordResult.append({
            "x": words["ItemPolygon"]["X"],
            "y": words["ItemPolygon"]["Y"],
            "width": words["ItemPolygon"]["Width"],
            "height": words["ItemPolygon"]["Height"],
            "text": words["DetectedText"],
        })
    # Flush the final line.
    lineResult.append(wordResult)
    log.io(lineResult)
    return lineResult
def speakText(text,index=None,reason=REASON_MESSAGE,expandPunctuation=None):
    """Speaks some text.
    @param text: The text to speak.
    @type text: str
    @param index: The index to mark this text with, which can be used later to determine whether this piece of text has been spoken.
    @type index: int
    @param reason: The reason for this speech; one of the REASON_* constants.
    @param expandPunctuation: Whether to speak punctuation; C{None} (default) to use the user's configuration.
    @param expandPunctuation: bool
    """
    import speechViewer
    if speechViewer.isActive:
        speechViewer.appendText(text)
    from brailleDisplayDrivers.DirectBM_drv import DirectBM_drv # Masataka.Shinke
    DirectBM_drv.sp(text) # Masataka.Shinke
    #if speechViewer.isActive: # Masataka.Shinke
    #	speechViewer.appendText(u'('+DirectBM_drv.wakach(text)+u')') # Masataka.Shinke
    global beenCanceled, curWordChars
    curWordChars=[]
    if speechMode==speechMode_off:
        return
    elif speechMode==speechMode_beeps:
        # Beeps speech mode: indicate speech with a tone instead.
        tones.beep(config.conf["speech"]["beepSpeechModePitch"],speechMode_beeps_ms)
        return
    if isPaused:
        cancelSpeech()
    beenCanceled=False
    log.io("Speaking %r" % text)
    if expandPunctuation is None:
        expandPunctuation=config.conf["speech"]["speakPunctuation"]
    if text is None:
        text=""
    else:
        text=processText(text,expandPunctuation=expandPunctuation)
    # NOTE(review): "not text or not text.isspace()" also lets empty text
    # through to the synth — confirm this condition is intended.
    if not text or not text.isspace():
        getSynth().speakText(text,index=index)
def convert_to_line_result_format(self, apiResult):
    """Convert a response with "x,y,w,h" boundingBox strings into the
    line result structure.

    @param apiResult: decoded API response with "regions" of lines/words.
    @return: list of lines, each a list of word dicts with
        x/y/width/height/text keys.
    """
    def parseBox(box):
        parts = box.split(',')
        return int(parts[0]), int(parts[1]), int(parts[2]), int(parts[3])

    lineResult = []
    for region in apiResult["regions"]:
        for line in region["lines"]:
            wordEntries = []
            for word in line['words']:
                left, top, width, height = parseBox(word["boundingBox"])
                wordEntries.append({
                    "x": left,
                    "y": top,
                    "width": width,
                    "height": height,
                    "text": word["text"]
                })
            lineResult.append(wordEntries)
    log.io(lineResult)
    return lineResult
def getConnectionPool():
    """Create a urllib3 pool manager honouring the configured proxy settings.

    @return: a ProxyManager (http proxy), SOCKSProxyManager (socks proxy)
        or plain PoolManager (no proxy), each with a 10s connect/read timeout.
    """
    proxyType = config.conf["onlineOCRGeneral"]["proxyType"]
    proxyAddress = config.conf["onlineOCRGeneral"]["proxyAddress"]
    msg = u"type:\n{0}\naddress:\n{1}".format(
        proxyType,
        proxyAddress
    )
    requestTimeout = urllib3.Timeout(connect=10, read=10)
    if proxyType == u"http":
        pool = urllib3.ProxyManager(
            proxyAddress,
            timeout=requestTimeout
        )
        msg += u"\nHTTP proxy\n{0}".format(pool)
    elif proxyType == u"socks":
        pool = SOCKSProxyManager(
            proxyAddress,
            timeout=requestTimeout
        )
        msg += u"\nSocks proxy\n{0}".format(pool)
    else:
        pool = urllib3.PoolManager(
            timeout=requestTimeout
        )
        msg += u"\nNo proxy\n{0}".format(pool)
    log.io(msg)
    return pool
def extract_text(self, apiResult):
    """Join every detected text fragment into one space-separated string.

    @param apiResult: decoded API response with Response/TextDetections.
    @return: the detected texts joined with single spaces.
    """
    fragments = [
        detection["DetectedText"]
        for detection in apiResult["Response"]["TextDetections"]
    ]
    log.io(fragments)
    return u" ".join(fragments)
def executeGesture(self, gesture):
    """Perform the action associated with a gesture.
    @param gesture: The gesture to execute.
    @type gesture: L{InputGesture}
    @raise NoInputGestureAction: If there is no action to perform.
    """
    if watchdog.isAttemptingRecovery:
        # The core is dead, so don't try to perform an action.
        # This lets gestures pass through unhindered where possible,
        # as well as stopping a flood of actions when the core revives.
        raise NoInputGestureAction
    script = gesture.script
    focus = api.getFocusObject()
    if focus.sleepMode is focus.SLEEP_FULL or (
            focus.sleepMode and not getattr(script, 'allowInSleepMode', False)):
        raise NoInputGestureAction
    # Track whether say-all was running when this gesture (or the first
    # modifier of its shortcut) was pressed, so scripts can resume it.
    wasInSayAll = False
    if gesture.isModifier:
        if not self.lastModifierWasInSayAll:
            wasInSayAll = self.lastModifierWasInSayAll = sayAllHandler.isRunning()
    elif self.lastModifierWasInSayAll:
        wasInSayAll = True
        self.lastModifierWasInSayAll = False
    else:
        wasInSayAll = sayAllHandler.isRunning()
    if wasInSayAll:
        gesture.wasInSayAll = True
    speechEffect = gesture.speechEffectWhenExecuted
    if speechEffect == gesture.SPEECHEFFECT_CANCEL:
        queueHandler.queueFunction(queueHandler.eventQueue, speech.cancelSpeech)
    elif speechEffect in (gesture.SPEECHEFFECT_PAUSE, gesture.SPEECHEFFECT_RESUME):
        queueHandler.queueFunction(
            queueHandler.eventQueue, speech.pauseSpeech,
            speechEffect == gesture.SPEECHEFFECT_PAUSE)
    if gesture.shouldPreventSystemIdle:
        winKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)
    if log.isEnabledFor(log.IO) and not gesture.isModifier:
        self._lastInputTime = time.time()
        log.io("Input: %s" % gesture.identifiers[0])
    # A capture function may consume the gesture entirely.
    if self._captureFunc:
        try:
            if self._captureFunc(gesture) is False:
                return
        except:
            log.error("Error in capture function, disabling", exc_info=True)
            self._captureFunc = None
    if gesture.isModifier:
        raise NoInputGestureAction
    if config.conf["keyboard"][
            "speakCommandKeys"] and gesture.shouldReportAsCommand:
        queueHandler.queueFunction(queueHandler.eventQueue, speech.speakMessage, gesture.displayName)
    gesture.reportExtra()
    # #2953: if an intercepted command Script (script that sends a gesture) is queued
    # then queue all following gestures (that don't have a script) with a fake script so that they remain in order.
    if not script and scriptHandler._numIncompleteInterceptedCommandScripts:
        script = lambda gesture: gesture.send()
    if script:
        scriptHandler.queueScript(script, gesture)
        return
    else:
        # Clear memorized last script to avoid getLastScriptRepeatCount detect a repeat
        # in case an unbound gesture is executed between two identical bound gestures.
        queueHandler.queueFunction(queueHandler.eventQueue, scriptHandler.clearLastScript)
        raise NoInputGestureAction
def convert_to_line_result_format(self, apiResult):
    """Convert an image analysis response into the line result structure.

    The first "line" is always the full descriptive text; subsequent
    entries add celebrity, object, brand and face rectangles when present.

    @param apiResult: decoded image analysis API response.
    @return: list of lines, each a list of dicts with x/y/width/height/text.
    """
    # Whole-image description covering the full frame.
    lineResult = [[{
        "x": 0,
        "y": 0,
        "width": 1,
        "height": 1,
        "text": self.extract_text(apiResult),
    }]]
    # Celebrity detections, nested under category details.
    if "categories" in apiResult:
        for category in apiResult["categories"]:
            if "detail" in category:
                entries = []
                if "celebrities" in category["detail"]:
                    for result in category["detail"]["celebrities"]:
                        entries.append({
                            "x": result["faceRectangle"]["x"],
                            "y": result["faceRectangle"]["y"],
                            "width": result["faceRectangle"]["w"],
                            "height": result["faceRectangle"]["h"],
                            "text": result["name"]
                        })
                if len(entries) > 0:
                    lineResult.append(entries)
    # Detected objects.
    if "objects" in apiResult and len(apiResult["objects"]) > 0:
        objectResult = []
        resultSets = apiResult["objects"]
        for result in resultSets:
            objectResult.append({
                "x": result["rectangle"]["x"],
                "y": result["rectangle"]["y"],
                "width": result["rectangle"]["w"],
                "height": result["rectangle"]["h"],
                "text": result["object"]
            })
        lineResult.append(objectResult)
    # Detected brands.
    if "brands" in apiResult and len(apiResult["brands"]) > 0:
        brandResult = []
        resultSets = apiResult["brands"]
        for result in resultSets:
            brandResult.append({
                "x": result["rectangle"]["x"],
                "y": result["rectangle"]["y"],
                "width": result["rectangle"]["w"],
                "height": result["rectangle"]["h"],
                "text": result["name"]
            })
        lineResult.append(brandResult)
    # Detected faces, described via getFaceDescription.
    if "faces" in apiResult and len(apiResult["faces"]) > 0:
        faceResult = []
        resultSets = apiResult["faces"]
        for result in resultSets:
            faceResult.append({
                "x": result["faceRectangle"]["x"],
                "y": result["faceRectangle"]["y"],
                "width": result["faceRectangle"]["w"],
                "height": result["faceRectangle"]["h"],
                "text": self.getFaceDescription(result)
            })
        lineResult.append(faceResult)
    log.io(lineResult)
    return lineResult
def executeGesture(self, gesture):
    """Perform the action associated with a gesture.
    @param gesture: The gesture to execute.
    @type gesture: L{InputGesture}
    @raise NoInputGestureAction: If there is no action to perform.
    """
    if watchdog.isAttemptingRecovery:
        # The core is dead, so don't try to perform an action.
        # This lets gestures pass through unhindered where possible,
        # as well as stopping a flood of actions when the core revives.
        raise NoInputGestureAction
    script = gesture.script
    focus = api.getFocusObject()
    if focus.sleepMode is focus.SLEEP_FULL or (
            focus.sleepMode and not getattr(script, 'allowInSleepMode', False)):
        raise NoInputGestureAction
    # Track whether say-all was running when this gesture (or the first
    # modifier of its shortcut) was pressed, so scripts can resume it.
    wasInSayAll = False
    if gesture.isModifier:
        if not self.lastModifierWasInSayAll:
            wasInSayAll = self.lastModifierWasInSayAll = sayAllHandler.isRunning()
    elif self.lastModifierWasInSayAll:
        wasInSayAll = True
        self.lastModifierWasInSayAll = False
    else:
        wasInSayAll = sayAllHandler.isRunning()
    if wasInSayAll:
        gesture.wasInSayAll = True
    speechEffect = gesture.speechEffectWhenExecuted
    if speechEffect == gesture.SPEECHEFFECT_CANCEL:
        queueHandler.queueFunction(queueHandler.eventQueue, speech.cancelSpeech)
    elif speechEffect in (gesture.SPEECHEFFECT_PAUSE, gesture.SPEECHEFFECT_RESUME):
        queueHandler.queueFunction(
            queueHandler.eventQueue, speech.pauseSpeech,
            speechEffect == gesture.SPEECHEFFECT_PAUSE)
    if log.isEnabledFor(log.IO) and not gesture.isModifier:
        log.io("Input: %s" % gesture.logIdentifier)
    # A capture function may consume the gesture entirely.
    if self._captureFunc:
        try:
            if self._captureFunc(gesture) is False:
                return
        except:
            log.error("Error in capture function, disabling", exc_info=True)
            self._captureFunc = None
    if gesture.isModifier:
        raise NoInputGestureAction
    if config.conf["keyboard"][
            "speakCommandKeys"] and gesture.shouldReportAsCommand:
        queueHandler.queueFunction(queueHandler.eventQueue, speech.speakMessage, gesture.displayName)
    # nvdajp begin
    # NOTE(review): reading the async key state of backspace on enter
    # presses appears to reset its pressed state; confirm intent.
    if hasattr(gesture, "vkCode") and gesture.vkCode == winUser.VK_RETURN:
        dummy = winUser.getAsyncKeyState(winUser.VK_BACK)
    # nvdajp end
    gesture.reportExtra()
    # #2953: if an intercepted command Script (script that sends a gesture) is queued
    # then queue all following gestures (that don't have a script) with a fake script so that they remain in order.
    if not script and scriptHandler._numIncompleteInterceptedCommandScripts:
        script = lambda gesture: gesture.send()
    if script:
        scriptHandler.queueScript(script, gesture)
        return
    raise NoInputGestureAction
def getImageFromClipboard(cls):
    """Grab an image from the clipboard.

    Handles three clipboard formats:
      - CF_DIB: a bitmap, grabbed directly via PIL's ImageGrab.
      - CF_HDROP: pasted/copied files; the first image file found is opened.
      - CF_UNICODETEXT: text that may be a path to an image file.

    @return: a PIL image, or None if nothing usable is on the clipboard.
    """
    CF_DIB = 8
    CF_HDROP = 15
    CF_UNICODETEXT = 13
    clipboardImage = None
    formats = cls.enumerateClipboardFormat()
    if CF_DIB in formats:
        clipboardImage = ImageGrab.grabclipboard()
    elif CF_HDROP in formats:
        try:
            filePathList = []
            with winUser.openClipboard(gui.mainFrame.Handle):
                rawData = windll.user32.GetClipboardData(CF_HDROP)
                if not rawData:
                    ui.message(_("Error occurred while getting pasted file."))
                rawData = winKernel.HGLOBAL(rawData, autoFree=False)
                with rawData.lock() as addr:
                    # Query the number of dropped files first.
                    fileCount = windll.shell32.DragQueryFileW(
                        c_uint32(addr), c_uint32(0xFFFFFFFF), c_uint32(0), c_uint32(0)
                    )
                    for c in range(fileCount):
                        BUFFER_SIZE = 4096
                        filePath = create_unicode_buffer(BUFFER_SIZE)
                        # Pass the buffer object itself; wrapping it in
                        # c_uint32 truncated the pointer and failed.
                        windll.shell32.DragQueryFileW(
                            c_uint32(addr), c_uint32(c), filePath, c_uint32(BUFFER_SIZE)
                        )
                        filePathList.append(wstring_at(filePath, size=BUFFER_SIZE).rstrip('\x00'))
            log.debug("filePathList\n{0}".format(filePathList))
            for fileName in filePathList:
                # TODO Add a prompt for users to choose from
                import os
                if os.path.isfile(fileName):
                    # Open the dropped file itself; the raw clipboard
                    # handle is not a path and cannot be opened.
                    clipboardImage = Image.open(fileName)
                    clipboardImage = clipboardImage.convert("RGB")
                    return clipboardImage
        except TypeError as e:
            log.io(e)
    elif CF_UNICODETEXT in formats:
        # TODO extract url or file path from text then grab an image from it.
        try:
            from api import getClipData
            import os
            text = getClipData()
            if os.path.exists(text):
                if os.path.isfile(text):
                    clipboardImage = Image.open(text)
                else:
                    # Translators: Reported when text in clipboard is not a valid path
                    ui.message(_(u"Text in clipboard is the name of a directory."))
            else:
                # Translators: Reported when text in clipboard is not a valid path
                ui.message(_(u"Text in clipboard is not a valid path."))
        except IOError:
            # Translators: Reported when cannot get content of the path specified
            errMsg = _("The file specified in clipboard is not an image")
            ui.message(errMsg)
    return clipboardImage
def executeKeyboardGesture(self, gesture, bypassRemanence=False):
    """Perform the action associated with a gesture.
    @param gesture: The gesture to execute
    @type gesture: L{InputGesture}
    @param bypassRemanence: when True, skip the remanence replacement step
        (numpad key replacement still applies)
    @type bypassRemanence: bool
    @raise NoInputGestureAction: If there is no action to perform.
    """
    # Gestures created elsewhere may not carry a noAction flag; default it off.
    if not hasattr(gesture, "noAction"):
        gesture.noAction = False
    if watchdog.isAttemptingRecovery:
        # The core is dead, so don't try to perform an action.
        # This lets gestures pass through unhindered where possible,
        # as well as stopping a flood of actions when the core revives.
        raise NoInputGestureAction
    # Remanence may translate this gesture into a replacement; if so, queue
    # the replacement and stop processing the original.
    newGesture = self.manageRemanence(gesture) if not bypassRemanence else None
    if newGesture is not None:
        queueHandler.queueFunction(queueHandler.eventQueue, self.executeNewGesture, newGesture)
        return
    # Likewise for numpad key replacement.
    newGesture = self.getNumpadKeyReplacement(gesture)
    if newGesture is not None:
        queueHandler.queueFunction(queueHandler.eventQueue, self.executeNewGesture, newGesture)
        return
    script = gesture.script
    focus = api.getFocusObject()
    # In sleep mode, only scripts explicitly flagged allowInSleepMode may run.
    if focus.sleepMode is focus.SLEEP_FULL \
            or (focus.sleepMode and not getattr(script, 'allowInSleepMode', False)):
        raise NoInputGestureAction
    # Remember whether say-all was running when this gesture (or its leading
    # modifier) was pressed, so scripts can resume it afterwards.
    wasInSayAll = False
    if gesture.isModifier:
        if not _NVDA_InputManager.lastModifierWasInSayAll:
            wasInSayAll = _NVDA_InputManager.lastModifierWasInSayAll = sayAllHandler.isRunning()
    elif _NVDA_InputManager.lastModifierWasInSayAll:
        wasInSayAll = True
        _NVDA_InputManager.lastModifierWasInSayAll = False
    else:
        wasInSayAll = sayAllHandler.isRunning()
    if wasInSayAll:
        gesture.wasInSayAll = True
    # Apply the gesture's declared effect on in-progress speech.
    speechEffect = gesture.speechEffectWhenExecuted
    if speechEffect == gesture.SPEECHEFFECT_CANCEL:
        queueHandler.queueFunction(queueHandler.eventQueue, speech.cancelSpeech)
    elif speechEffect in (gesture.SPEECHEFFECT_PAUSE, gesture.SPEECHEFFECT_RESUME):  # noqa:E501
        queueHandler.queueFunction(
            queueHandler.eventQueue, speech.pauseSpeech,
            speechEffect == gesture.SPEECHEFFECT_PAUSE)
    # Keep the system and display awake on user input (py3 builds only here).
    if py3 and gesture.shouldPreventSystemIdle:
        winKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED | winKernel.ES_DISPLAY_REQUIRED)
    if log.isEnabledFor(log.IO) and not gesture.isModifier:
        _NVDA_InputManager._lastInputTime = time.time()
        log.io("Input: %s" % gesture.identifiers[0])
    # A registered capture function may intercept the gesture; returning
    # False swallows it entirely.
    if _NVDA_InputManager._captureFunc:
        try:
            if _NVDA_InputManager._captureFunc(gesture) is False:
                return
        except:  # noqa:E722
            log.error("Error in capture function, disabling", exc_info=True)
            _NVDA_InputManager._captureFunc = None
    if gesture.isModifier:
        if gesture.noAction:
            gesture.normalizedModifiers = []
            return
        raise NoInputGestureAction
    self.speakGesture(gesture)
    if not script:
        gesture.reportExtra()
    # #2953: if an intercepted command script (a script that sends a gesture) is queued,
    # then queue all following gestures (that don't have a script) with a fake
    # script so that they remain in order.
    if not script and (bypassRemanence or scriptHandler._numIncompleteInterceptedCommandScripts):
        script = lambda gesture: gesture.send()  # noqa:E731
    if script:
        scriptHandler.queueScript(script, gesture)
        return
    raise NoInputGestureAction
def checkAndResizeImage(self, image):
    """
    Check image size against this engine's requirements and resize if needed.
    @param image: captured image to validate
    @type image: PIL.Image.Image
    @return: resized image, or False when the image cannot be made valid
        (or the user cancels the resize prompt)
    @rtype: PIL.Image.Image or bool
    """
    isImageValid = True
    width = image.width
    height = image.height
    widthResizeFactor = 1
    heightResizeFactor = 1
    msg = u"Original size\nwidth:\n{w}\nheight:\n{h}".format(w=width, h=height)
    # FIX: use float division so the ratio is correct under Python 2's
    # integer division, matching the float(...) divisions below.
    if width >= height:
        aspectRatio = float(width) / height
    else:
        aspectRatio = float(height) / width
    if aspectRatio > (float(self.maxHeight) / self.minWidth):
        isImageValid = False
        # Translators: Reported when error occurred during image resizing
        errorMsg = _(
            u"Image aspect ratio is too big. Cannot resize properly for this engine."
        )
    else:
        # A factor of 1 means the dimension already fits; otherwise scale
        # towards the nearest bound.
        if self.minWidth <= width <= self.maxWidth:
            widthResizeFactor = 1
        elif width < self.minWidth:
            widthResizeFactor = (float(self.minWidth) / width) + 1
        else:
            widthResizeFactor = (float(self.maxWidth) / width) + 1
        if self.minHeight <= height <= self.maxHeight:
            heightResizeFactor = 1
        elif height < self.minHeight:
            heightResizeFactor = (float(self.minHeight) / height) + 1
        else:
            heightResizeFactor = (float(self.maxHeight) / height) + 1
        msg += u"\nwidthResizeFactor:\n{0}\nheightResizeFactor:\n{1}".format(
            widthResizeFactor, heightResizeFactor)
        # Translators: Reported when error occurred during image conversion
        errorMsg = _(u"Error occurred when converting images")
        # FIX: these two limit checks were duplicated verbatim in the
        # original; they run once now.
        if widthResizeFactor >= self.resizeUpperLimit:
            # Translators: Reported when image size is not valid
            errorMsg = _(u"Image width is too big for this engine")
            isImageValid = False
        if heightResizeFactor <= self.resizeLowerLimit:
            isImageValid = False
            # Translators: Reported when error occurred during image conversion
            errorMsg = _(u"Image height is too small for this engine")
    self.imageInfo.resizeFactor = int(max(widthResizeFactor, heightResizeFactor))
    if heightResizeFactor != 1 and widthResizeFactor != 1 and config.conf[
            "onlineOCRGeneral"]["notifyIfResizeRequired"]:
        if gui.messageBox(
                # Translators: The confirmation prompt displayed when the image need to be resized.
                _("Image size is not proper for recognition. Do you want to resize? Press OK to continue. Press Cancel to cancel recognition"),
                # Translators: The title of the confirmation dialog for the image need to be resized.
                _("Confirm Resize"),
                wx.OK | wx.CANCEL | wx.ICON_QUESTION, gui.mainFrame) != wx.OK:
            # FIX: return False (was a bare return) so callers get the same
            # falsy sentinel used for every other failure path.
            return False
    if isImageValid:
        image = self.getResizedImage()
        width = image.width
        height = image.height
        msg += u"\nSize after resizing\nwidth:\n{w}\nheight:\n{h}".format(
            w=width, h=height)
        log.io(msg)
        if width * height > self.maxPixels:
            # Shrink asymptotically until the pixel count fits or the image
            # would drop below the minimum dimensions.
            pixelCount = image.width * image.height
            while pixelCount >= self.maxPixels and image.width >= self.minWidth and image.height >= self.minHeight:
                self.imageInfo.resizeFactor = self.imageInfo.resizeFactor * self.asymptoticResizeFactor
                image = self.getResizedImage()
                # FIX: recompute the pixel count each iteration; the original
                # never updated it, so the loop kept shrinking until the
                # minimum dimensions were reached regardless of pixel count.
                pixelCount = image.width * image.height
                if config.conf["onlineOCRGeneral"]["verboseDebugLogging"]:
                    msg = "newWidth\n{0}\nnewHeight\n{1}\npixelCount\n{2}".format(
                        image.width, image.height, pixelCount)
                    log.io(msg)
            if image.width * image.height > self.maxPixels:
                isImageValid = False
                # Translators: Reported when error occurred during image resizing
                ui.message(_(u"Image has too many pixels."))
                return False
            else:
                return self.getResizedImage()
        else:
            return self.getResizedImage()
    else:
        log.io(msg)
        ui.message(errorMsg)
        return False
def executeGesture(self, gesture):
    """Perform the action associated with a gesture.
    @param gesture: The gesture to execute.
    @type gesture: L{InputGesture}
    @raise NoInputGestureAction: If there is no action to perform.
    """
    if watchdog.isAttemptingRecovery:
        # The core is dead, so don't try to perform an action.
        # This lets gestures pass through unhindered where possible,
        # as well as stopping a flood of actions when the core revives.
        raise NoInputGestureAction
    script = gesture.script
    # BrailleExtender-specific handling: honour the braille keyboard lock and
    # decide whether this braille gesture should interrupt speech.
    if "brailleDisplayDrivers" in str(type(gesture)):
        # While the braille keyboard is locked, only the unlock script may run.
        if instanceGP.brailleKeyboardLocked and (
                (hasattr(script, "__func__") and script.__func__.__name__ != "script_toggleLockBrailleKeyboard")
                or not hasattr(script, "__func__")):
            return
        if not config.conf["brailleExtender"]['stopSpeechUnknown'] and gesture.script == None:
            stopSpeech = False
        elif hasattr(script, "__func__") and (script.__func__.__name__ in [
            # Scripts that should never interrupt speech (input, volume,
            # modifier emulation, etc.).
            "script_braille_dots", "script_braille_enter",
            "script_volumePlus", "script_volumeMinus", "script_toggleVolume",
            "script_hourDate",
            "script_ctrl", "script_alt", "script_nvda", "script_win",
            "script_ctrlAlt", "script_ctrlAltWin", "script_ctrlAltWinShift",
            "script_ctrlAltShift", "script_ctrlWin", "script_ctrlWinShift",
            "script_ctrlShift", "script_altWin", "script_altWinShift",
            "script_altShift", "script_winShift"
        ] or (not config.conf["brailleExtender"]['stopSpeechScroll'] and
              script.__func__.__name__ in ["script_braille_scrollBack", "script_braille_scrollForward"])):
            stopSpeech = False
        else:
            stopSpeech = True
    else:
        stopSpeech = True
    focus = api.getFocusObject()
    # In sleep mode, only scripts explicitly flagged allowInSleepMode may run.
    if focus.sleepMode is focus.SLEEP_FULL or (focus.sleepMode and not getattr(
            script, 'allowInSleepMode', False)):
        raise NoInputGestureAction
    # Remember whether say-all was running when this gesture (or its leading
    # modifier) was pressed, so scripts can resume it afterwards.
    wasInSayAll = False
    if gesture.isModifier:
        if not self.lastModifierWasInSayAll:
            wasInSayAll = self.lastModifierWasInSayAll = sayAllHandler.isRunning()
    elif self.lastModifierWasInSayAll:
        wasInSayAll = True
        self.lastModifierWasInSayAll = False
    else:
        wasInSayAll = sayAllHandler.isRunning()
    if wasInSayAll:
        gesture.wasInSayAll = True
    speechEffect = gesture.speechEffectWhenExecuted
    if not stopSpeech:
        # BrailleExtender decided this gesture must not interrupt speech.
        pass
    elif speechEffect == gesture.SPEECHEFFECT_CANCEL:
        queueHandler.queueFunction(queueHandler.eventQueue, speech.cancelSpeech)
    elif speechEffect in (gesture.SPEECHEFFECT_PAUSE, gesture.SPEECHEFFECT_RESUME):
        queueHandler.queueFunction(
            queueHandler.eventQueue, speech.pauseSpeech,
            speechEffect == gesture.SPEECHEFFECT_PAUSE)
    if log.isEnabledFor(log.IO) and not gesture.isModifier:
        self._lastInputTime = time.time()
        log.io("Input: %s" % gesture.identifiers[0])
    # A registered capture function may intercept the gesture; returning
    # False swallows it entirely.
    if self._captureFunc:
        try:
            if self._captureFunc(gesture) is False:
                return
        except BaseException:
            log.error("Error in capture function, disabling", exc_info=True)
            self._captureFunc = None
    if gesture.isModifier:
        raise NoInputGestureAction
    if config.conf["keyboard"][
            "speakCommandKeys"] and gesture.shouldReportAsCommand:
        queueHandler.queueFunction(queueHandler.eventQueue, speech.speakMessage, gesture.displayName)
    gesture.reportExtra()
    # #2953: if an intercepted command Script (script that sends a gesture) is queued
    # then queue all following gestures (that don't have a script) with a fake script so that they remain in order.
    if not script and scriptHandler._numIncompleteInterceptedCommandScripts:
        script = lambda gesture: gesture.send()
    if script:
        scriptHandler.queueScript(script, gesture)
        return
    raise NoInputGestureAction
def extract_text(apiResult):
    """Join the recognized text fragments from an OCR API response.

    @param apiResult: parsed JSON response; its u"result" list holds entries
        that each carry a u"content" string
    @type apiResult: dict
    @return: every content fragment, joined by single spaces
    @rtype: unicode
    """
    fragments = []
    for entry in apiResult[u"result"]:
        text = entry[u"content"]
        # Mirror each fragment to the I/O log, as the rest of this module does.
        log.io(text)
        fragments.append(text)
    return u" ".join(fragments)