コード例 #1
0
 def setMode(self, modeID, report=False):
     """Switch the add-on to the audio screen mode identified by modeID.

     Tears down any running image player, then either disables AudioScreen
     (when the mode's player class is None) or instantiates the new player
     and a matching screen bitmap.

     @param modeID: Key into self.audioScreenModes selecting the mode.
     @param report: If True, announce the resulting mode via ui.message.
     @type report: bool
     """
     self.curAudioScreenMode = modeID
     modeInfo = self.audioScreenModes[modeID]
     # Detach the old player before terminating it so no other code can
     # observe a half-terminated player via self.imagePlayer.
     if self.imagePlayer:
         imagePlayer = self.imagePlayer
         self.imagePlayer = None
         imagePlayer.terminate()
     self.screenBitmap = None
     if modeInfo[1] is None:
         # A None player class means "AudioScreen off".
         if report: ui.message(_("AudioScreen off"))
     else:
         # Copy this mode's config section into a plain dict of keyword
         # arguments. Note: the original used dict.iteritems(), which is
         # Python 2 only and raises AttributeError on Python 3.
         modeConf = {
             k: v
             for k, v in config.conf["audioScreen_%s" %
                                     modeInfo[1].__name__].items()
         }
         self.imagePlayer = modeInfo[1](**modeConf)
         self.screenBitmap = screenBitmap.ScreenBitmap(
             self.imagePlayer.width, self.imagePlayer.height)
         if report:
             inputType = _("touch input") if touchHandler.handler else _(
                 "mouse input")
             ui.message(
                 _("AudioScreen mode {mode}, {inputType}").format(
                     mode=modeInfo[0], inputType=inputType))
コード例 #2
0
def recognizeNavigatorObject(recognizer):
	"""User interface function to recognize content in the navigator object.
	This should be called from a script or in response to a GUI action.
	@param recognizer: The content recognizer to use.
	@type recognizer: L{contentRecog.ContentRecognizer}
	"""
	global _activeRecog
	focus = api.getFocusObject()
	if isinstance(focus, RecogResultNVDAObject):
		# Translators: Reported when content recognition (e.g. OCR) is attempted,
		# but the user is already reading a content recognition result.
		ui.message(_("Already in a content recognition result"))
		return
	nav = api.getNavigatorObject()
	# Translators: Reported when content recognition (e.g. OCR) is attempted,
	# but the content is not visible.
	notVisible = _("Content is not visible")
	location = nav.location
	try:
		left, top, width, height = location
	except TypeError:
		# location was not a 4-tuple; the object has no usable screen bounds.
		log.debugWarning("Object returned location %r" % location)
		ui.message(notVisible)
		return
	try:
		imgInfo = RecogImageInfo.createFromRecognizer(left, top, width, height, recognizer)
	except ValueError:
		ui.message(notVisible)
		return
	# Only one recognition may run at a time; abort any in-flight one.
	if _activeRecog:
		_activeRecog.cancel()
	# Translators: Reporting when content recognition (e.g. OCR) begins.
	ui.message(_("Recognizing"))
	bitmap = screenBitmap.ScreenBitmap(imgInfo.recogWidth, imgInfo.recogHeight)
	pixelBuffer = bitmap.captureImage(left, top, width, height)
	_activeRecog = recognizer
	recognizer.recognize(pixelBuffer, imgInfo, _recogOnResult)
コード例 #3
0
def initialize():
	"""Initialize mouse tracking.

	Creates the shared 1x1 screen bitmap, resolves the object currently
	under the mouse cursor (falling back to the desktop object), records
	the current cursor position, and installs the low-level mouse hook.
	Sets the module globals curMousePos and scrBmpObj.
	"""
	global curMousePos, scrBmpObj
	scrBmpObj=screenBitmap.ScreenBitmap(1,1)
	(x,y)=winUser.getCursorPos()
	desktopObject=api.getDesktopObject()
	try:
		mouseObject=desktopObject.objectFromPoint(x,y)
	# Narrowed from a bare except, which would also swallow
	# KeyboardInterrupt/SystemExit.
	except Exception:
		log.exception("Error retrieving initial mouse object")
		mouseObject=None
	if not mouseObject:
		# Fall back to the desktop object already fetched above.
		mouseObject=desktopObject
	api.setMouseObject(mouseObject)
	curMousePos=(x,y)
	winInputHook.initialize()
	winInputHook.setCallbacks(mouse=internal_mouseEvent)
コード例 #4
0
def recognizeNavigatorObject(recognizer, filterNonGraphic=True):
    """User interface function to recognize content in the navigator object.
    This should be called from a script or in response to a GUI action.
    @param recognizer: The content recognizer to use.
    @type recognizer: L{contentRecog.ContentRecognizer}
    @param filterNonGraphic: Whether to abort when the recognizer reports
        the target object as non-graphic.
    @type filterNonGraphic: bool
    """
    if isinstance(api.getFocusObject(), RecogResultNVDAObject):
        # Translators: Reported when content recognition is attempted, but the user is already reading a
        # content recognition result.
        ui.message(_("Already in a content recognition result"))
        return

    # Get the object that currently has system focus
    obj = api.getFocusObject()
    # treeInterceptor may be None in some cases. If so, use the navigator object instead.
    if obj.treeInterceptor:
        isFocusModeEnabled = obj.treeInterceptor.passThrough
        # if Focus mode is enabled we must check if any child of the focus object is graphic because it
        # itself cannot be graphic
        if isFocusModeEnabled:
            recognizer.checkChildren = True
        # if focus mode is disabled, use the navigator object
        else:
            obj = api.getNavigatorObject()
    else:
        obj = api.getNavigatorObject()

    # if filterNonGraphic True, validate the object. If invalid end the recognition process
    if filterNonGraphic and not recognizer.validateObject(obj):
        return
    # Translators: Reported when content recognition is attempted, but the content is not visible.
    notVisibleMsg = _("Content is not visible")
    try:
        left, top, width, height = obj.location
    except TypeError:
        log.debugWarning("Object returned location %r" % obj.location)
        ui.message(notVisibleMsg)
        return
    # If the object bounds are not valid, end the recognition process.
    if not recognizer.validateBounds(obj.location):
        return
    try:
        imgInfo = RecogImageInfo.createFromRecognizer(left, top, width, height,
                                                      recognizer)
    except ValueError:
        ui.message(notVisibleMsg)
        return

    global _activeRecog
    if _activeRecog:
        # If a recognition process is already occurring and a new one is started after more than 3 seconds,
        # warn the user and block the new recognition process. If the delay is less than 3 seconds, the
        # user probably pressed the gesture multiple times so cancel the old process and let the new one
        # continue. Cancelling the old process results in errors that cause both recognition processes to
        # fail so we just update the resultHandlerClass of the previous recognition process instead of
        # cancelling it and end the new recognition process here.
        if ((time.time() - _activeRecog.timeCreated) <=
                3) and (_activeRecog.resultHandlerClass != BrowseableResult):
            _activeRecog.resultHandlerClass = BrowseableResult
            # Translators: Reporting when content recognition begins.
            ui.message(_("Recognizing"))
        else:
            # Wrapped in _() for translation, consistent with every other
            # user-facing message in this module.
            # Translators: Reported when a recognition is attempted while a previous one is still running.
            ui.message(
                _("Already running an image captioning process. Please try again later.")
            )
        return

    # capture object pixels
    sb = screenBitmap.ScreenBitmap(imgInfo.recogWidth, imgInfo.recogHeight)
    pixels = sb.captureImage(left, top, width, height)

    # calculate L{imageHash} using the inbuilt hash function. Only one channel is used to calculate the
    # hash to save time but all pixels in that channel must be used since using only part of the image may
    # cause false cache hits for images with padding.
    # NOTE: the outer loop walks columns (width), so each inner list holds one
    # column of red values — hence the column-major naming below.
    columnHashes = []
    for i in range(imgInfo.recogWidth):
        column = []
        for j in range(imgInfo.recogHeight):
            column.append(pixels[j][i].rgbRed)  # column major order
        columnHashes.append(hash(str(column)))
    imageHash = hash(str(columnHashes))

    global _cachedResults
    # check if the hash of the current object matches that of any previous result
    for result in _cachedResults:
        # if a match is found, hand the cached result to the recognizer's I{getResultHandler} method and
        # end the current recognition process here.
        if result[0] == imageHash:
            # The handler presents the result as a side effect; the previous
            # code bound it to an unused local.
            recognizer.getResultHandler(result)
            return

    # Translators: Reporting when content recognition begins.
    ui.message(_("Recognizing"))
    # Store a copy of the recognizer before image captioning really starts. This can also be used to check
    # recognition process is active
    _activeRecog = recognizer

    recognizer.recognize(imageHash, pixels, imgInfo, _recogOnResult)