def _getSelectionOffsets(self):
	fields = self._storyFieldsAndRects[0]
	startOffset = None
	endOffset = None
	curOffset = 0
	inHighlightChunk = False
	for item in fields:
		if (
			isinstance(item, textInfos.FieldCommand)
			and item.command == "formatChange"
			and item.field.get('color', None) == RGB(red=255, green=255, blue=255)
		):
			# Text reported with a white colour is treated as part of the highlighted (selected) chunk.
			inHighlightChunk = True
			if startOffset is None:
				startOffset = curOffset
		elif isinstance(item, six.string_types):
			try:
				# textUtils is only available in newer NVDA versions; it counts offsets in UTF-16 code units.
				import textUtils
				curOffset += textUtils.WideStringOffsetConverter(item).wideStringLength
			except ImportError:
				curOffset += len(item)
			if inHighlightChunk:
				endOffset = curOffset
		else:
			inHighlightChunk = False
	if startOffset is not None and endOffset is not None:
		return (startOffset, endOffset)
	# No highlighted chunk was found; fall back to a collapsed selection at the caret.
	offset = self._getCaretOffset()
	return offset, offset
def loadConfAttribra():
	global confAttribra
	try:
		cfg = ConfigObj(cfgFileAttribra, encoding="UTF-8")
		for app, mapping in cfg.items():
			mappings = {}
			for name, value in mapping.items():
				if isinstance(value, str):
					if value.startswith("RGB("):
						# It's an RGB object, e.g. "RGB(255, 0, 0)"
						rgbval = value.split("RGB(")[1]
						rgbval = rgbval.split(")")[0]
						rgbval = rgbval.split(",")
						mappings[name] = [RGB(int(rgbval[0]), int(rgbval[1]), int(rgbval[2]))]
					else:
						try:
							# If possible, store both the value and its int form.
							mappings[name] = [value, int(value)]
						except ValueError:
							mappings[name] = [value]
				else:
					mappings[name] = value
			confAttribra[app] = mappings
	except IOError:
		log.debugWarning("No Attribra config file found")
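# A rough illustration of what loadConfAttribra() produces, assuming a hypothetical config
# file with one application section (the section name "winword" and the attribute names
# below are made up for the example, not keys from a real Attribra configuration):
#
#   [winword]
#   background = "RGB(255, 255, 0)"
#   bold = 1
#
# After parsing, confAttribra would hold roughly:
expectedConfAttribra = {
	"winword": {
		"background": [RGB(255, 255, 0)],  # "RGB(...)" strings become RGB objects
		"bold": ["1", 1],  # numeric strings keep both their str and int forms
	},
}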
def generate_rainbow(number):
	data = []
	h = 0.0
	for i in range(number):
		# hsv_to_rgb is expected to behave like colorsys.hsv_to_rgb, returning float
		# components in the range 0.0-1.0 (capped at 0.05 here by the fixed brightness).
		r, g, b = hsv_to_rgb(h, 1.0, 0.05)
		data.append(RGB(r, g, b))
		# Step the hue evenly around the colour wheel.
		h += 1.0 / number
		if h > 1:
			h = 0.0
	return data
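# A minimal usage sketch, assuming hsv_to_rgb is colorsys.hsv_to_rgb and that RGB exposes
# red/green/blue attributes as NVDA's colors.RGB namedtuple does (both are assumptions,
# based only on how the function above is written):
palette = generate_rainbow(8)
eightBit = [
	RGB(int(c.red * 255), int(c.green * 255), int(c.blue * 255))
	for c in palette
]  # scaled to 0-255 if an 8-bit colour is needed elsewhere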
""" Handler for NVDA's legacy Windows Console support, used in situations where UIA isn't available. """ #: How often to check whether the console is dead (in ms). CHECK_DEAD_INTERVAL = 100 consoleObject = None #:The console window that is currently in the foreground. consoleWinEventHookHandles = [ ] #:a list of currently registered console win events. consoleOutputHandle = None checkDeadTimer = None CONSOLE_COLORS_TO_RGB = ( #http://en.wikipedia.org/wiki/Color_Graphics_Adapter RGB(0x00, 0x00, 0x00), #black RGB(0x00, 0x00, 0xAA), #blue RGB(0x00, 0xAA, 0x00), #green RGB(0x00, 0xAA, 0xAA), #cyan RGB(0xAA, 0x00, 0x00), #red RGB(0xAA, 0x00, 0xAA), #magenta RGB(0xAA, 0x55, 0x00), #brown RGB(0xAA, 0xAA, 0xAA), #white RGB(0x55, 0x55, 0x55), #gray RGB(0x55, 0x55, 0xFF), #light blue RGB(0x55, 0xFF, 0x55), #light green RGB(0x55, 0xFF, 0xFF), #light cyan RGB(0xFF, 0x55, 0x55), #light red RGB(0xFF, 0x55, 0xFF), #light magenta RGB(0xFF, 0xFF, 0x55), #yellow RGB(0xFF, 0xFF, 0xFF), #white (high intensity)
A higher width reduces the inner dimensions of the rectangle.
Therefore, if you need to increase the outer dimensions of the rectangle,
you need to increase the margin as well.
@type width: int
@ivar style: The style of the lines to be drawn;
	One of the C{winGDI.DashStyle*} enumeration constants.
@type style: int
@ivar margin: The number of pixels between the highlight's rectangle
	and the rectangle of the object to be highlighted.
	A higher margin stretches the highlight's rectangle.
	This value may also be negative.
@type margin: int
"""

BLUE = RGB(0x03, 0x36, 0xFF)
PINK = RGB(0xFF, 0x02, 0x66)
YELLOW = RGB(0xFF, 0xDE, 0x03)
DASH_BLUE = HighlightStyle(BLUE, 5, winGDI.DashStyleDash, 5)
SOLID_PINK = HighlightStyle(PINK, 5, winGDI.DashStyleSolid, 5)
SOLID_BLUE = HighlightStyle(BLUE, 5, winGDI.DashStyleSolid, 5)
SOLID_YELLOW = HighlightStyle(YELLOW, 2, winGDI.DashStyleSolid, 2)


class HighlightWindow(CustomWindow):
	transparency = 0xff
	className = u"NVDAHighlighter"
	windowName = u"NVDA Highlighter Window"
	windowStyle = winUser.WS_POPUP | winUser.WS_DISABLED
	extendedWindowStyle = winUser.WS_EX_TOPMOST | winUser.WS_EX_LAYERED
	transparentColor = 0  # Black
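# The style constants above all follow the same positional pattern documented in the docstring:
# HighlightStyle(color, width, style, margin). A minimal standalone sketch of a custom style,
# assuming the constructor takes exactly those four arguments as the existing constants do
# (DASH_YELLOW is a hypothetical name, not part of the original module):
DASH_YELLOW = HighlightStyle(YELLOW, 2, winGDI.DashStyleDash, 3)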
def _isALink(pos):
	# Text rendered in the editor's link colour (blue, 0, 0, 192) is treated as a link.
	return DanaEdit._TIMatchesCondition(pos, 'color', RGB(red=0, green=0, blue=192))
def extract_text(self, apiResult):
	entries = []
	if "categories" in apiResult:
		# Translators: Result label for azure image analyzer
		entries.append(_(u"Categories:"))
		# Translators: Result label for azure image analyzer
		entries.append("{number} categories detected.".format(
			number=len(apiResult["categories"])))
		for category in apiResult["categories"]:
			entries.append(category["name"])
			if "detail" in category:
				if "celebrities" in category["detail"]:
					# Translators: Result label for azure image analyzer
					entries.append("{number} celebrities detected.".format(
						number=len(category["detail"]["celebrities"])))
					for celebrity in category["detail"]["celebrities"]:
						entries.append(celebrity["name"])
				if "landmarks" in category["detail"]:
					# Translators: Result label for azure image analyzer
					entries.append("{number} landmarks detected.".format(
						number=len(category["detail"]["landmarks"])))
					for landmark in category["detail"]["landmarks"]:
						entries.append(landmark["name"])
	if "adult" in apiResult:
		# Translators: Result label for azure image analyzer
		entries.append(_(u"Adult content detection:"))
		if apiResult["adult"]["isAdultContent"]:
			# Translators: Result label for azure image analyzer
			entries.append(_(u"This image contains adult content"))
		else:
			# Translators: Result label for azure image analyzer
			entries.append(_(u"This image does not contain adult content"))
		if apiResult["adult"]["isRacyContent"]:
			# Translators: Result label for azure image analyzer
			entries.append(_(u"This image contains racy content"))
		else:
			# Translators: Result label for azure image analyzer
			entries.append(_(u"This image does not contain racy content"))
	if "color" in apiResult:
		# Translators: Result label for azure image analyzer
		entries.append(_(u"Color detection:"))
		# Translators: Result label for azure image analyzer
		colorMsg = _(
			u"Dominant foreground color is {foreGroundColor}.\n"
			u"Dominant background color is {backGroundColor}."
		)
		entries.append(colorMsg.format(
			foreGroundColor=apiResult["color"]["dominantColorForeground"],
			backGroundColor=apiResult["color"]["dominantColorBackground"],
		))
		# The accent colour arrives as a six-digit hex string, e.g. "336699".
		hexAccentColor = apiResult["color"]["accentColor"]
		r, g, b = unpack("BBB", bytes.fromhex(hexAccentColor))
		rgbAccentColor = RGB(r, g, b)
		# Translators: Result label for azure image analyzer
		entries.append(
			_("Accent color is {color}, its hex code is {hex}.").format(
				hex=apiResult["color"]["accentColor"],
				color=rgbAccentColor.name))
		# Translators: Result label for azure image analyzer
		entries.append(_("Dominant colors:"))
		for color in apiResult["color"]["dominantColors"]:
			entries.append(color)
		if apiResult["color"]["isBWImg"]:
			# Translators: Result label for azure image analyzer
			entries.append(_(u"The image is black and white."))
		else:
			# Translators: Result label for azure image analyzer
			entries.append(_(u"The image is not black and white."))
	if "tags" in apiResult and len(apiResult["tags"]) > 0:
		# Translators: Result label for azure image analyzer
		entries.append("{number} tags detected.".format(
			number=len(apiResult["tags"])))
		for tag in apiResult["tags"]:
			entries.append(tag["name"])
	if "imageType" in apiResult:
		# Translators: Result label for azure image analyzer
		entries.append(_(u"Detected image type:"))
		if apiResult["imageType"]["clipArtType"] == 0:
			# Translators: Result label for azure image analyzer
			entries.append(_(u"The image is not a clip-art."))
		elif apiResult["imageType"]["clipArtType"] == 1:
			# Translators: Result label for azure image analyzer
			entries.append(_(u"Cannot tell whether the image is clip-art."))
		elif apiResult["imageType"]["clipArtType"] == 2:
			# Translators: Result label for azure image analyzer
			entries.append(_(u"The image is normal clip-art."))
		elif apiResult["imageType"]["clipArtType"] == 3:
			# Translators: Result label for azure image analyzer
			entries.append(_(u"The image is good clip-art."))
		if apiResult["imageType"]["lineDrawingType"] == 1:
			# Translators: Result label for azure image analyzer
			entries.append(_(u"The image is a line drawing."))
		else:
			# Translators: Result label for azure image analyzer
			entries.append(_(u"The image is not a line drawing."))
	if "description" in apiResult:
		# Translators: Result label for azure image analyzer
		entries.append("{number} results available.".format(
			number=len(apiResult["description"]["captions"])))
		for desc in apiResult["description"]["captions"]:
			entries.append(desc["text"])
	if "objects" in apiResult and len(apiResult["objects"]) > 0:
		# Translators: Result label for azure image analyzer
		entries.append(_(u"{number} objects detected.").format(
			number=len(apiResult["objects"])))
		resultSets = apiResult["objects"]
		if self.text_result:
			for result in resultSets:
				entries.append(result["object"])
	if "brands" in apiResult and len(apiResult["brands"]) > 0:
		# Translators: Result label for azure image analyzer
		entries.append(_(u"{number} brands detected.").format(
			number=len(apiResult["brands"])))
		resultSets = apiResult["brands"]
		if self.text_result:
			for result in resultSets:
				entries.append(result["name"])
	if "faces" in apiResult and len(apiResult["faces"]) > 0:
		# Translators: Result label for azure image analyzer
		entries.append(_(u"{number} faces detected.").format(
			number=len(apiResult["faces"])))
		if self.text_result:
			resultSets = apiResult["faces"]
			for result in resultSets:
				entries.append(self.getFaceDescription(result))
	return u"\r\n".join(entries)
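# The accent-colour handling above turns Azure's six-digit hex string into an RGB value.
# A standalone sketch of just that conversion, using only the standard library; hexToRGB is
# an illustrative helper name, not part of the original class, and RGB is assumed to be the
# same constructor used throughout these snippets:
from struct import unpack


def hexToRGB(hexColor):
	"""Convert a six-digit hex colour string such as "336699" into an RGB value."""
	r, g, b = unpack("BBB", bytes.fromhex(hexColor))
	return RGB(r, g, b)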