Example #1
def isValidEncoding(enc):
    """Checks if it is a valid encoding"""
    norm_enc = encodings.normalize_encoding(enc).lower()
    if norm_enc in SUPPORTED_CODECS:
        return True
    if norm_enc in [
            encodings.normalize_encoding(supp_enc)
            for supp_enc in SUPPORTED_CODECS
    ]:
        return True

    # Check the aliases as well
    if norm_enc in encodings.aliases.aliases:
        return True
    return False
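Note: encodings.normalize_encoding only collapses punctuation into underscores; it does not lower-case the name or resolve aliases, which is why helpers like isValidEncoding above also lower-case the result and consult encodings.aliases.aliases. A minimal standalone sketch of that behaviour (the printed values are what current CPython typically produces and are meant as illustration only):

import encodings
import encodings.aliases

for raw in ('UTF-8', 'latin-1', 'ISO 8859-1'):
    norm = encodings.normalize_encoding(raw)   # punctuation collapsed to '_', case preserved
    canonical = encodings.aliases.aliases.get(norm.lower(), norm.lower())
    print(raw, '->', norm, '->', canonical)
# Typically prints:
#   UTF-8 -> UTF_8 -> utf_8
#   latin-1 -> latin_1 -> latin_1
#   ISO 8859-1 -> ISO_8859_1 -> latin_1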
Example #2
def _finder(name: str) -> Optional[codecs.CodecInfo]:
    name = encodings.normalize_encoding(name)
    try:
        m = NAME_PATTERN.match(name.strip())
        if not m:
            return None
        names = m.group(1).split('-')
        rewriters = []
        for n in names:
            if n not in REWRITER_REGISTER:
                try:
                    __import__('rewrite_magic.' + n)
                except ImportError as exc:
                    if exc.name != 'rewrite_magic.' + n:
                        raise
                if n not in REWRITER_REGISTER:
                    warning(
                        f"Unknown rewrite coding {n!r}. We aren't going to do anything."
                    )
                    return None
            rewriters.append(REWRITER_REGISTER[n])
        codec = RewriteCodec(rewriters)
        incdec, strred = get_incremental(codec)
        return codecs.CodecInfo(encode=codec.encode,
                                decode=codec.decode,
                                streamwriter=utf_8.streamwriter,
                                streamreader=strred,
                                incrementalencoder=utf_8.incrementalencoder,
                                incrementaldecoder=incdec,
                                name=name)
    except Exception:
        sys.excepthook(*sys.exc_info())
        raise
Example #3
    def x_text(self):
        """
        Extract Text Fields

        @todo: handle multiple strings separated by \x00

        sets: encoding, strings
        """
        data = self.rawdata
        self.encoding = encodings[ord(data[0])]
        rawtext = data[1:]

        if normalize_encoding(self.encoding) == 'latin_1':
            text = rawtext
            self.strings = text.split('\x00')
        else:
            text = rawtext.decode(self.encoding)
            if is_double_byte(self.encoding):
                self.strings = text.split('\x00\x00')
            else:
                self.strings = text.split('\x00')

        try:
            dummy = text.encode('utf_8')
            debug('Read Field: %s Len: %d Enc: %s Text: %s' %
                  (self.fid, self.length, self.encoding, str([text])))
        except UnicodeDecodeError:
            debug('Read Field: %s Len: %d Enc: %s Text: %s (Err)' %
                  (self.fid, self.length, self.encoding, str([text])))
Example #4
def search_function(name):
    name = encodings.normalize_encoding(name)  # Rather undocumented...
    if name in _extended_encodings:
        if name not in _cache:
            base_encoding, mapping = _extended_encodings[name]
            assert (name[-4:] == "_ttx")
            # Python 2 didn't have any of the encodings that we are implementing
            # in this file.  Python 3 added aliases for the East Asian ones, mapping
            # them "temporarily" to the same base encoding as us, with a comment
            # suggesting that full implementation will appear some time later.
            # As such, try the Python version of the x_mac_... first, if that is found,
            # use *that* as our base encoding.  This would make our encoding upgrade
            # to the full encoding when and if Python finally implements that.
            # http://bugs.python.org/issue24041
            base_encodings = [name[:-4], base_encoding]
            for base_encoding in base_encodings:
                try:
                    codecs.lookup(base_encoding)
                except LookupError:
                    continue
                _cache[name] = ExtendCodec(name, base_encoding, mapping)
                break
        return _cache[name].info

    return None
Example #5
def getNormalizedEncoding(enc, validityCheck=True):
    """Returns a normalized encoding or throws an exception"""
    if validityCheck:
        if not isValidEncoding(enc):
            raise Exception('Unsupported encoding ' + enc)
    norm_enc = encodings.normalize_encoding(enc).lower()
    return encodings.aliases.aliases.get(norm_enc, norm_enc)
Example #6
def normalize(localename):
    fullname = localename.lower()
    if ':' in fullname:
        fullname = fullname.replace(':', '.')
    if '.' in fullname:
        langname, encoding = fullname.split('.')[:2]
        fullname = langname + '.' + encoding
    else:
        langname = fullname
        encoding = ''
    norm_encoding = encoding.replace('-', '')
    norm_encoding = norm_encoding.replace('_', '')
    lookup_name = langname + '.' + encoding
    code = locale_alias.get(lookup_name, None)
    if code is not None:
        return code
    code = locale_alias.get(langname, None)
    if code is not None:
        if '.' in code:
            langname, defenc = code.split('.')
        else:
            langname = code
            defenc = ''
        if encoding:
            norm_encoding = encodings.normalize_encoding(encoding)
            norm_encoding = encodings.aliases.aliases.get(norm_encoding, norm_encoding)
            encoding = locale_encoding_alias.get(norm_encoding, norm_encoding)
        else:
            encoding = defenc
        if encoding:
            return langname + '.' + encoding
        else:
            return langname
    else:
        return localename
Example #7
File: codecs.py Project: behdad/fonttools
def search_function(name):
	name = encodings.normalize_encoding(name) # Rather undocumented...
	if name in _extended_encodings:
		if name not in _cache:
			base_encoding, mapping = _extended_encodings[name]
			assert(name[-4:] == "_ttx")
			# Python 2 didn't have any of the encodings that we are implementing
			# in this file.  Python 3 added aliases for the East Asian ones, mapping
			# them "temporarily" to the same base encoding as us, with a comment
			# suggesting that full implementation will appear some time later.
			# As such, try the Python version of the x_mac_... first, if that is found,
			# use *that* as our base encoding.  This would make our encoding upgrade
			# to the full encoding when and if Python finally implements that.
			# http://bugs.python.org/issue24041
			base_encodings = [name[:-4], base_encoding]
			for base_encoding in base_encodings:
				try:
					codecs.lookup(base_encoding)
				except LookupError:
					continue
				_cache[name] = ExtendCodec(name, base_encoding, mapping)
				break
		return _cache[name].info

	return None
Example #8
def search_function(encoding):
    """
    Register our "bad codecs" with Python's codecs API. This involves adding
    a search function that takes in an encoding name, and returns a codec
    for that encoding if it knows one, or None if it doesn't.

    The encodings this will match are:

    - Encodings of the form 'sloppy-windows-NNNN' or 'sloppy-iso-8859-N',
      where the non-sloppy version is an encoding that leaves some bytes
      unmapped to characters.
    - The 'utf-8-variants' encoding, which has the several aliases seen
      above.
    """
    if encoding in _CACHE:
        return _CACHE[encoding]

    norm_encoding = normalize_encoding(encoding)
    codec = None
    if norm_encoding in UTF8_VAR_NAMES:
        from ftfy.bad_codecs.utf8_variants import CODEC_INFO
        codec = CODEC_INFO
    elif norm_encoding.startswith('sloppy_'):
        from ftfy.bad_codecs.sloppy import CODECS
        codec = CODECS.get(norm_encoding)

    if codec is not None:
        _CACHE[encoding] = codec

    return codec
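A search function like the one above has no effect until it is passed to codecs.register. The following self-contained sketch shows the same registration pattern with a purely illustrative 'my-utf8' name that simply reuses the stock UTF-8 codec:

import codecs
import encodings

def _demo_search(name):
    # Only answer for our made-up alias; return None so other search functions get a chance.
    if encodings.normalize_encoding(name).lower() == 'my_utf8':
        return codecs.lookup('utf-8')
    return None

codecs.register(_demo_search)
print('héllo'.encode('my-utf8'))          # b'h\xc3\xa9llo'
print(b'h\xc3\xa9llo'.decode('MY_UTF8'))  # 'héllo'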
Example #9
def normalize(localename):
    fullname = localename.lower()
    if ':' in fullname:
        fullname = fullname.replace(':', '.')
    if '.' in fullname:
        langname, encoding = fullname.split('.')[:2]
        fullname = langname + '.' + encoding
    else:
        langname = fullname
        encoding = ''
    norm_encoding = encoding.replace('-', '')
    norm_encoding = norm_encoding.replace('_', '')
    lookup_name = langname + '.' + encoding
    code = locale_alias.get(lookup_name, None)
    if code is not None:
        return code
    code = locale_alias.get(langname, None)
    if code is not None:
        if '.' in code:
            langname, defenc = code.split('.')
        else:
            langname = code
            defenc = ''
        if encoding:
            norm_encoding = encodings.normalize_encoding(encoding)
            norm_encoding = encodings.aliases.aliases.get(norm_encoding, norm_encoding)
            encoding = locale_encoding_alias.get(norm_encoding, norm_encoding)
        else:
            encoding = defenc
        if encoding:
            return langname + '.' + encoding
        else:
            return langname
    else:
        return localename
Example #10
def normalize_encoding(encoding):
    "Returns a normalized form of the encoding."
    import encodings
    norm = encodings.normalize_encoding(encoding).lower()
    if norm in encodings.aliases.aliases.values():
        return norm
    return encodings.aliases.aliases.get(norm)
Example #11
    def __init__(
        self,
        parent,
        id=wx.ID_ANY,
        msg=u"",
        title=u"",
        elist=list(),
        default=u"",
        style=wx.CAPTION,
        pos=wx.DefaultPosition,
        name=EncodingDialogNameStr,
    ):
        """Create the encoding dialog
        @keyword msg: Dialog Message
        @keyword title: Dialog Title
        @keyword elist: list of encodings to use or None to use all
        @keyword default: Default selected encoding

        """
        if not len(elist):
            elist = GetAllEncodings()

        default = encodings.normalize_encoding(default)
        if default and default.lower() in elist:
            sel = default.lower()
        else:
            sel = locale.getpreferredencoding(False)

        choicedlg.ChoiceDialog.__init__(self, parent, id, msg, title, elist, sel, pos, style)
Example #12
    def x_text(self):
        """
        Extract Text Fields

        @todo: handle multiple strings separated by \x00

        sets: encoding, strings
        """
        data = self.rawdata
        self.encoding = encodings[ord(data[0])]
        rawtext = data[1:]
        
        if normalize_encoding(self.encoding) == 'latin_1':
            text = rawtext
            self.strings = text.split('\x00')
        else:
            text = rawtext.decode(self.encoding)
            if is_double_byte(self.encoding):
                self.strings = text.split('\x00\x00')               
            else:
                self.strings = text.split('\x00')
                
        try:
            dummy = text.encode('utf_8')
            debug('Read Field: %s Len: %d Enc: %s Text: %s' %
                   (self.fid, self.length, self.encoding, str([text])))
        except UnicodeDecodeError:
            debug('Read Field: %s Len: %d Enc: %s Text: %s (Err)' %
                   (self.fid, self.length, self.encoding, str([text])))
Example #13
File: locale.py Project: Reve/eve
def normalize(localename):
    fullname = localename.lower()
    if ":" in fullname:
        fullname = fullname.replace(":", ".")
    if "." in fullname:
        langname, encoding = fullname.split(".")[:2]
        fullname = langname + "." + encoding
    else:
        langname = fullname
        encoding = ""
    norm_encoding = encoding.replace("-", "")
    norm_encoding = norm_encoding.replace("_", "")
    lookup_name = langname + "." + encoding
    code = locale_alias.get(lookup_name, None)
    if code is not None:
        return code
    code = locale_alias.get(langname, None)
    if code is not None:
        if "." in code:
            langname, defenc = code.split(".")
        else:
            langname = code
            defenc = ""
        if encoding:
            norm_encoding = encodings.normalize_encoding(encoding)
            norm_encoding = encodings.aliases.aliases.get(norm_encoding, norm_encoding)
            encoding = locale_encoding_alias.get(norm_encoding, norm_encoding)
        else:
            encoding = defenc
        if encoding:
            return langname + "." + encoding
        else:
            return langname
    else:
        return localename
Example #14
def _c18n_encoding(encoding):
    """Cannonicalize an encoding name
    This performs normalization and translates aliases using python's
    encoding aliases
    """
    normed = encodings.normalize_encoding(encoding).lower()
    return encodings.aliases.aliases.get(normed, normed)
Example #15
 def search_function(name):
     import encodings
     name = encodings.normalize_encoding(name)
     if name == "css":
         return (encode, decode, StreamReader, StreamWriter)
     elif name == "utf_8_sig":
         return (utf8sig_encode, utf8sig_decode, UTF8SigStreamReader, UTF8SigStreamWriter)
Example #16
File: encdlg.py Project: wangdyna/wxPython
    def __init__(self,
                 parent,
                 id=wx.ID_ANY,
                 msg=u'',
                 title=u'',
                 elist=list(),
                 default=u'',
                 style=wx.CAPTION,
                 pos=wx.DefaultPosition,
                 name=EncodingDialogNameStr):
        """Create the encoding dialog
        @keyword msg: Dialog Message
        @keyword title: Dialog Title
        @keyword elist: list of encodings to use or None to use all
        @keyword default: Default selected encoding

        """
        if not len(elist):
            elist = GetAllEncodings()

        default = encodings.normalize_encoding(default)
        if default and default.lower() in elist:
            sel = default.lower()
        else:
            sel = locale.getpreferredencoding(False)

        choicedlg.ChoiceDialog.__init__(self, parent, id, msg, title, elist,
                                        sel, pos, style)
Example #17
def search_function(encoding):
    """
    Register our "bad codecs" with Python's codecs API. This involves adding
    a search function that takes in an encoding name, and returns a codec
    for that encoding if it knows one, or None if it doesn't.

    The encodings this will match are:

    - Encodings of the form 'sloppy-windows-NNNN' or 'sloppy-iso-8859-N',
      where the non-sloppy version is an encoding that leaves some bytes
      unmapped to characters.
    - The 'utf-8-variants' encoding, which has the several aliases seen
      above.
    """
    if encoding in _CACHE:
        return _CACHE[encoding]

    norm_encoding = normalize_encoding(encoding)
    codec = None
    if norm_encoding in UTF8_VAR_NAMES:
        from ftfy.bad_codecs.utf8_variants import CODEC_INFO

        codec = CODEC_INFO
    elif norm_encoding.startswith("sloppy_"):
        from ftfy.bad_codecs.sloppy import CODECS

        codec = CODECS.get(norm_encoding)

    if codec is not None:
        _CACHE[encoding] = codec

    return codec
Example #18
File: encoding.py Project: janrygl/sir
def normalize_encoding(encoding):
    "Returns a normalized form of the encoding."
    import encodings
    norm = encodings.normalize_encoding(encoding).lower()
    if norm in encodings.aliases.aliases.values():
        return norm
    return encodings.aliases.aliases.get(norm)
Example #19
    def __init__(self, parent, msg=u'', elist=None, default=u''):
        """Create the panel
        @keyword msg: Display message
        @keyword elist: list of encodings to show or None to show all
        @keyword default: default encoding selection

        """
        wx.Panel.__init__(self, parent)

        # Attributes
        self._msg = msg
        self._encs = wx.Choice(self, wx.ID_ANY)
        self._selection = default
        self._bmp = None

        # Setup
        if elist is None:
            elist = GetAllEncodings()

        self._encs.SetItems(elist)
        default = encodings.normalize_encoding(default)
        if default and default.lower() in elist:
            self._encs.SetStringSelection(default)
        else:
            self._encs.SetStringSelection(locale.getpreferredencoding(False))
            self._selection = self._encs.GetStringSelection()

        # Layout
        self.__DoLayout()

        # Event Handlers
        self.Bind(wx.EVT_CHOICE, self.OnChoice, self._encs)
Example #20
 def search_function(name):
     import encodings
     name = encodings.normalize_encoding(name)
     if name == "css":
         return (encode, decode, StreamReader, StreamWriter)
     elif name == "utf_8_sig":
         return (utf8sig_encode, utf8sig_decode, UTF8SigStreamReader, UTF8SigStreamWriter)
Example #21
File: encdlg.py Project: wangdyna/wxPython
    def __init__(self, parent, id=wx.ID_ANY, msg=u'', title=u'',
                  elist=list(), default=u'',
                  style=wx.CAPTION, pos=wx.DefaultPosition,
                  size=wx.DefaultSize,
                  name=EncodingDialogNameStr):
        """Create the encoding dialog
        @param parent: Parent Window
        @keyword id: Dialog ID
        @keyword msg: Dialog Message
        @keyword title: Dialog Title
        @keyword elist: list of encodings to use or None to use all
        @keyword default: Default selected encoding
        @keyword style: Dialog Style bitmask
        @keyword pos: Dialog Postion
        @keyword size: Dialog Size
        @keyword name: Dialog Name

        """
        if not len(elist):
            elist = GetAllEncodings()

        default = encodings.normalize_encoding(default)
        if default and default.lower() in elist:
            sel = default.lower()
        else:
            sel = locale.getpreferredencoding(False)

        super(EncodingDialog, self).__init__(parent, id, msg, title,
                                             elist, sel, pos, size, style)
Example #22
def find_encodings(enc=None, system=False):
    """Find functions for encoding translations for a specific codec.

    :param str enc: The codec to find translation functions for. It will be
                    normalized by converting to lowercase, excluding
                    everything which is not ascii, and hyphens will be
                    converted to underscores.

    :param bool system: If True, find encodings based on the system's stdin
                        encoding, otherwise assume utf-8.

    :raises: :exc:LookupError if the normalized codec, ``enc``, cannot be
             found in Python's encoding translation map.
    """
    if not enc:
        enc = 'utf-8'

    if system:
        if getattr(sys.stdin, 'encoding', None) is not None:
            enc = sys.stdin.encoding
            log.debug("Obtained encoding from stdin: %s" % enc)
        else:
            enc = 'ascii'

    ## have to have lowercase to work, see
    ## http://docs.python.org/dev/library/codecs.html#standard-encodings
    enc = enc.lower()
    codec_alias = encodings.normalize_encoding(enc)

    codecs.register(encodings.search_function)
    coder = codecs.lookup(codec_alias)

    return coder
Example #23
File: encdlg.py Project: Diapolo10/editra
    def __init__(self, parent, id=wx.ID_ANY, msg='', title='',
                  elist=list(), default='',
                  style=wx.CAPTION, pos=wx.DefaultPosition,
                  size=wx.DefaultSize,
                  name=EncodingDialogNameStr):
        """Create the encoding dialog
        @param parent: Parent Window
        @keyword id: Dialog ID
        @keyword msg: Dialog Message
        @keyword title: Dialog Title
        @keyword elist: list of encodings to use or None to use all
        @keyword default: Default selected encoding
        @keyword style: Dialog Style bitmask
        @keyword pos: Dialog Postion
        @keyword size: Dialog Size
        @keyword name: Dialog Name

        """
        if not len(elist):
            elist = GetAllEncodings()

        default = encodings.normalize_encoding(default)
        if default and default.lower() in elist:
            sel = default.lower()
        else:
            sel = locale.getpreferredencoding(False)

        super(EncodingDialog, self).__init__(parent, id, msg, title,
                                             elist, sel, pos, size, style)
Example #24
    def _loadMetrics(self, afmFileName):
        """Loads in and parses font metrics"""
        #assert os.path.isfile(afmFileName), "AFM file %s not found" % afmFileName
        afmFileName = bruteForceSearchForFile(afmFileName)
        (topLevel, glyphData) = parseAFMFile(afmFileName)

        self.name = topLevel['FontName']
        self.familyName = topLevel['FamilyName']
        self.ascent = topLevel.get('Ascender', 1000)
        self.descent = topLevel.get('Descender', 0)
        self.capHeight = topLevel.get('CapHeight', 1000)
        self.italicAngle = topLevel.get('ItalicAngle', 0)
        self.stemV = topLevel.get('stemV', 0)
        self.xHeight = topLevel.get('XHeight', 1000)

        strBbox = topLevel.get('FontBBox', [0,0,1000,1000])
        tokens = strBbox.split()
        self.bbox = []
        for tok in tokens:
            self.bbox.append(int(tok))

        glyphWidths = {}
        for (cid, width, name) in glyphData:
            glyphWidths[name] = width
        self.glyphWidths = glyphWidths
        self.glyphNames = list(glyphWidths.keys())
        self.glyphNames.sort()

        # for font-specific encodings like Symbol, Dingbats, Carta we
        # need to make a new encoding as well....
        if topLevel.get('EncodingScheme', None) == 'FontSpecific':
            global _postScriptNames2Unicode
            if _postScriptNames2Unicode is None:
                try:
                    from reportlab.pdfbase._glyphlist import _glyphname2unicode
                    _postScriptNames2Unicode = _glyphname2unicode
                    del _glyphname2unicode
                except:
                    _postScriptNames2Unicode = {}
                    raise ValueError(
                            "cannot import module reportlab.pdfbase._glyphlist module\n"
                            "you can obtain a version from here\n"
                            "https://www.reportlab.com/ftp/_glyphlist.py\n"
                            )

            names = [None] * 256
            ex = {}
            rex  = {}
            for (code, width, name) in glyphData:
                if 0<=code<=255:
                    names[code] = name
                    u = _postScriptNames2Unicode.get(name,None)
                    if u is not None:
                        rex[code] = u
                        ex[u] = code
            encName = encodings.normalize_encoding('rl-dynamic-%s-encoding' % self.name)
            rl_codecs.RL_Codecs.add_dynamic_codec(encName,ex,rex)
            self.requiredEncoding = encName
            enc = Encoding(encName, names)
            registerEncoding(enc)
Example #25
def find_encodings(enc=None, system=False):
    """Find functions for encoding translations for a specific codec.

    :param str enc: The codec to find translation functions for. It will be
                    normalized by converting to lowercase, excluding
                    everything which is not ascii, and hyphens will be
                    converted to underscores.

    :param bool system: If True, find encodings based on the system's stdin
                        encoding, otherwise assume utf-8.

    :raises: :exc:LookupError if the normalized codec, ``enc``, cannot be
             found in Python's encoding translation map.
    """
    if not enc:
        enc = 'utf-8'

    if system:
        if getattr(sys.stdin, 'encoding', None) is not None:
            enc = sys.stdin.encoding
            log.debug("Obtained encoding from stdin: %s" % enc)
        else:
            enc = 'ascii'

    ## have to have lowercase to work, see
    ## http://docs.python.org/dev/library/codecs.html#standard-encodings
    enc = enc.lower()
    codec_alias = encodings.normalize_encoding(enc)

    codecs.register(encodings.search_function)
    coder = codecs.lookup(codec_alias)

    return coder
Example #26
    def __init__(
        self,
        binary=None,
        home=None,
        keyring=None,
        secring=None,
        use_agent=False,
        default_preference_list=None,
        verbose=False,
        options=None,
    ):

        self.binary = _util._find_binary(binary)
        self.homedir = home if home else _util._conf
        pub = _parsers._fix_unsafe(keyring) if keyring else "pubring.gpg"
        sec = _parsers._fix_unsafe(secring) if secring else "secring.gpg"
        self.keyring = os.path.join(self._homedir, pub)
        self.secring = os.path.join(self._homedir, sec)
        self.options = _parsers._sanitise(options) if options else None

        if default_preference_list:
            self._prefs = _check_preferences(default_preference_list, "all")
        else:
            self._prefs = "SHA512 SHA384 SHA256 AES256 CAMELLIA256 TWOFISH"
            self._prefs += " AES192 ZLIB ZIP Uncompressed"

        encoding = locale.getpreferredencoding()
        if encoding is None:  # This happens on Jython!
            encoding = sys.stdin.encoding
        self._encoding = encoding.lower().replace("-", "_")
        self._filesystemencoding = encodings.normalize_encoding(sys.getfilesystemencoding().lower())

        self._keyserver = "hkp://subkeys.pgp.net"
        self.__generated_keys = os.path.join(self.homedir, "generated-keys")

        try:
            assert self.binary, "Could not find binary %s" % binary
            assert isinstance(verbose, (bool, str, int)), "'verbose' must be boolean, string, or 0 <= n <= 9"
            assert isinstance(use_agent, bool), "'use_agent' must be boolean"
            if self.options is not None:
                assert isinstance(self.options, str), "options not string"
        except (AssertionError, AttributeError) as ae:
            log.error("GPGBase.__init__(): %s" % ae.message)
            raise RuntimeError(ae.message)
        else:
            if verbose is True:
                # The caller wants logging, but we need a valid --debug-level
                # for gpg. Default to "basic", and warn about the ambiguity.
                # (garrettr)
                verbose = "basic"
                log.warning('GPG(verbose=True) is ambiguous, defaulting to "basic" logging')
            self.verbose = verbose
            self.use_agent = use_agent

        if hasattr(self, "_agent_proc") and getattr(self, "_remove_agent", None) is True:
            if hasattr(self, "__remove_path__"):
                self.__remove_path__("pinentry")
Example #27
def _replace_encoding(code, encoding):
    if '.' in code:
        langname = code[:code.index('.')]
    else:
        langname = code
    norm_encoding = encodings.normalize_encoding(encoding)
    norm_encoding = encodings.aliases.aliases.get(norm_encoding, norm_encoding)
    encoding = locale_encoding_alias.get(norm_encoding, norm_encoding)
    return langname + '.' + encoding
Example #28
def _replace_encoding(code, encoding):
    if '.' in code:
        langname = code[:code.index('.')]
    else:
        langname = code
    norm_encoding = encodings.normalize_encoding(encoding)
    norm_encoding = encodings.aliases.aliases.get(norm_encoding, norm_encoding)
    encoding = locale_encoding_alias.get(norm_encoding, norm_encoding)
    return langname + '.' + encoding
Example #29
def auth_test_view(response: Response, password: Optional[str] = None):
    if password is None:
        response.status_code = status.HTTP_401_UNAUTHORIZED
        return response

    password_sha512 = sha512()
    password_sha512.update(
        bytes(password, encodings.normalize_encoding('utf8')))
    return password_sha512.hexdigest(), password
Example #30
File: ed_txt.py Project: Diapolo10/editra
def GetEncodings():
    """Get a list of possible encodings to try from the locale information
    @return: list of strings

    """
    encodings = list()
    encodings.append(Profile_Get('ENCODING', None))

    try:
        encodings.append(locale.getpreferredencoding())
    except:
        pass
    
    encodings.append('utf-8')

    try:
        if hasattr(locale, 'nl_langinfo'):
            encodings.append(locale.nl_langinfo(locale.CODESET))
    except:
        pass
    try:
        encodings.append(locale.getlocale()[1])
    except:
        pass
    try:
        encodings.append(locale.getdefaultlocale()[1])
    except:
        pass
    encodings.append(sys.getfilesystemencoding())
    encodings.append('utf-16')
    encodings.append('utf-16-le') # for files without BOM...
    encodings.append('latin-1')

    # Normalize all names
    normlist = [ enclib.normalize_encoding(enc) for enc in encodings if enc]

    # Clean the list for duplicates and None values
    rlist = list()
    codec_list = list()
    for enc in normlist:
        if enc is not None and len(enc):
            enc = enc.lower()
            if enc not in rlist:
                # Ascii is useless so ignore it (ascii, us_ascii, ...)
                if 'ascii' in enc:
                    continue

                try:
                    ctmp = codecs.lookup(enc)
                    if ctmp.name not in codec_list:
                        codec_list.append(ctmp.name)
                        rlist.append(enc)
                except LookupError:
                    pass
    return rlist
Example #31
def activate_encoding(tar_names=('syntax-extensions',)):
    from encodings import normalize_encoding
    from codecs import register
    from syntax_extensions.activate.encoding import codec_info

    names = _already_activated.setdefault('encoding', set())

    if names:
        names.update(normalize_encoding(n) for n in tar_names)
        return
    else:
        names.update(normalize_encoding(n) for n in tar_names)

        def finder(n: str):
            if normalize_encoding(n) in names:
                return codec_info
            else:
                return None

        register(finder)
Example #32
def normalize_encoding(encoding):
    '''Return the normalize form of the encoding.
    '''
    if not encoding:
        return None
    encoding = encodings.normalize_encoding(encoding).lower()
    encoding = encodings.aliases.aliases.get(encoding, encoding)
    encoding = _default_encoding_translation.get(encoding, encoding)
    try:
        return codecs.lookup(encoding).name
    except LookupError:
        return None
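The codecs.lookup(...).name step above is what finally maps any spelling onto Python's canonical codec name, or raises LookupError when no such codec exists. A small standalone illustration (the printed canonical names are what current CPython reports and should be treated as indicative):

import codecs

for name in ('UTF8', 'latin-1', 'windows-1252', 'no-such-codec'):
    try:
        print(name, '->', codecs.lookup(name).name)
    except LookupError:
        print(name, '-> unknown codec')
# Typically prints:
#   UTF8 -> utf-8
#   latin-1 -> iso8859-1
#   windows-1252 -> cp1252
#   no-such-codec -> unknown codec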
Example #33
def normalize_encoding(encoding):
    '''Return the normalize form of the encoding.
    '''
    if not encoding:
        return None
    encoding = encodings.normalize_encoding(encoding).lower()
    encoding = encodings.aliases.aliases.get(encoding, encoding)
    encoding = _default_encoding_translation.get(encoding, encoding)
    try:
        return codecs.lookup(encoding).name
    except LookupError:
        return None
Example #34
def search_function(encoding):
    if normalize_encoding(encoding) == 'mutf_8':
        return codecs.CodecInfo(
            name='mutf-8',
            encode=encode,
            decode=decode,
            incrementalencoder=IncrementalEncoder,
            incrementaldecoder=IncrementalDecoder,
            streamreader=StreamReader,
            streamwriter=StreamWriter,
        )
    else:
        return None
Example #35
File: mutf_8.py Project: Ciemaar/voc
def search_function(encoding):
    if normalize_encoding(encoding) == 'mutf_8':
        return codecs.CodecInfo(
            name='mutf-8',
            encode=encode,
            decode=decode,
            incrementalencoder=IncrementalEncoder,
            incrementaldecoder=IncrementalDecoder,
            streamreader=StreamReader,
            streamwriter=StreamWriter,
        )
    else:
        return None
Example #36
 def filter_data(self, template_name, data):
     """
     Add panel to repository settings.
     """
     if template_name == 'settings.html':
         # Append our template
         template = self.app.templates.get_template("set_encoding.html")
         data["templates_content"].append(template)
         # Query current data from database.
         repo_obj = librdiff.RdiffRepo(self.app.currentuser.user_root, data['repo_path'])
         current_encoding = repo_obj.get_encoding()
         current_encoding = encodings.normalize_encoding(current_encoding)
         data['current_encoding'] = current_encoding
Example #37
    def _get_parms_for_page(self, repo_obj):
        assert isinstance(repo_obj, librdiff.RdiffRepo)

        current_encoding = repo_obj.get_encoding() or rdw_helpers.system_charset
        current_encoding = encodings.normalize_encoding(current_encoding)

        return {
            'repo_name': repo_obj.display_name,
            'repo_path': repo_obj.path,
            'settings': True,
            'supported_encodings': self._get_encodings(),
            'current_encoding': current_encoding
        }
Example #38
 def filter_data(self, template_name, data):
     """
     Add panel to repository settings.
     """
     if template_name == 'settings.html':
         # Append our template
         template = self.app.templates.get_template("set_encoding.html")
         data["templates_content"].append(template)
         # Query current data from database.
         repo_obj = librdiff.RdiffRepo(self.app.currentuser.user_root,
                                       data['repo_path'])
         current_encoding = repo_obj.get_encoding()
         current_encoding = encodings.normalize_encoding(current_encoding)
         data['current_encoding'] = current_encoding
Example #39
 def copy_path(self, table: SQLTable, path: Path, encoding: str,
               dialect: Dialect) -> Iterable[str]:
     encoding = normalize_encoding(encoding).upper().replace("_", "")
     lines = [
         f"LOAD DATA INFILE '{path}'",
         f"INTO TABLE `{table.name}`",
         f"CHARACTER SET '{encoding}'",
         f"FIELDS TERMINATED BY '{dialect.delimiter}'",
         f"OPTIONALLY ENCLOSED BY '{dialect.quotechar}'",
     ]
     if not dialect.doublequote:
         lines.append(f"ESCAPED BY '{dialect.escapechar}'")
     lines.append("IGNORE 1 LINES")
     return "\n".join(lines),
Example #40
def normalize(localename):
    """ Returns a normalized locale code for the given locale
        name.
    
        The returned locale code is formatted for use with
        setlocale().
    
        If normalization fails, the original name is returned
        unchanged.
    
        If the given encoding is not known, the function defaults to
        the default encoding for the locale code just like setlocale()
        does.
    
    """
    fullname = localename.lower()
    if ':' in fullname:
        fullname = fullname.replace(':', '.')
    if '.' in fullname:
        langname, encoding = fullname.split('.')[:2]
        fullname = langname + '.' + encoding
    else:
        langname = fullname
        encoding = ''
    norm_encoding = encoding.replace('-', '')
    norm_encoding = norm_encoding.replace('_', '')
    lookup_name = langname + '.' + encoding
    code = locale_alias.get(lookup_name, None)
    if code is not None:
        return code
    code = locale_alias.get(langname, None)
    if code is not None:
        if '.' in code:
            langname, defenc = code.split('.')
        else:
            langname = code
            defenc = ''
        if encoding:
            norm_encoding = encodings.normalize_encoding(encoding)
            norm_encoding = encodings.aliases.aliases.get(norm_encoding, norm_encoding)
            encoding = locale_encoding_alias.get(norm_encoding, norm_encoding)
        else:
            encoding = defenc
        if encoding:
            return langname + '.' + encoding
        else:
            return langname
    else:
        return localename
Example #41
def decodeURLContent(content):
    """Decodes the content read from a URL"""
    project = GlobalData().project
    if project.isLoaded():
        projectEncoding = project.props['encoding']
        if projectEncoding:
            if not isValidEncoding(projectEncoding):
                raise Exception(
                    "The prject encoding " + projectEncoding + " is invalid. "
                    "Please select a valid one in the project properties and "
                    "try again.")
            return content.decode(
                encodings.normalize_encoding(projectEncoding))

    # Check the IDE wide encoding
    ideEncoding = Settings()['encoding']
    if ideEncoding:
        if not isValidEncoding(ideEncoding):
            raise Exception("The ide encoding " + ideEncoding + " is invalid. "
                            "Please set a valid one and try again.")
        return content.decode(encodings.normalize_encoding(ideEncoding))

    # The default one
    return content.decode(DEFAULT_ENCODING)
Example #42
    def _handle_set_encoding(self, repo_obj, **kwargs):
        """
        Change the encoding of the repository.
        """
        # Validate the encoding value
        new_encoding = kwargs.get('encoding')
        new_encoding = unicode(encodings.normalize_encoding(new_encoding)).lower()
        if new_encoding not in self._get_encodings():
            raise ValueError(_("invalid encoding value"))

        # Update the repository encoding
        _logger.info("updating repository [%s] encoding [%s]", repo_obj, new_encoding)
        repo_obj.set_encoding(new_encoding)

        return {'success': _("Repository updated successfully with new encoding.")}
Example #43
    def copy_stream(self) -> str:
        encoding = normalize_encoding(self._encoding).upper()
        options = {"FORMAT": "CSV",
                   "HEADER": "TRUE", "ENCODING": f"'{encoding}'"}
        if self._dialect.delimiter != ',':
            options["DELIMITER"] = self._escape_char(self._dialect.delimiter)
        if not self._dialect.doublequote:
            options["ESCAPE"] = self._escape_char(self._dialect.escapechar)
        if self._dialect.quotechar != '"':
            options["QUOTE"] = self._escape_char(self._dialect.quotechar)
        options["FORCE_NULL"] = '("{}")'.format('", "'.join(self._force_null))
        options_str = ", ".join(f"{k} {v}" for k, v in options.items())

        return (f'COPY "{self._table_name}" '
                f'FROM \'{self._file.absolute().as_posix()}\' '
                f'WITH ({options_str});')
Example #44
def auth_view(response: Response,
              password: Optional[str] = None,
              password_hash: Optional[str] = None):
    if password is None or password_hash is None or password == '':
        response.status_code = status.HTTP_401_UNAUTHORIZED
        return response

    password_sha512 = sha512()
    password_sha512.update(
        bytes(password, encodings.normalize_encoding('utf8')))

    if password_sha512.hexdigest() == password_hash:
        response.status_code = status.HTTP_204_NO_CONTENT
    else:
        response.status_code = status.HTTP_401_UNAUTHORIZED
    return response
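Both auth views above simply compare a SHA-512 hex digest of the submitted password with a stored hash; encodings.normalize_encoding('utf8') merely yields an encoding name ('utf8') that bytes() accepts. A minimal sketch of the same comparison outside any web framework (the sample password is made up, and a salted KDF would be preferable in real code):

from hashlib import sha512

def hash_password(password: str) -> str:
    # Plain, unsalted SHA-512, mirroring the views above.
    return sha512(password.encode('utf-8')).hexdigest()

stored = hash_password('correct horse battery staple')
print(hash_password('correct horse battery staple') == stored)  # True
print(hash_password('wrong guess') == stored)                   # False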
Example #45
    def __init__(self, binary=None, home=None, keyring=None, secring=None,
                 use_agent=False, default_preference_list=None,
                 verbose=False, options=None):

        self.binary  = _util._find_binary(binary)
        self.homedir = home if home else _util._conf
        pub = _parsers._fix_unsafe(keyring) if keyring else 'pubring.gpg'
        sec = _parsers._fix_unsafe(secring) if secring else 'secring.gpg'
        self.keyring = os.path.join(self._homedir, pub)
        self.secring = os.path.join(self._homedir, sec)
        self.options = _parsers._sanitise(options) if options else None

        if default_preference_list:
            self._prefs = _check_preferences(default_preference_list, 'all')
        else:
            self._prefs  = 'SHA512 SHA384 SHA256 AES256 CAMELLIA256 TWOFISH'
            self._prefs += ' AES192 ZLIB ZIP Uncompressed'

        encoding = locale.getpreferredencoding()
        if encoding is None: # This happens on Jython!
            encoding = sys.stdin.encoding
        self._encoding = encoding.lower().replace('-', '_')
        self._filesystemencoding = encodings.normalize_encoding(
            sys.getfilesystemencoding().lower())

        self._keyserver = 'hkp://subkeys.pgp.net'
        self.__generated_keys = os.path.join(self.homedir, 'generated-keys')

        try:
            assert self.binary, "Could not find binary %s" % binary
            assert isinstance(verbose, (bool, str, int)), \
                "'verbose' must be boolean, string, or 0 <= n <= 9"
            assert isinstance(use_agent, bool), "'use_agent' must be boolean"
            if self.options is not None:
                assert isinstance(self.options, str), "options not string"
        except (AssertionError, AttributeError) as ae:
            log.error("GPGBase.__init__(): %s" % ae.message)
            raise RuntimeError(ae.message)
        else:
            self.verbose = verbose
            self.use_agent = use_agent

        if hasattr(self, '_agent_proc') \
                and getattr(self, '_remove_agent', None) is True:
            if hasattr(self, '__remove_path__'):
                self.__remove_path__('pinentry')
Example #46
    def __init__(self, binary=None, home=None, keyring=None, secring=None,
                 use_agent=False, default_preference_list=None,
                 verbose=False, options=None):

        self.binary  = _util._find_binary(binary)
        self.homedir = home if home else _util._conf
        pub = _parsers._fix_unsafe(keyring) if keyring else 'pubring.gpg'
        sec = _parsers._fix_unsafe(secring) if secring else 'secring.gpg'
        self.keyring = os.path.join(self._homedir, pub)
        self.secring = os.path.join(self._homedir, sec)
        self.options = _parsers._sanitise(options) if options else None

        if default_preference_list:
            self._prefs = _check_preferences(default_preference_list, 'all')
        else:
            self._prefs  = 'SHA512 SHA384 SHA256 AES256 CAMELLIA256 TWOFISH'
            self._prefs += ' AES192 ZLIB ZIP Uncompressed'

        encoding = locale.getpreferredencoding()
        if encoding is None: # This happens on Jython!
            encoding = sys.stdin.encoding
        self._encoding = encoding.lower().replace('-', '_')
        self._filesystemencoding = encodings.normalize_encoding(
            sys.getfilesystemencoding().lower())

        self._keyserver = 'hkp://subkeys.pgp.net'
        self.__generated_keys = os.path.join(self.homedir, 'generated-keys')

        try:
            assert self.binary, "Could not find binary %s" % binary
            assert isinstance(verbose, (bool, str, int)), \
                "'verbose' must be boolean, string, or 0 <= n <= 9"
            assert isinstance(use_agent, bool), "'use_agent' must be boolean"
            if self.options is not None:
                assert isinstance(self.options, str), "options not string"
        except (AssertionError, AttributeError) as ae:
            log.error("GPGBase.__init__(): %s" % ae.message)
            raise RuntimeError(ae.message)
        else:
            self.verbose = verbose
            self.use_agent = use_agent

        if hasattr(self, '_agent_proc') \
                and getattr(self, '_remove_agent', None) is True:
            if hasattr(self, '__remove_path__'):
                self.__remove_path__('pinentry')
Example #47
def getTextFromRawBytes(buf: bytes,
                        numChars: int,
                        encoding: Optional[str] = None,
                        errorsFallback: str = "replace"):
    """
    Gets a string from a raw bytes object, decoded using the specified L{encoding}.
    In most cases, the bytes object is fetched by passing the raw attribute of a ctypes.c_char-Array to this function.
    If L{encoding} is C{None}, the bytes object is inspected on whether it contains single byte or multi byte characters.
    As a first attempt, the bytes are decoded using the surrogatepass error handler.
    This handler behaves like strict for all encodings without surrogates,
    while making sure that surrogates are properly decoded when using UTF-16.
    If that fails, the exception is logged and the bytes are decoded
    according to the L{errorsFallback} error handler.
    """
    if encoding is None:
        # If the buffer we got contains any non null characters from numChars to the buffer's end,
        # the buffer most likely contains multibyte characters.
        # Note that in theory, it could also be a multibyte character string
        # with nulls taking up the second half of the string.
        # Unfortunately, there isn't a good way to detect those cases.
        if numChars > 1 and any(buf[numChars:]):
            encoding = WCHAR_ENCODING
        else:
            encoding = USER_ANSI_CODE_PAGE
    else:
        encoding = encodings.normalize_encoding(encoding).lower()
    if encoding.startswith("utf_16"):
        numBytes = numChars * 2
    elif encoding.startswith("utf_32"):
        numBytes = numChars * 4
    else:  # All other encodings are single byte.
        numBytes = numChars
    rawText: bytes = buf[:numBytes]
    if not any(rawText):
        # rawText is empty or only contains null characters.
        # If this is a range with only null characters in it, there's not much we can do about this.
        return ""
    try:
        text = rawText.decode(encoding, errors="surrogatepass")
    except UnicodeDecodeError:
        log.debugWarning(
            "Error decoding text in %r, probably wrong encoding assumed or incomplete data"
            % buf)
        text = rawText.decode(encoding, errors=errorsFallback)
    return text
Example #48
    def copy_stream(self, table: SQLTable, encoding: str,
                    dialect: Dialect) -> \
            Iterable[str]:
        encoding = normalize_encoding(encoding).upper()
        options = {
            "FORMAT": "CSV",
            "HEADER": "TRUE",
            "ENCODING": f"'{encoding}'"
        }
        if dialect.delimiter != ',':
            options["DELIMITER"] = self._escape_char(dialect.delimiter)
        if not dialect.doublequote:
            options["ESCAPE"] = self._escape_char(dialect.escapechar)
        if dialect.quotechar != '"':
            options["QUOTE"] = self._escape_char(dialect.quotechar)
        options_str = ", ".join(f"{k} {v}" for k, v in options.items())

        return f"COPY {table.name} FROM STDIN WITH ({options_str})",
Example #49
def _replace_encoding(code, encoding):
    if '.' in code:
        langname = code[:code.index('.')]
    else:
        langname = code
    norm_encoding = encodings.normalize_encoding(encoding)
    norm_encoding = encodings.aliases.aliases.get(norm_encoding.lower(),
        norm_encoding)
    encoding = norm_encoding
    norm_encoding = norm_encoding.lower()
    if norm_encoding in locale_encoding_alias:
        encoding = locale_encoding_alias[norm_encoding]
    else:
        norm_encoding = norm_encoding.replace('_', '')
        norm_encoding = norm_encoding.replace('-', '')
        if norm_encoding in locale_encoding_alias:
            encoding = locale_encoding_alias[norm_encoding]
    return langname + '.' + encoding
Example #50
def GetEncoding(xml_header):
  """ Return the encoding in the xml header
  Args:
    xml_header: '<?xml ... encoding='ENCODING'?>'
  Returns:
    codec: Python encodings-normalized case-folded ENCODING
    None: if there is either no xml header or no encoding specified
  """
  if not xml_header:
    return None
  encoding_str = 'encoding='
  encoding_str_len = len(encoding_str)
  encoding = xml_header.find(encoding_str)
  if encoding == -1:
    return None
  val = UnQuote(xml_header[encoding+encoding_str_len:])
  if val:
    return encodings.normalize_encoding(val.lower())
  return None
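GetEncoding above depends on a project-specific UnQuote helper. As a rough standalone equivalent, one could pull the encoding out of the XML declaration with a regular expression and normalize it the same way (the regex and its edge-case behaviour are an assumption for illustration, not the original project's logic):

import re
import encodings

_ENCODING_RE = re.compile(r"""encoding=['"]([^'"]+)['"]""")

def get_xml_encoding(xml_header):
    # Return the normalized encoding from an XML declaration, or None.
    if not xml_header:
        return None
    match = _ENCODING_RE.search(xml_header)
    if match is None:
        return None
    return encodings.normalize_encoding(match.group(1).lower())

print(get_xml_encoding('<?xml version="1.0" encoding="UTF-8"?>'))  # utf_8
print(get_xml_encoding('<?xml version="1.0"?>'))                   # None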
Example #51
    def __init__(self, parent):
        wx.Panel.__init__(self, parent)

        # Attributes
        self._box = wx.StaticBox(self, label=_("Login Settings"))
        self._boxsz = wx.StaticBoxSizer(self._box, wx.HORIZONTAL)
        self._host = wx.TextCtrl(self)
        self._port = wx.TextCtrl(self, value=u"21") # TODO: Integer only!
        self._user = wx.TextCtrl(self)
        self._pass = wx.TextCtrl(self, style=wx.TE_PASSWORD)
        self._path = wx.TextCtrl(self)
        enclst = GetAllEncodings()
        default = encodings.normalize_encoding('utf-8')
        self._enc = wx.Choice(self, choices=enclst)
        if default in enclst:
            self._enc.SetStringSelection(default)

        # Layout
        self.__DoLayout()
        self.SetInitialSize()
Example #52
File: request.py Project: ndparker/wtf
    def _determine_encoding(store):
        """ Guess encoding of the request parameters """
        # try simple method first...
        encoding = store.getfirst('_charset_')
        if not encoding:
            # peek is assumed to be '\xe4', i.e. &#228;
            encoding = {
                '\xc3\xa4': 'utf-8',
                None      : 'utf-8',
                ''        : 'utf-8',
                '\x84'    : 'cp437', # default lynx on dos
            }.get(store.getfirst('_peek_'), 'cp1252')
        encoding = _encodings.normalize_encoding(encoding)

        # fix known browser bug, but it doesn't exactly hurt:
        if encoding.replace('_', '').decode('latin-1').lower() == u'iso88591':
            encoding = 'cp1252'
        else:
            try:
                _codecs.lookup(encoding)
            except LookupError:
                # doh!
                encoding = 'cp1252'
        return encoding
Example #53
    def __init__(self, binary=None, home=None, keyring=None, secring=None,
                 use_agent=False, default_preference_list=None,
                 verbose=False, options=None):
        """Create a ``GPGBase``.

        This class is used to set up properties for controlling the behaviour
        of configuring various options for GnuPG, such as setting GnuPG's
        **homedir** , and the paths to its **binary** and **keyring** .

        :const binary: (:obj:`str`) The full path to the GnuPG binary.

        :ivar homedir: (:class:`~gnupg._util.InheritableProperty`) The full
                       path to the current setting for the GnuPG
                       ``--homedir``.

        :ivar _generated_keys: (:class:`~gnupg._util.InheritableProperty`)
                               Controls setting the directory for storing any
                               keys which are generated with
                               :meth:`~gnupg.GPG.gen_key`.

        :ivar str keyring: The filename in **homedir** to use as the keyring
                           file for public keys.
        :ivar str secring: The filename in **homedir** to use as the keyring
                           file for secret keys.
        """
        self.binary  = _util._find_binary(binary)
        self.homedir = os.path.expanduser(home) if home else _util._conf
        pub = _parsers._fix_unsafe(keyring) if keyring else 'pubring.gpg'
        sec = _parsers._fix_unsafe(secring) if secring else 'secring.gpg'
        self.keyring = os.path.join(self._homedir, pub)
        self.secring = os.path.join(self._homedir, sec)
        self.options = _parsers._sanitise(options) if options else None

        #: The version string of our GnuPG binary
        self.binary_version = '0.0.0'
        self.verbose = False

        if default_preference_list:
            self._prefs = _check_preferences(default_preference_list, 'all')
        else:
            self._prefs  = 'SHA512 SHA384 SHA256 AES256 CAMELLIA256 TWOFISH'
            self._prefs += ' AES192 ZLIB ZIP Uncompressed'

        encoding = locale.getpreferredencoding()
        if encoding is None: # This happens on Jython!
            encoding = sys.stdin.encoding
        self._encoding = encoding.lower().replace('-', '_')
        self._filesystemencoding = encodings.normalize_encoding(
            sys.getfilesystemencoding().lower())

        # Issue #49: https://github.com/isislovecruft/python-gnupg/issues/49
        #
        # During `line = stream.readline()` in `_read_response()`, the Python
        # codecs module will choke on Unicode data, so we globally monkeypatch
        # the "strict" error handler to use the builtin `replace_errors`
        # handler:
        codecs.register_error('strict', codecs.replace_errors)

        self._keyserver = 'hkp://wwwkeys.pgp.net'
        self.__generated_keys = os.path.join(self.homedir, 'generated-keys')

        try:
            assert self.binary, "Could not find binary %s" % binary
            assert isinstance(verbose, (bool, str, int)), \
                "'verbose' must be boolean, string, or 0 <= n <= 9"
            assert isinstance(use_agent, bool), "'use_agent' must be boolean"
            if self.options is not None:
                assert isinstance(self.options, str), "options not string"
        except (AssertionError, AttributeError) as ae:
            log.error("GPGBase.__init__(): %s" % str(ae))
            raise RuntimeError(str(ae))
        else:
            self._set_verbose(verbose)
            self.use_agent = use_agent

        if hasattr(self, '_agent_proc') \
                and getattr(self, '_remove_agent', None) is True:
            if hasattr(self, '__remove_path__'):
                self.__remove_path__('pinentry')

        # Assign our self.binary_version attribute:
        self._check_sane_and_get_gpg_version()
Example #54
class DefaultSaxHandler(object):
    def start_element(self,name,attrs):
        print('sax:start_element: %s , attrs : %s' % (name,str(attrs)))
        print(type(attrs))
    def end_element(self,name):
        print('sax:end_element: %s' % name)

    def char_data(self,text):
        print('sax:char_data: %s ' % text)

xml = r'''<?xml version="1.0" encoding="utf-8"?>
<ol>
  <li><a href="/python">Python</a></li>
  <li><a href="google">google</a></li>
</ol>
'''
L = []
L.append(r'<?xml version="1.0"?>')
L.append(r'<root>')
L.append(encodings.normalize_encoding('some & data'))
L.append(r'</root>')
print(''.join(L))
print(type({}))
handler = DefaultSaxHandler()
parser = ParserCreate()
parser.StartElementHandler = handler.start_element
parser.EndElementHandler = handler.end_element
parser.CharacterDataHandler = handler.char_data
parser.Parse(xml)
# parser.Parse(''.join(L))
Example #55
 def my_search_function(encoding):
     norm_encoding = encodings.normalize_encoding(encoding)
     if norm_encoding != 'utf_8_sig':
         return None
     return (encode, decode, StreamReader, StreamWriter)
Example #56
            return codecs.charmap_decode(input, self.errors, decoding_table)[0]

    class StreamWriter(Codec, codecs.StreamWriter):
        pass

    class StreamReader(Codec, codecs.StreamReader):
        pass

    return codecs.CodecInfo(
        name='sloppy-' + encoding,
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )

# Define a codec for each incomplete encoding. The resulting CODECS dictionary
# can be used by the main module of ftfy.bad_codecs.
CODECS = {}
INCOMPLETE_ENCODINGS = (
    ['windows-%s' % num for num in range(1250, 1259)] +
    ['iso-8859-%s' % num for num in (3, 6, 7, 8, 11)] +
    ['cp%s' % num for num in range(1250, 1259)] + ['cp874']
)

for _encoding in INCOMPLETE_ENCODINGS:
    _new_name = normalize_encoding('sloppy-' + _encoding)
    CODECS[_new_name] = make_sloppy_codec(_encoding)
Example #57
    def gen_key_input(self, separate_keyring=False, save_batchfile=False,
                      testing=False, **kwargs):
        """Generate a batch file for input to :meth:`GPG.gen_key()`.

        The GnuPG batch file key generation feature allows unattended key
        generation by creating a file with special syntax and then providing it
        to: ``gpg --gen-key --batch``. Batch files look like this:

        |  Name-Real: Alice
        |  Name-Email: [email protected]
        |  Expire-Date: 2014-04-01
        |  Key-Type: RSA
        |  Key-Length: 4096
        |  Key-Usage: cert
        |  Subkey-Type: RSA
        |  Subkey-Length: 4096
        |  Subkey-Usage: encrypt,sign,auth
        |  Passphrase: sekrit
        |  %pubring foo.gpg
        |  %secring sec.gpg
        |  %commit

        which is what this function creates for you. All of the available,
        non-control parameters are detailed below (control parameters are the
        ones which begin with a '%'). For example, to generate the batch file
        example above, use like this:

        >>> import gnupg
        GnuPG logging disabled...
        >>> from __future__ import print_function
        >>> gpg = gnupg.GPG(homedir='doctests')
        >>> alice = { 'name_real': 'Alice',
        ...     'name_email': '*****@*****.**',
        ...     'expire_date': '2014-04-01',
        ...     'key_type': 'RSA',
        ...     'key_length': 4096,
        ...     'key_usage': '',
        ...     'subkey_type': 'RSA',
        ...     'subkey_length': 4096,
        ...     'subkey_usage': 'encrypt,sign,auth',
        ...     'passphrase': 'sekrit'}
        >>> alice_input = gpg.gen_key_input(**alice)
        >>> print(alice_input)
        Key-Type: RSA
        Subkey-Type: RSA
        Subkey-Usage: encrypt,sign,auth
        Expire-Date: 2014-04-01
        Passphrase: sekrit
        Name-Real: Alice
        Name-Email: [email protected]
        Key-Length: 4096
        Subkey-Length: 4096
        %pubring ./doctests/alice.pubring.gpg
        %secring ./doctests/alice.secring.gpg
        %commit
        <BLANKLINE>
        >>> alice_key = gpg.gen_key(alice_input)
        >>> assert alice_key is not None
        >>> assert alice_key.fingerprint is not None
        >>> message = "no one else can read my sekrit message"
        >>> encrypted = gpg.encrypt(message, alice_key.fingerprint)
        >>> assert isinstance(encrypted.data, str)

        :param bool separate_keyring: Specify for the new key to be written to
            a separate pubring.gpg and secring.gpg. If True,
            :meth:`GPG.gen_key` will automatically rename the separate keyring
            and secring to whatever the fingerprint of the generated key ends
            up being, suffixed with '.pubring' and '.secring' respectively.

        :param bool save_batchfile: Save a copy of the generated batch file to
            disk in a file named <name_real>.batch, where <name_real> is the
            ``name_real`` parameter stripped of punctuation, spaces, and
            non-ascii characters.

        :param bool testing: Uses a faster, albeit insecure, random number
            generator to create keys. This should only be used for testing
            purposes, for keys which are going to be created and then soon
            after destroyed, and never for generating keys intended for
            actual use.

        :param str name_real: The name field of the UID in the generated key.
        :param str name_comment: The comment in the UID of the generated key.
        :param str name_email: The email in the UID of the generated key.
            (default: $USER@$(hostname) ) Remember to use UTF-8 encoding for
            the entirety of the UID. At least one of ``name_real``,
            ``name_comment``, or ``name_email`` must be provided, or else no
            user ID is created.

        :param str key_type: One of 'RSA', 'DSA', 'ELG-E', or 'default'.
            (default: 'default') Starts a new parameter block by giving the
            type of the primary key. The algorithm must be capable of
            signing. This is a required parameter. The algorithm may either be
            an OpenPGP algorithm number or a string with the algorithm
            name. The special value ‘default’ may be used for algo to create
            the default key type; in this case a ``key_usage`` should not be
            given and 'default' must also be used for ``subkey_type``.

        :param int key_length: The requested length of the generated key in
            bits. (Default: 4096)

        :param str key_grip: An optional hexadecimal string which is used to
            generate a CSR or certificate for an already existing key.
            ``key_length`` will be ignored if this parameter is given.

        :param str key_usage: Space or comma delimited string of key
            usages. Allowed values are ‘encrypt’, ‘sign’, and ‘auth’. This is
            used to generate the key flags. Please make sure that the
            algorithm is capable of this usage. Note that OpenPGP requires
            that all primary keys are capable of certification, so no matter
            what usage is given here, the ‘cert’ flag will be on. If no
            ‘Key-Usage’ is specified and the ‘Key-Type’ is not ‘default’, all
            allowed usages for that particular algorithm are used; if it is
            not given but ‘default’ is used the usage will be ‘sign’.

        :param str subkey_type: This generates a secondary key
            (subkey). Currently only one subkey can be handled. See also
            ``key_type`` above.

        :param int subkey_length: The length of the secondary subkey in bits.

        :param str subkey_usage: Key usage for a subkey; similar to
            ``key_usage``.

        :type expire_date: int or str
        :param expire_date: Can be specified as an ISO date or as
            <int>[d|w|m|y]. Sets the expiration date for the key (and the
            subkey). It may either be entered in ISO date format (2000-08-15)
            or as a number of days, weeks, months, or years. The special
            notation "seconds=N" is also allowed to directly give an Epoch
            value. Without a letter, days are assumed. Note that there is no
            check done on the overflow of the type used by OpenPGP for
            timestamps, so you should make sure that the given value makes
            sense. Although OpenPGP works with time intervals, GnuPG uses an
            absolute value internally and thus the last year we can represent
            is 2105.

        :param str creation_date: Set the creation date of the key as stored
            in the key information and which is also part of the fingerprint
            calculation. Either a date like "1986-04-26" or a full timestamp
            like "19860426T042640" may be used. The time is considered to be
            UTC. If it is not given the current time is used.

        :param str passphrase: The passphrase for the new key. The default is
            to not use any passphrase. Note that GnuPG>=2.1.x will not allow
            you to specify a passphrase for batch key generation -- GnuPG will
            ignore the ``passphrase`` parameter, stop, and ask the user for
            the new passphrase.  However, we can put the command
            '%no-protection' into the batch key generation file to allow a
            passwordless key to be created, which can then have its passphrase
            set later with '--edit-key'.

        :param str preferences: Set the cipher, hash, and compression
            preference values for this key. This expects the same type of
            string as the sub-command ‘setpref’ in the --edit-key menu.

        :param str revoker: Should be given as 'algo:fpr' [case sensitive].
            Adds a designated revoker to the generated key. Algo is the public
            key algorithm of the designated revoker (e.g. RSA=1, DSA=17, etc.),
            and fpr is the fingerprint of the designated revoker. The optional
            ‘sensitive’ flag marks the designated revoker as sensitive
            information. Only v4 keys may be designated revokers.

        :param str keyserver: This is an optional parameter that specifies the
            preferred keyserver URL for the key.

        :param str handle: This is an optional parameter only used with the
            status lines KEY_CREATED and KEY_NOT_CREATED. The string may be up
            to 100 characters long and should not contain spaces. It is useful
            for batch key generation to associate a key parameter block with a
            status line.

        :rtype: str
        :returns: A suitable input string for the :meth:`GPG.gen_key` method,
            the latter of which will create the new keypair.

        See
        http://www.gnupg.org/documentation/manuals/gnupg-devel/Unattended-GPG-key-generation.html
        for more details.
        """
        parms = {}

        #: A boolean for determining whether to set subkey_type to 'default'
        default_type = False

        name_email = kwargs.get('name_email')
        uidemail = _util.create_uid_email(name_email)

        parms.setdefault('Key-Type', 'default')
        parms.setdefault('Key-Length', 4096)
        parms.setdefault('Name-Real', "Autogenerated Key")
        parms.setdefault('Expire-Date', _util._next_year())
        parms.setdefault('Name-Email', uidemail)

        if testing:
            ## This specific comment string is required by (some? all?)
            ## versions of GnuPG to use the insecure PRNG:
            parms.setdefault('Name-Comment', 'insecure!')

        ## Convert the remaining kwargs, e.g. 'subkey_type', into their GnuPG
        ## batch-file field names, e.g. 'Subkey-Type':
        for key, val in list(kwargs.items()):
            key = key.replace('_', '-').title()
            ## to set 'cert', 'Key-Usage' must be a blank string
            if key not in ('Key-Usage', 'Subkey-Usage'):
                if str(val).strip():
                    parms[key] = val

        ## if Key-Type is 'default', make Subkey-Type also be 'default'
        if parms['Key-Type'] == 'default':
            default_type = True
            for field in ('Key-Usage', 'Subkey-Usage',):
                try: parms.pop(field)  ## toss these out, handle manually
                except KeyError: pass

        ## Key-Type must come first, followed by length
        out  = "Key-Type: %s\n" % parms.pop('Key-Type')
        out += "Key-Length: %d\n" % parms.pop('Key-Length')
        if 'Subkey-Type' in parms.keys():
            out += "Subkey-Type: %s\n" % parms.pop('Subkey-Type')
        else:
            if default_type:
                out += "Subkey-Type: default\n"
        if 'Subkey-Length' in parms.keys():
            out += "Subkey-Length: %s\n" % parms.pop('Subkey-Length')

        for key, val in list(parms.items()):
            out += "%s: %s\n" % (key, val)

        ## There is a problem where, in the batch files, if '%%pubring' and
        ## '%%secring' are given as any static string, e.g. 'pubring.gpg',
        ## that file will always get rewritten without confirmation, killing
        ## off any keys we had before. So in the case where we wish to
        ## generate a bunch of keys and then do stuff with them, we should not
        ## give 'pubring.gpg' as our keyring file, otherwise we will lose any
        ## keys we had previously.

        if separate_keyring:
            ring = str(uidemail + '_' + str(_util._utc_epoch()))
            self.temp_keyring = os.path.join(self.homedir, ring+'.pubring')
            self.temp_secring = os.path.join(self.homedir, ring+'.secring')
            out += "%%pubring %s\n" % self.temp_keyring
            out += "%%secring %s\n" % self.temp_secring

        if testing:
            ## see TODO file, tag :compatibility:gen_key_input:
            ##
            ## Add version detection before the '%no-protection' flag.
            out += "%no-protection\n"
            out += "%transient-key\n"

        out += "%commit\n"

        ## if we've been asked to save a copy of the batch file:
        if save_batchfile and parms['Name-Email'] != uidemail:
            asc_uid  = encodings.normalize_encoding(parms['Name-Email'])
            filename = _fix_unsafe(asc_uid) + _util._now() + '.batch'
            save_as  = os.path.join(self._batch_dir, filename)
            readme = os.path.join(self._batch_dir, 'README')

            if not os.path.exists(self._batch_dir):
                os.makedirs(self._batch_dir)

                ## the following pulls the link to GnuPG's online batchfile
                ## documentation from this function's docstring and sticks it
                ## in a README file in the batch directory:

                if getattr(self.gen_key_input, '__doc__', None) is not None:
                    docs = self.gen_key_input.__doc__
                else:
                    docs = str() ## docstring=None if run with "python -OO"
                links = '\n'.join(x.strip() for x in docs.splitlines()[-2:])
                explain = """
This directory was created by python-gnupg, on {}, and
it contains saved batch files, which can be given to GnuPG to automatically
generate keys. Please see
{}""".format(_util.now(), links) ## sometimes python is awesome.

                with open(readme, 'a+') as fh:
                    [fh.write(line) for line in explain]

            with open(save_as, 'a+') as batch_file:
                [batch_file.write(line) for line in out]

        return out
Example #58
0
    def __init__(self, binary=None, home=None, keyring=None, secring=None,
                 use_agent=False, default_preference_list=None,
                 verbose=False, options=None):
        """Create a ``GPGBase``.

        This class is used to set up properties controlling the behaviour of
        GnuPG, such as setting GnuPG's **homedir**, and the paths to its
        **binary** and **keyring**.

        :const binary: (:obj:`str`) The full path to the GnuPG binary.

        :ivar homedir: (:class:`~gnupg._util.InheritableProperty`) The full
                       path to the current setting for the GnuPG
                       ``--homedir``.

        :ivar _generated_keys: (:class:`~gnupg._util.InheritableProperty`)
                               Controls setting the directory for storing any
                               keys which are generated with
                               :meth:`~gnupg.GPG.gen_key`.

        :ivar str keyring: The filename in **homedir** to use as the keyring
                           file for public keys.
        :ivar str secring: The filename in **homedir** to use as the keyring
                           file for secret keys.
        """
        self.binary  = _util._find_binary(binary)
        self.homedir = home if home else _util._conf
        pub = _parsers._fix_unsafe(keyring) if keyring else 'pubring.gpg'
        sec = _parsers._fix_unsafe(secring) if secring else 'secring.gpg'
        self.keyring = os.path.join(self._homedir, pub)
        self.secring = os.path.join(self._homedir, sec)
        self.options = _parsers._sanitise(options) if options else None

        if default_preference_list:
            self._prefs = _check_preferences(default_preference_list, 'all')
        else:
            self._prefs  = 'SHA512 SHA384 SHA256 AES256 CAMELLIA256 TWOFISH'
            self._prefs += ' AES192 ZLIB ZIP Uncompressed'

        encoding = locale.getpreferredencoding()
        if encoding is None: # This happens on Jython!
            encoding = sys.stdin.encoding
        self._encoding = encoding.lower().replace('-', '_')
        self._filesystemencoding = encodings.normalize_encoding(
            sys.getfilesystemencoding().lower())

        self._keyserver = 'hkp://wwwkeys.pgp.net'
        self.__generated_keys = os.path.join(self.homedir, 'generated-keys')

        try:
            assert self.binary, "Could not find binary %s" % binary
            assert isinstance(verbose, (bool, str, int)), \
                "'verbose' must be boolean, string, or 0 <= n <= 9"
            assert isinstance(use_agent, bool), "'use_agent' must be boolean"
            if self.options is not None:
                assert isinstance(self.options, str), "options not string"
        except (AssertionError, AttributeError) as ae:
            log.error("GPGBase.__init__(): %s" % str(ae))
            raise RuntimeError(str(ae))
        else:
            if verbose is True:
                # The caller wants logging, but we need a valid --debug-level
                # for gpg. Default to "basic", and warn about the ambiguity.
                # (garrettr)
                verbose = "basic"
                log.warning('GPG(verbose=True) is ambiguous, defaulting to "basic" logging')
            self.verbose = verbose
            self.use_agent = use_agent

        if hasattr(self, '_agent_proc') \
                and getattr(self, '_remove_agent', None) is True:
            if hasattr(self, '__remove_path__'):
                self.__remove_path__('pinentry')