Пример #1
0
    def run(self):
        """Receive bytes from the client socket and dispatch parsed events.

        Loops until ``self.shutdown`` is set.  Incoming data is decoded as
        UTF-8, falling back to Latin-1 (which accepts any byte) on decode
        errors; each non-empty line is parsed into an event and fanned out
        to the registered listeners.
        """
        utf8decoder = getdecoder('utf-8')
        latin1decoder = getdecoder('latin-1')

        while (not self.shutdown):
            data = self.client.socket.recv(4096)
            datalen = len(data)
            if datalen > 0:
                try:
                    (message, decoded) = utf8decoder(data)
                except (UnicodeDecodeError, UnicodeTranslateError):
                    # Latin-1 maps every byte value, so this cannot fail.
                    (message, decoded) = latin1decoder(data)

                if datalen != decoded:
                    logger.warning('Not all bytes decoded: %r' % data)

                for line in [l.strip() for l in message.split('\n') if l]:
                    event, parameters = parse(line)

                    if event is None:
                        logger.error("Could not parse %r" % line)
                    else:
                        for listener in self.client.listeners[event]:
                            try:
                                listener(**parameters)
                            except Exception:
                                # Was a bare ``except:``; narrowed so that
                                # KeyboardInterrupt/SystemExit still propagate.
                                logger.exception("Uncaught exception in event listener on message:\n%s" % line)
            else:
                sleep(0.5)
Пример #2
0
def normalise_encoding_name(option_name, encoding):
    """Return the canonical spelling of *encoding*.

    The special names 'default', 'ascii' and 'utf8' are lowercased; any
    other name that resolves to the ascii or utf8 codec is mapped onto
    that canonical name; unrecognised names pass through unchanged.

    >>> normalise_encoding_name('c_string_encoding', 'AsCIi')
    'ascii'
    >>> normalise_encoding_name('c_string_encoding', 'us-ascii')
    'ascii'
    >>> normalise_encoding_name('c_string_encoding', 'utF-8')
    'utf8'
    >>> normalise_encoding_name('c_string_encoding', 'deFAuLT')
    'default'
    >>> normalise_encoding_name('c_string_encoding', 'SeriousLyNoSuch--Encoding')
    'SeriousLyNoSuch--Encoding'
    """
    if not encoding:
        return ''
    lowered = encoding.lower()
    if lowered in ('default', 'ascii', 'utf8'):
        return lowered
    import codecs
    try:
        decoder = codecs.getdecoder(encoding)
    except LookupError:
        # Unknown to us, but the codec may still exist at runtime.
        return encoding
    for canonical in ('ascii', 'utf8'):
        if decoder == codecs.getdecoder(canonical):
            return canonical
    return encoding
Пример #3
0
def get_report_content(path_to_file):
    """Read *path_to_file*, strip its BOM and return the decoded text.

    The codec is chosen by matching the file's byte-order mark against
    the ``codec_bom``/``codec_decoder`` tables; if decoding with the
    detected codec fails, latin-1 is tried as a last resort.  Carriage
    returns are stripped from the result.

    Raises ScriptError on a missing file, an unrecognised BOM, or an
    undecodable payload.
    """
    try:
        with open(path_to_file, 'rb') as f:
            b_content = f.read()
    except FileNotFoundError:
        raise ScriptError(ErrorsCodes.input_file_not_found, {'file': path_to_file})

    decoder = None
    bom_len = 0
    # Was `range(6)`: iterate the whole BOM table so the code stays
    # correct if entries are added or removed.
    for codec in range(len(codec_bom)):
        bom = codec_bom[codec]
        if bom == b_content[:len(bom)]:
            decoder = codecs.getdecoder(codec_decoder[codec])
            # Remember the matched BOM length instead of relying on the
            # loop variable leaking out of the loop.
            bom_len = len(bom)
            break

    if decoder is None:
        raise ScriptError(ErrorsCodes.input_file_read_error, {'file': path_to_file})

    try:
        content = decoder(b_content[bom_len:])[0]
    except Exception:  # was a bare except; keep the latin-1 fallback
        try:
            decoder = codecs.getdecoder('latin1')
            content = decoder(b_content[bom_len:])[0]
        except Exception:  # was a bare except
            raise ScriptError(ErrorsCodes.input_file_read_error, {'file': path_to_file})

    content = content.replace("\r", "")
    return content
Пример #4
0
def			ProcessLog( LogFile ):
	"""Decode a raw test-log buffer to text and strip carriage returns.

	The decoder is chosen from ``conf.console_encoding`` when set,
	otherwise from the current locale (falling back to cp1251 when the
	locale reports no encoding).  The branches differ per interpreter
	version because the codecs API changed between Python 1.x/2.x and 3.x.

	NOTE(review): written for very old interpreters (it checks for
	Python 1.x and 2.2+); most branches are dead code on modern Python.
	"""
	# Pick a decoder callable appropriate for this interpreter version.
	if( ( sys.version_info[ 0 ] == 2 and sys.version_info[ 1 ] >= 2 ) or ( sys.version_info[ 0 ] >= 3 ) ):
		if( conf.console_encoding != None ):
			d = codecs.getdecoder( conf.console_encoding );
		else:
			if( locale.getlocale()[ 1 ] == None ):
				# No locale encoding available -- assume Windows Cyrillic.
				d = codecs.getdecoder( 'cp1251' );
			else:
				d = codecs.getdecoder( locale.getlocale()[ 1 ] );
	else:
		# Ancient interpreters: codecs.lookup() tuple index 1 is the decoder.
		if( conf.console_encoding != None ):
			d = codecs.lookup( conf.console_encoding )[ 1 ];
		else:
			if( sys.version_info[ 0 ] > 1 ):
				d = codecs.lookup( locale.getlocale()[ 1 ] )[ 1 ];
			else:
				d = None;
		
	# Decode (when needed) and normalise line endings.
	if( sys.version_info[ 0 ] == 2 and sys.version_info[ 1 ] in [ 3 , 4 , 5 ,6 ] ):
		LogFile = d( LogFile )[ 0 ].replace( "\r" , "" );
	elif( sys.version_info[ 0 ] >= 3 ):
		if( type( LogFile ) == type( "" ) ):
			# Already a str on Python 3 -- no decoding required.
			LogFile = LogFile.replace( "\r" , "" );
		else:
			LogFile = d( LogFile )[ 0 ].replace( "\r" , "" );
	else:
		if( d != None ):
			LogFile = d( LogFile )[ 0 ].replace( "\r" , "" );
		else:
			LogFile = 'ERROR (Log file is hidden but it can be found in %test_project_pah%/logs directory)';
	
	return( LogFile );
Пример #5
0
def fix_string(s):
    """Repair a byte string that mixes cp1252-style bytes into UTF-8 text.

    Replaces a hand-picked set of Windows-1252 / high-bit bytes with the
    latin-1 encoding of their intended characters, then reinterprets the
    whole string as latin-1 and re-encodes it as UTF-8.  Returns None for
    the literal value 'null'.

    NOTE(review): Python 2 only -- on Python 3, ``str.replace`` with the
    bytes produced by ``.encode('latin1')`` would raise TypeError.
    """
    import codecs
    utf8 = codecs.getdecoder('UTF8')
    # Each replacement: decode the UTF-8 literal, re-encode as latin-1,
    # and substitute it for the stray single byte.
    s = s.replace('\x80', utf8("£")[0].encode('latin1'))
    s = s.replace('\x85', utf8("...")[0].encode('latin1'))
    s = s.replace('\x86', utf8("-")[0].encode('latin1'))
    s = s.replace('\x92', utf8("'")[0].encode('latin1'))
    s = s.replace('\x93', utf8('"')[0].encode('latin1'))
    s = s.replace('\x94', utf8('"')[0].encode('latin1'))
    s = s.replace('\x95', utf8('-')[0].encode('latin1'))
    s = s.replace('\x96', utf8("à")[0].encode('latin1'))
    s = s.replace('\x97', utf8("-")[0].encode('latin1'))
    s = s.replace('\x99', utf8("tm")[0].encode('latin1'))
    s = s.replace('\xA3', utf8("¬")[0].encode('latin1'))
    s = s.replace('\xA4', utf8("¤")[0].encode('latin1'))
    s = s.replace('\xA7', utf8("§")[0].encode('latin1'))
    s = s.replace('\xC4', utf8("Ä")[0].encode('latin1'))
    s = s.replace('\xC5', utf8("Å")[0].encode('latin1'))
    s = s.replace('\xE4', utf8("ä")[0].encode('latin1'))
    s = s.replace('\xE5', utf8("å")[0].encode('latin1'))
    s = s.replace('\xE9', utf8("é")[0].encode('latin1'))
    s = s.replace('\xF6', utf8("ö")[0].encode('latin1'))
    #s = s.replace('\x95', '-')
    #s = s.replace('\x', "")
    #s = s.replace('\x', "")
    latin1 = codecs.getdecoder('latin1')
    s = latin1(s)[0].encode('utf8')
    if s == 'null':
        return None
    return s
Пример #6
0
    def __init__(self, filename):
        """Open *filename*, sniff its BOM, and parse the leading headers."""
        self.filename = filename
        self.stream = io.open(self.filename, "rb")

        # Total size in bytes (stored as float for progress calculations).
        self.stream.seek(0, io.SEEK_END)
        self.stream_size = float(self.stream.tell())
        self.stream.seek(0)

        # Sniff the byte-order mark to pick the codec; default to UTF-8.
        probe = self.stream.read(max(len(codecs.BOM_UTF16_BE), len(codecs.BOM_UTF16_LE)))
        if probe.startswith(codecs.BOM_UTF16_BE):
            encoding = "utf-16be"
            offset = len(codecs.BOM_UTF16_BE)
        elif probe.startswith(codecs.BOM_UTF16_LE):
            encoding = "utf-16le"
            offset = len(codecs.BOM_UTF16_LE)
        else:
            encoding = "utf-8"
            offset = 0
        self.decode = codecs.getdecoder(encoding)
        self.newline = "\r\n".encode(encoding)
        self.newline_size = len(self.newline)

        # Consume "key: value" header lines until the first non-matching one.
        self.offset = offset
        self.headers = {}
        for line, offset, size in self.lines(offset):
            match = self.header_regex.match(line)
            if not match:
                break
            self.offset = offset + size
            key, value = match.groups()
            self.headers[key.lower()] = value
Пример #7
0
    def __init__ (self, file):
        """Open *file*, detect its UTF-16 flavour from the BOM, and parse
        the leading '#' header lines into self.headers."""
        # open stream
        self.file = file
        self.stream = open (file, 'rb', buffering = self.buffer_size)

        # determine encoding
        bom = self.stream.read (2)
        if bom == codecs.BOM_UTF16_BE:
            self.decode = codecs.getdecoder ('utf-16be')
            self.newline = '\r\n'.encode ('utf-16be')
        elif bom == codecs.BOM_UTF16_LE:
            self.decode = codecs.getdecoder ('utf-16le')
            self.newline = '\r\n'.encode ('utf-16le')
        else:
            # No explicit BOM: rewind and let the generic utf-16 codec
            # decide the byte order itself.
            self.stream.seek (0)
            self.decode = codecs.getdecoder ('utf-16')
            self.newline = '\r\n'.encode ('utf-16')
        self.offset = self.stream.tell ()

        # parse headers
        # NOTE(review): offset=2 is hard-coded even on the no-BOM branch
        # where the stream was rewound to 0 -- confirm against lines()'s
        # contract that this is intentional.
        self.headers = {}
        for header, offset in self.lines (offset = 2):
            if not header.startswith ('#'):
                break
            self.offset = offset
            match = self.header_pattern.search (header)
            if match:
                header, value = match.groups ()
                self.headers [header.lower ()] = value
Пример #8
0
def ipn(request, item_check_callable=None):
    """
    PayPal IPN endpoint (notify_url).
    Used by both PayPal Payments Pro and Payments Standard to confirm transactions.
    http://tinyurl.com/d9vu9d
    
    PayPal IPN Simulator:
    https://developer.paypal.com/cgi-bin/devscr?cmd=_ipn-link-session
    """
    # NOTE(review): Python 2 syntax (`except Exception, e` below).  The
    # function also appears to end abruptly after form validation --
    # confirm against the full source that a response step follows.
    #TODO: Clean up code so that we don't need to set None here and have a lot
    #      of if checks just to determine if flag is set.
    flag = None
    ipn_obj = None
    
    #set the encoding of the request, so that request.POST can be correctly decoded.
    #see https://github.com/johnboxall/django-paypal/issues/32
    #https://code.djangoproject.com/ticket/14035, worth noting, but this doesn't 
    # affect this ipn view as there won't be uploaded files.
    encoding = request.POST.get('charset', '')
    try:
        codecs.getdecoder(encoding) # check if the codec exists
        request.encoding = encoding
    except:
        pass

    # Clean up the data as PayPal sends some weird values such as "N/A"
    # Also, need to cope with custom encoding, which is stored in the body (!).
    # Assuming the tolerate parsing of QueryDict and an ASCII-like encoding,
    # such as windows-1252, latin1 or UTF8, the following will work:

    # Re-read the charset, this time distinguishing "absent" from "empty".
    encoding = request.POST.get('charset', None)

    if encoding is None:
        flag = "Invalid form - no charset passed, can't decode"
        data = None
    else:
        try:
            data = QueryDict(request.body, encoding=encoding)
        except LookupError:
            data = None
            flag = "Invalid form - invalid charset"

    if data is not None:
        # PayPal sends the literal string "N/A" for unset dates; drop
        # those keys so form validation doesn't choke on them.
        date_fields = ('time_created', 'payment_date', 'next_payment_date',
                       'subscr_date', 'subscr_effective')
        for date_field in date_fields:
            if data.get(date_field) == 'N/A':
                del data[date_field]

        form = PayPalIPNForm(data)
        if form.is_valid():
            try:
                #When commit = False, object is returned without saving to DB.
                ipn_obj = form.save(commit=False)
            except Exception, e:
                flag = "Exception while processing. (%s)" % e
        else:
            flag = "Invalid form. (%s)" % form.errors
Пример #9
0
    def __init__(self, host, port, user, nick, name):
        """Store connection parameters and prepare decoders and listeners."""
        # Connection / identity parameters.
        self._host = host
        self._port = port
        self._user = user
        self._nick = nick
        self._name = name

        # Try UTF-8 first; Latin-1 accepts any byte, so it never fails.
        self._decoders = [getdecoder('utf-8'), getdecoder('latin-1')]

        # One (initially empty) listener list per known event type.
        self._listeners = {evt: [] for evt in events}
Пример #10
0
 def process(self, constant, content):
     """Decode *content* using the codec named in ``constant.options``.

     The text is re-encoded to bytes and run through the configured
     decoder; the result is returned as-is because it may be binary --
     conversion to a string, if needed, happens later.  Returns None
     when no encoding is configured.
     """
     encoding = constant.options["encoding"]
     if encoding is None:
         return None
     try:
         decoder = codecs.getdecoder(encoding)
     except LookupError:
         # Python bug(?): some codecs are only found under "<name>_codec".
         decoder = codecs.getdecoder(encoding + "_codec")
     return decoder(content.encode())[0]
Пример #11
0
 def test_utf_16_encode_decode(self):
     import codecs, sys
     x = '123abc'
     if sys.byteorder == 'big':
         assert codecs.getencoder('utf-16')(x) == (
                 b'\xfe\xff\x001\x002\x003\x00a\x00b\x00c', 6)
         assert codecs.getdecoder('utf-16')(
                 b'\xfe\xff\x001\x002\x003\x00a\x00b\x00c') == (x, 14)
     else:
         assert codecs.getencoder('utf-16')(x) == (
                 b'\xff\xfe1\x002\x003\x00a\x00b\x00c\x00', 6)
         assert codecs.getdecoder('utf-16')(
                 b'\xff\xfe1\x002\x003\x00a\x00b\x00c\x00') == (x, 14)
Пример #12
0
 def setEncoding(self,encoding):
     if 'ascii' in encoding:
         encoding='windows_1252' # so we don't throw an exception on high-bit set chars in there by mistake
     if encoding and not encoding =='text/html':
         try:
             canDecode = codecs.getdecoder(encoding)
             self.encoding = encoding
         except:
             try:
                 encoding='japanese.' +encoding
                 canDecode = codecs.getdecoder(encoding)
                 self.encoding = encoding
             except:
                 print "can't deal with encoding %s" % encoding
Пример #13
0
 def feedfile(self, fp, encoding):
   """Decode *fp* line by line, feed it to the parser, and yield paragraphs.

   Unknown encodings fall back to latin1; undecodable bytes are replaced
   rather than raising.
   """
   try:
     decode = codecs.getdecoder(encoding)
   except LookupError:
     decode = codecs.getdecoder('latin1')
   for raw_line in fp:
     self.para = []
     self.feed(decode(raw_line, 'replace')[0])
     for paragraph in self.para:
       yield paragraph
   # Flush the parser and emit whatever it left in the buffer.
   self.finish()
   for paragraph in self.para:
     yield paragraph
   return
Пример #14
0
 def __init__(self, username, password):
     self.username = username
     self.password = password
     self.user_data = None
     self.full_data = None
     self.gateway_data = None
     self.reader = codecs.getdecoder("utf-8")
Пример #15
0
def writeDoc(x, h):
  """Render XHTML source file *x* into ISO-8859-1 HTML output file *h*.

  Extracts the page title via XPath, splices each section's text into
  the boilerplate template ``bp``, then transcodes the document from
  UTF-8 to ISO-8859-1 using XML character references for anything
  unmappable.

  NOTE(review): Python 2 style -- relies on the libxml2 bindings and on
  module globals ``bp``, ``secRe`` and ``trimWS``.
  """
  f = open(x)
  t = f.read()
  f.close()

  doc = bp

  # Get the title
  xd = libxml2.parseFile(x)
  ctxt = xd.xpathNewContext()
  ctxt.xpathRegisterNs('html', 'http://www.w3.org/1999/xhtml')

  title = ctxt.xpathEvalExpression('string(/fvdoc//html:div[@id="message"])')

  title = trimWS(title)
  doc = doc.replace('<title></title>', '<title>' + title + '</title>')


  # Insert each section's text into its empty docbody placeholder.
  for (sec, txt) in secRe.findall(t):
    r = re.compile('<h2>' + sec + '</h2>\s*<div class="docbody">\s*()</div>', re.IGNORECASE)
    idx = r.search(doc).start(1)
    doc = doc[:idx] + txt + doc[idx:]

  # Decode to unicode, then re-encode for the legacy output charset.
  c = codecs.getdecoder('utf-8')

  doc = c(doc)[0]

  c = codecs.getencoder('iso-8859-1')

  f = open(h, 'w')
  f.write(c(doc, 'xmlcharrefreplace')[0])
  f.close()
Пример #16
0
def transcode(backend_stream, backend_encoding, frontend_encoding,
              errors='strict'):
    """Expose *backend_stream* as if it were encoded in *frontend_encoding*.

    Returns a codecs.StreamRecoder that transparently re-encodes data
    between the stream's real encoding and the encoding the caller
    wants to see; *errors* is the codec error-handling scheme.
    """
    return codecs.StreamRecoder(
        backend_stream,
        codecs.getencoder(frontend_encoding),
        codecs.getdecoder(frontend_encoding),
        codecs.getreader(backend_encoding),
        codecs.getwriter(backend_encoding),
        errors,
    )
Пример #17
0
    def readContents(self, contents):
        """Decode *contents* and hand the unicode text to the parser.

        *contents* arrives in the instance's native encoding with line
        endings already normalized; decoding errors propagate.
        """
        decoded, _consumed = codecs.getdecoder(self.encoding)(contents)
        self.readUnicode(decoded)
Пример #18
0
  def _GetUtf8Contents(self, file_name):
    """Check for errors in file_name and return a string for csv reader."""
    # NOTE(review): Python 2 only -- the 'string_escape' codec and the
    # byte-string .find('\0') / .lstrip(BOM) calls do not exist for
    # Python 3 str objects.
    contents = self._FileContents(file_name)
    if not contents:  # Missing file
      return

    # Check for errors that will prevent csv.reader from working
    if len(contents) >= 2 and contents[0:2] in (codecs.BOM_UTF16_BE,
        codecs.BOM_UTF16_LE):
      self._problems.FileFormat("appears to be encoded in utf-16", (file_name, ))
      # Convert and continue, so we can find more errors
      contents = codecs.getdecoder('utf-16')(contents)[0].encode('utf-8')

    null_index = contents.find('\0')
    if null_index != -1:
      # It is easier to get some surrounding text than calculate the exact
      # row_num
      m = re.search(r'.{,20}\0.{,20}', contents, re.DOTALL)
      self._problems.FileFormat(
          "contains a null in text \"%s\" at byte %d" %
          (codecs.getencoder('string_escape')(m.group()), null_index + 1),
          (file_name, ))
      return

    # strip out any UTF-8 Byte Order Marker (otherwise it'll be
    # treated as part of the first column name, causing a mis-parse)
    contents = contents.lstrip(codecs.BOM_UTF8)
    return contents
Пример #19
0
def calc(bot, target, nick, command, text):
	"""IRC command: query Wolfram Alpha for *text* and reply with the result.

	Scrapes the Wolfram Alpha input page (no API key), pulls the first two
	"pod" JSON blobs (input interpretation and result), unescapes HTML
	entities and unicode escapes, and says the answer back to *nick*.

	NOTE(review): depends on module-level names `rs` (an HTTP session),
	`entity_re`, `re` and `json`; page scraping is fragile by nature.
	"""
	import codecs
	import html.entities
	def substitute_entity(match):
		# Numeric entities: decimal (&#65;) or hex (&#x41;).
		ent = match.group(3)
		if match.group(1) == "#":
			if match.group(2) == '':
				return chr(int(ent))
			elif match.group(2) == 'x':
				return chr(int('0x'+ent, 16))
		else:
			# Named entities (&amp; etc.); unknown names pass through.
			cp = html.entities.name2codepoint.get(ent)
			if cp:
				return chr(cp)
			return match.group()
	def decode_htmlentities(string):
		return entity_re.subn(substitute_entity, string)[0]

	if not text:
		return
	response = rs.get('http://www.wolframalpha.com/input/', params={'i': text}).text
	matches = re.findall('context\.jsonArray\.popups\.pod_....\.push\((.*)\);', response)
	if len(matches) < 2:
		bot.say(target, nick + ': Error calculating.')
		return
	input_interpretation = json.loads(matches[0])['stringified']
	result = json.loads(matches[1])['stringified']
	output = '%s = %s' % (input_interpretation, result)
	output = output.replace('\u00a0', ' ') # replace nbsp with space
	output = codecs.getdecoder('unicode_escape')(output)[0]
	output = re.subn('<sup>(.*)</sup>', r'^(\1)', output)[0]
	output = decode_htmlentities(output)
	bot.say(target, '%s: %s' % (nick, output))
Пример #20
0
def open_filename(filename, mode, encoding="utf-8"):
    """Opens a filename in the specified mode, with support for
    standard input and output as "-".

    For "-", the matching std stream is returned, wrapped in a codec
    reader/writer when the stream declares no encoding of its own (or
    when a byte-oriented wrapper is requested).  *encoding* is the
    fallback used when the stream does not declare one.
    """
    
    if filename == "-":
        if mode[0] == "w":
            if mode[1:2] == "b":
                if sys.stdout.encoding is not None:
                    return codecs.getwriter(sys.stdout.encoding)(sys.stdout)
                else:
                    return sys.stdout
            else:
                if sys.stdout.encoding is not None:
                    return sys.stdout
                else:
                    # BUG FIX: was codecs.getdecoder(...), which returns a
                    # decode *function*, not a stream.  Writing to stdout
                    # needs a StreamWriter wrapper.
                    return codecs.getwriter(encoding)(sys.stdout)
        
        elif mode[0] == "r":
            if mode[1:2] == "b":
                if sys.stdin.encoding is not None:
                    return codecs.getreader(sys.stdin.encoding)(sys.stdin)
                else:
                    return sys.stdin
            else:
                if sys.stdin.encoding is not None:
                    return sys.stdin
                else:
                    # BUG FIX: was `codes.getencoder(...)` -- a NameError
                    # (misspelled module) and the wrong direction; reading
                    # needs a StreamReader around stdin.
                    return codecs.getreader(encoding)(sys.stdin)
    else:
        return open(filename, mode)
Пример #21
0
    def complete(self, body):
        """Finish a multipart upload: validate the parts and build the payload.

        *body* is an iterable of (part_number, etag) pairs in upload order.
        Returns (payload_bytes, composite_etag) where the etag follows the
        S3 convention "<md5-of-concatenated-part-md5s>-<part count>".
        Raises InvalidPart for a missing/mismatched part and EntityTooSmall
        when any non-final part is under the minimum size.
        """
        hex_to_bytes = codecs.getdecoder("hex_codec")
        payload = bytearray()
        digest_concat = bytearray()

        previous = None
        part_count = 0
        for part_number, claimed_etag in body:
            part = self.parts.get(part_number)
            stored_etag = None
            if part is not None:
                stored_etag = part.etag.replace('"', '')
                claimed_etag = claimed_etag.replace('"', '')
            if part is None or stored_etag != claimed_etag:
                raise InvalidPart()
            # The size floor applies to every part except the last; check
            # the previous part once a newer one has arrived.
            if previous is not None and len(previous.value) < UPLOAD_PART_MIN_SIZE:
                raise EntityTooSmall()
            digest_concat.extend(hex_to_bytes(stored_etag)[0])
            payload.extend(part.value)
            previous = part
            part_count += 1

        composite = hashlib.md5()
        composite.update(bytes(digest_concat))
        return payload, "{0}-{1}".format(composite.hexdigest(), part_count)
Пример #22
0
    def _dumpsigns(self, command):
        """
    dumpSigns [ <filename> ]

    Saves the text and location of every sign in the world to a text file.
    With no filename, saves signs to <worldname>.signs

    Output is newline-delimited. 5 lines per sign. Coordinates are
    on the first line, followed by four lines of sign text. For example:

        [229, 118, -15]
        "To boldy go
        where no man
        has gone
        before."

    Coordinates are ordered the same as point inputs:
        [North/South, Down/Up, East/West]

    """
        # NOTE(review): Python 2 (print statements, u"" concatenation).
        if len(command):
            filename = command[0]
        else:
            filename = self.level.displayName + ".signs"

        # It appears that Minecraft interprets the sign text as UTF-8,
        # so we should decode it as such too.
        decodeSignText = codecs.getdecoder('utf-8')
        # We happen to encode the output file in UTF-8 too, although
        # we could use another UTF encoding.  The '-sig' encoding puts
        # a signature at the start of the output file that tools such
        # as Microsoft Windows Notepad and Emacs understand to mean
        # the file has UTF-8 encoding.
        outFile = codecs.open(filename, "w", encoding='utf-8-sig')

        print "Dumping signs..."
        signCount = 0

        # Walk every chunk, skipping those that fail to load.
        for i, cPos in enumerate(self.level.allChunks):
            try:
                chunk = self.level.getChunk(*cPos)
            except mclevelbase.ChunkMalformed:
                continue

            for tileEntity in chunk.TileEntities:
                if tileEntity["id"].value == "Sign":
                    signCount += 1

                    # One line of coordinates, then the four text lines.
                    outFile.write(str(map(lambda x: tileEntity[x].value, "xyz")) + "\n")
                    for i in range(4):
                        signText = tileEntity["Text{0}".format(i + 1)].value
                        outFile.write(decodeSignText(signText)[0] + u"\n")

            if i % 100 == 0:
                print "Chunk {0}...".format(i)


        print "Dumped {0} signs to {1}".format(signCount, filename)

        outFile.close()
Пример #23
0
def main():
    """CLI entry point: build and pickle a corpus object from a CSV file.

    Parses arguments, unescapes the column delimiter (so a literal
    backslash-t on the command line becomes a tab), loads the corpus via
    load_corpus_csv(), and saves it next to the input with a .corpus
    extension.  When the path is not found as given, the same basename is
    retried relative to this script's directory, first with '/' and then
    with '\\' separators.
    """

    #### Parse command-line arguments
    parser = argparse.ArgumentParser(description = \
             'Phonological CorpusTools: corpus object creation CL interface')
    parser.add_argument('csv_file_name', help='Name of input CSV file')
    parser.add_argument('-f', '--feature_file_name', default = '', type=str, help='Name of input feature file')
    parser.add_argument('-d', '--delimiter', default='\t', type=str, help='Character that delimits columns in the input file')
    parser.add_argument('-t', '--trans_delimiter', default='', type=str, help='Character that delimits segments in the input file')

    args = parser.parse_args()

    ####

    # Turn escape sequences typed on the command line into real characters.
    delimiter = codecs.getdecoder("unicode_escape")(args.delimiter)[0]

    try: # Full path specified
        filename, extension = os.path.splitext(args.csv_file_name)
        filename = path_leaf(filename)
        corpus = load_corpus_csv(args.csv_file_name, args.csv_file_name,
                delimiter, args.trans_delimiter, args.feature_file_name)
        save_binary(corpus, filename+'.corpus')
    except FileNotFoundError:
        #FIXME! os.path.join takes care of os specific paths
        try: # Unix filepaths
            filename, extension = os.path.splitext(os.path.dirname(os.path.realpath(__file__))+'/'+args.csv_file_name)
            corpus = load_corpus_csv(args.csv_file_name, os.path.dirname(os.path.realpath(__file__))+'/'+args.csv_file_name,
                    delimiter, args.trans_delimiter, os.path.dirname(os.path.realpath(__file__))+'/'+args.feature_file_name)
            save_binary(corpus, filename+'.corpus')
        except FileNotFoundError: # Windows filepaths
            filename, extension = os.path.splitext(os.path.dirname(os.path.realpath(__file__))+'\\'+args.csv_file_name)
            corpus = load_corpus_csv(args.csv_file_name, os.path.dirname(os.path.realpath(__file__))+'\\'+args.csv_file_name,
                    delimiter, args.trans_delimiter, os.path.dirname(os.path.realpath(__file__))+'\\'+args.feature_file_name)
            save_binary(corpus, filename+'.corpus')
Пример #24
0
def run_ltl2dra(formula):
    """Run ltl2dstar on *formula* and return its ASCII output as a string.

    SECURITY FIX: the formula used to be interpolated into a shell
    pipeline (`echo "..." | ./MDP_TG/ltl2dstar ...` with shell=True),
    so quotes or metacharacters in *formula* could run arbitrary
    commands.  The formula is now passed on stdin with an argv list.
    """
    cmd = ["./MDP_TG/ltl2dstar", "--ltl2nba=spin:./MDP_TG/ltl2ba",
           "--stutter=no", "-", "-"]
    # `echo` appended a trailing newline; preserve that framing.
    raw_output = check_output(cmd, input=(formula + "\n").encode("ascii"))
    ascii_decoder = getdecoder("ascii")
    (output, _) = ascii_decoder(raw_output)
    return output
Пример #25
0
def Test(tester):
    """Run the BIG5-encoded XSLT test cases against the harness.

    Skips with a warning when neither the codecs registry nor the
    encodings package provides BIG5 support.
    """
    tester.startTest("Checking for BIG5 codec")
    try:
        import codecs
        big5_decoder = codecs.getdecoder('big5')
    except LookupError:
        # No registered codec: fall back to the optional encodings package.
        try:
            from encodings import big5
        except ImportError:
            tester.warning(
                "No BIG5 encoding support for case 1.  You can install \n"
                "BIG5 by downloading and installing ChineseCodes from\n"
                "ftp://python-codecs.sourceforge.net/pub/python-codecs/")
            tester.testDone()
            return
        else:
            big5_decode = big5.decode
    else:
        # Normalise both paths to a callable returning just the text.
        big5_decode = lambda s: big5_decoder(s)[0]
    tester.testDone()
            
    b5 = big5_decode(source_1)
    utf8 = b5.encode("utf-8")
        
    source = test_harness.FileInfo(string=utf8)
    sheet = test_harness.FileInfo(string=sheet_1)
    test_harness.XsltTest(tester, source, [sheet], expected_1)

    source = test_harness.FileInfo(string=source_2)
    sheet = test_harness.FileInfo(string=sheet_1)
    test_harness.XsltTest(tester, source, [sheet], expected_2)
    return
Пример #26
0
 def PrintTraceback(self, msg=None, skip=0, source=None, excInfo=None):
     """Format the current (or supplied) exception and write it to the log.

     msg: optional header, printed first via PrintError.
     skip: number of leading traceback frames to drop.
     source: originating object; stored as a weak reference with the entry.
     excInfo: (type, value, traceback) tuple; defaults to sys.exc_info().
     """
     if msg:
         self.PrintError(msg, source=source)
     if excInfo is None:
         excInfo = sys.exc_info()
     tbType, tbValue, tbTraceback = excInfo
     slist = ['Traceback (most recent call last) (%s):\n' % eg.Version.string]
     # BUG FIX: `decode` was only assigned inside the `if tbTraceback:`
     # branch but is also used after it, raising NameError whenever there
     # was no traceback object.  Bind it unconditionally up front.
     decode = codecs.getdecoder('mbcs')
     if tbTraceback:
         for fname, lno, funcName, text in extract_tb(tbTraceback)[skip:]:
             slist.append(
                 u'  File "%s", line %d, in %s\n' % (
                     decode(fname)[0], lno, funcName
                 )
             )
             if text:
                 slist.append("    %s\n" % text)
     for line in format_exception_only(tbType, tbValue):
         slist.append(decode(line)[0])
     error = "".join(slist)
     if source is not None:
         source = ref(source)
     self.Write(error.rstrip() + "\n", ERROR_ICON, source)
     if eg.debugLevel:
         oldStdErr.write(error)
Пример #27
0
 def _compose(self,obj):
    encoding = obj.get_root().get_encoding()
    decode = codecs.getdecoder(encoding)
    #Ensure text values can be transcribed in the system encoding
    # will rasise an exception we will propagate if not decodeable.
    decode(self.parts["txt"])
    pass
Пример #28
0
def rot13_decode(decvalue):
    """ROT13-decode *decvalue*.  Example: 'Uryyb Jbeyq' -> 'Hello World'."""
    text, _consumed = getdecoder("rot-13")(decvalue)
    return text
Пример #29
0
 def test_basics(self):
     """Round-trip a sample string through every supported string codec."""
     sample = "abc123"
     for name in all_string_encodings:
         encoded, consumed = codecs.getencoder(name)(sample)
         self.assertEqual(consumed, len(sample))
         decoded, _ = codecs.getdecoder(name)(encoded)
         self.assertEqual(decoded, sample, "%r != %r (encoding=%r)" % (decoded, sample, name))
	def __init__(self, hard):
		"""Spawn a BlameThread per tracked file in the repository.

		Lists files via `git ls-tree`, filters them by extension and
		exclusion rules, and starts a daemon thread running `git blame`
		for each; results accumulate in self.blames.  *hard* enables
		copy/move detection (-C -C -M) and progress output.

		NOTE(review): relies on module-level interval, filtering,
		FileDiff, BlameThread, missing, NUM_THREADS and __thread_lock__.
		"""
		self.blames = {}
		ls_tree_r = subprocess.Popen("git ls-tree --name-only -r " + interval.get_ref(), shell=True, bufsize=1,
		                             stdout=subprocess.PIPE).stdout
		lines = ls_tree_r.readlines()

		for i, row in enumerate(lines):
			# Undo git's escaped/quoted file names, then normalise to UTF-8.
			row = codecs.getdecoder("unicode_escape")(row.strip())[0]
			row = row.encode("latin-1", "replace")
			row = row.decode("utf-8", "replace").strip("\"").strip("'").strip()

			if FileDiff.is_valid_extension(row) and not filtering.set_filtered(FileDiff.get_filename(row)):
				if not missing.add(row):
					blame_string = "git blame -w {0} ".format("-C -C -M" if hard else "") + \
					               interval.get_since() + interval.get_ref() + " -- \"" + row + "\""
					thread = BlameThread(blame_string, FileDiff.get_extension(row), self.blames, row.strip())
					thread.daemon = True
					thread.start()

					if hard:
						Blame.output_progress(i, len(lines))

		# Make sure all threads have completed.
		for i in range(0, NUM_THREADS):
			__thread_lock__.acquire()
Пример #31
0
    def __next__(self):
        """Return the next (parameters, value) pair parsed from the vfile.

        Reads one logical property line (joining folded continuation
        lines and quoted-printable soft line breaks), splits it at the
        first colon, applies any ENCODING=/CHARSET= parameters, and
        returns the remaining upper-cased parameter list together with
        the decoded value.  Raises StopIteration at end of input and
        VFileException on malformed properties.
        """
        # Get the next non-blank line
        while True:  # python desperately needs do-while
            line = self._getnextline()

            if line is None:
                raise StopIteration()

            if len(line) != 0:
                break

        # Hack for evolution.  If ENCODING is QUOTED-PRINTABLE then it doesn't
        # offset the next line, so we look to see what the first char is
        normalcontinuations = True
        colon = line.find(':')
        if colon > 0:
            s = line[:colon].lower().split(";")

            if "quoted-printable" in s or 'encoding=quoted-printable' in s:
                normalcontinuations = False
                # NOTE(review): assumes the line has length >= 2 here; a
                # one-character QP line would raise IndexError on line[-2].
                while line[-1] == "=" or line[-2] == '=':
                    if line[-1] == '=':
                        i = -1
                    else:
                        i = -2

                    nextl = self._getnextline()
                    if nextl[0] in ("\t", " "): nextl = nextl[1:]
                    line = line[:i] + nextl

        # Standard RFC folding: lines starting with space/tab continue
        # the previous line.
        while normalcontinuations:
            nextline = self._lookahead()

            if nextline is None:
                break

            if len(nextline) == 0:
                break

            if nextline[0] != ' ' and nextline[0] != '\t':
                break

            line += self._getnextline()[1:]

        colon = line.find(':')

        if colon < 1:
            # some evolution vcards don't even have colons
            # raise VFileException("Invalid property: "+line)
            log.debug("Fixing up bad line: %s" % line)

            colon = len(line)
            line += ":"

        b4 = line[:colon]
        line = line[colon + 1:].strip()

        # upper case and split on semicolons
        items = b4.upper().split(";")

        newitems = []
        # NOTE(review): a bytes value defaults to LATIN-1 until a CHARSET=
        # parameter says otherwise; a str value needs no charset at all.
        if isinstance(line, str):
            charset = None

        else:
            charset = "LATIN-1"

        for i in items:
            # ::TODO:: probably delete anything preceding a '.'
            # (see 5.8.2 in rfc 2425)
            # look for charset parameter
            if i.startswith("CHARSET="):
                charset = i[8:] or "LATIN-1"
                continue

        # unencode anything that needs it
            if not i.startswith(
                    "ENCODING="
            ) and not i == "QUOTED-PRINTABLE":  # evolution doesn't bother with "ENCODING="
                # ::TODO:: deal with backslashes, being especially careful with ones quoting semicolons
                newitems.append(i)
                continue

            try:
                if i == 'QUOTED-PRINTABLE' or i == "ENCODING=QUOTED-PRINTABLE":
                    # technically quoted printable is ascii only but we decode anyway since not all vcards comply
                    line = quopri.decodestring(line)

                elif i == 'ENCODING=B':
                    line = base64.decodestring(line)
                    charset = None

                else:
                    raise VFileException("unknown encoding: " + i)

            except Exception as e:
                if isinstance(e, VFileException):
                    raise e
                raise VFileException(
                    "Exception %s while processing encoding %s on data '%s'" %
                    (str(e), i, line))

        # ::TODO:: repeat above shenanigans looking for a VALUE= thingy and
        # convert line as in 5.8.4 of rfc 2425
        if len(newitems) == 0:
            raise VFileException("Line contains no property: %s" % (line, ))

        # charset frigging
        if charset is not None:
            try:
                decoder = codecs.getdecoder(
                    self._charset_aliases.get(charset, charset))
                line, _ = decoder(line)
            except LookupError:
                raise VFileException(
                    "unknown character set '%s' in parameters %s" %
                    (charset, b4))

        # Normalise structural markers so BEGIN:vcard == BEGIN:VCARD.
        if newitems == ["BEGIN"] or newitems == ["END"]:
            line = line.upper()

        return newitems, line
Пример #32
0
def hex_decode(data):
    """Decode a hex string (or bytes) into the raw bytes it represents."""
    decoded, _consumed = getdecoder('hex')(data)
    return decoded
Пример #33
0
  def run(self):
    """Worker loop: drive one JVM test-runner subprocess.

    Pulls job names from self.parent.nextJob(), writes each to the
    child's stdin, parses the event stream written to the shared events
    file for output chunks, stack traces and suite timing, and reports
    (job, msec, output) back via self.parent.sendResult().  Exits
    (killing the child) when no jobs remain.
    """

    eventsFile = '%s/lucene/build/C%d.events' % (self.parent.rootDir, self.id)
    # Start from a clean slate so stale events from a previous run are
    # never parsed.
    if os.path.exists(eventsFile):
      os.remove(eventsFile)

    cmd = '%s -eventsfile %s' % (self.parent.command, eventsFile)

    # TODO
    #   - add -Dtests.seed=XXX, eg -Dtests.seed=771F118CC53F329
    #   - add -eventsfile /l/lucene.trunk/lucene/build/core/test/junit4-J0-0819129977b5076df.events @/l/lucene.trunk/lucene/build/core/test/junit4-J0-1916253054fa0d84f.suites

    try:
      #self.parent.remotePrint('C%d init' % self.id)

      # TODO
      p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE, env=self.parent.env)
      #self.parent.remotePrint('C%d subprocess started' % self.id)

      # ReadEvents tails the events file; waitIdle() blocks until the
      # runner goes idle and returns the event lines seen meanwhile.
      events = ReadEvents(p, eventsFile, self.parent)
      #self.parent.remotePrint('C%d startup0 done' % self.id)
      events.waitIdle()

      #self.parent.remotePrint('startup done C%d' % self.id)

      while True:
        job = self.parent.nextJob()
        if job is None:
          #self.parent.remotePrint('C%d no more jobs' % self.id)
          #p.stdin.close()
          p.kill()
          break
        #self.parent.remotePrint('C%d: job %s' % (self.id, job))
        # NOTE(review): writes a str to the pipe -- assumes Python 2 or a
        # text-mode stdin; confirm before porting to Python 3.
        p.stdin.write(job + '\n')
        results = events.waitIdle()

        endSuite = False
        output = []
        failed = False
        msec = None
        for l in results:
          #if l.find('"chunk": ') != -1 or l.find('"bytes": ') != -1:
          if l.find('"chunk": ') != -1:
            # The hex-decoding branch below is disabled ('if False'); the
            # live path just slices the raw chunk text out of the line.
            if False:
              chunk = l.strip().split()[-1][1:-1]
              #self.parent.remotePrint('C%d: chunk=%s' % (self.id, chunk))
              bytes = []
              idx = 0
              while idx < len(chunk):
                bytes.append(chr(int(chunk[idx:idx+2], 16)))
                idx += 2
              try:
                # Spooky I must replace!!!  eg chunk=383637205432313220433720524551205B636F6C6C656374696F6E315D207765626170703D6E756C6C20706174683D6E756C6C20706172616D733D7B736F72743D69642B61736326666C3D696426713D736F72745F74725F63616E6F6E3A22492B57696C6C2B5573652B5475726B6973682B436173F56E67227D20686974733D33207374617475733D30205154696D653D31200A
                output.append(codecs.getdecoder('UTF8')(''.join(bytes), errors='replace')[0])
              except:
                self.parent.remotePrint('C%d: EXC:\n%s\nchunk=%s' % (self.id, traceback.format_exc(), chunk))
            else:
              l = l.strip()[14:-1]

          if l.find('"trace": ') != -1:
            # Unescape the JSON-encoded stack trace and keep it; an
            # AssumptionViolatedException is a skip, not a failure.
            chunk = l.strip().replace('"trace": "', '')[:-2]
            chunk = chunk.replace('\\n', '\n')
            chunk = chunk.replace('\\t', '\t')
            output.append(chunk)
            if chunk.find('AssumptionViolatedException') == -1:
              failed = True

          if l.find('"SUITE_COMPLETED"') != -1:
            endSuite = True
          elif endSuite and l.find('"executionTime"') != -1:
            msec = int(l.strip()[:-1].split()[1])
            break

        # Output is only interesting for failed suites.
        if not failed:
          output = []
        self.parent.sendResult((job, msec, output))

    except:
      self.parent.remotePrint('C%d: EXC:\n%s' % (self.id, traceback.format_exc()))
Пример #34
0
# Copyright (C) 2018 Wayne Chan. Licensed under the Mozilla Public License,
# Please refer to http://mozilla.org/MPL/2.0/

import struct

from .. import util
from .. import protocol

from . import coin

__all__ = ['Newbitcoin']  # public API of this module

# Raw script bytes for pay-to-miner outputs; opcode meanings per comment:
_PAY2MINER = b'\x76\xb8\xb9\x88\xac'  # DUP OP_HASH512 OP_MINERHASH OP_EQUALVERIFY OP_CHECKSIG

import codecs

# Codec-based hex decoder, usable on both Python 2 and 3.
_decodeHex = codecs.getdecoder("hex_codec")


def decodeHex(s):
    """Decode a hex string into bytes.

    The Python-2-only idiom ``'ff'.decode('hex')`` is avoided on purpose;
    the hex_codec decoder works on every supported interpreter.
    """
    decoded, _length = _decodeHex(s)
    return decoded


class Newbitcoin(coin.Coin):
    # Presumably the confirmation depth at which a block is considered
    # final -- TODO confirm against coin.Coin's use of this constant.
    FINAL_CONFIRM_LEN = 504
    # Presumably how many confirmations before coinbase outputs are
    # spendable -- TODO confirm.
    COINBASE_MATURITY = 16

    # Base URL for the coin's web API.
    WEB_SERVER_ADDR = 'https://api.nb-coin.com'

    name = "newbitcoin"
    symbols = ['NBC']  # all symbols
Пример #35
0
                    yield tuple(pool[i] for i in indices[:r])
                    break
            else:
                return


# Pick the highest-resolution wall clock per platform.
# NOTE(review): time.clock was removed in Python 3.8 -- confirm this
# module still targets interpreters that provide it.
if sys.platform == 'win32':
    now = time.clock
else:
    now = time.time

# Note: these functions return a tuple of (text, length), so when you call
# them, you have to add [0] on the end, e.g. str = utf8encode(unicode)[0]

utf8encode = codecs.getencoder("utf_8")
utf8decode = codecs.getdecoder("utf_8")

# Functions


def array_to_string(a):
    """Serialize an array.array to bytes in big-endian order.

    On little-endian hosts the array is copied and byte-swapped first so
    the output is platform independent; the caller's array is never
    mutated.
    """
    if IS_LITTLE:
        a = copy(a)
        a.byteswap()
    # array.tostring() was deprecated and removed in Python 3.9; prefer
    # tobytes() when available, falling back for old interpreters.
    if hasattr(a, 'tobytes'):
        return a.tobytes()
    return a.tostring()


def string_to_array(typecode, s):
    a = array(typecode)
    a.fromstring(s)
    if IS_LITTLE:
Пример #36
0
    def get_name(self, name, platform=None, languages=None):
        """Returns the value of the given name in this font.

        :Parameters:
            `name`
                Either an integer, representing the name_id desired (see
                font format); or a string describing it, see below for
                valid names.
            `platform`
                Platform for the requested name.  Can be the integer ID,
                or a string describing it.  By default, the Microsoft
                platform is searched first, then Macintosh.
            `languages`
                A list of language IDs to search.  The first language
                which defines the requested name will be used.  By default,
                all English dialects are searched.

        If the name is not found, ``None`` is returned.  If the name
        is found, the value will be decoded and returned as a unicode
        string.  Currently only some common encodings are supported.

        Valid names to request are (supply as a string)::

            'copyright'
            'family'
            'subfamily'
            'identifier'
            'name'
            'version'
            'postscript'
            'trademark'
            'manufacturer'
            'designer'
            'description'
            'vendor-url'
            'designer-url'
            'license'
            'license-url'
            'preferred-family'
            'preferred-subfamily'
            'compatible-name'
            'sample'

        Valid platforms to request are (supply as a string)::

            'unicode'
            'macintosh'
            'iso'
            'microsoft'
            'custom'
        """

        names = self.get_names()
        # Map a symbolic name ('family', ...) to its numeric name ID.
        if type(name) == str:
            name = self._name_id_lookup[name]
        if not platform:
            # No platform given: recurse with each preferred platform in
            # turn and return the first hit.
            for platform in ('microsoft','macintosh'):
                value = self.get_name(name, platform, languages)
                if value:
                    return value
        if type(platform) == str:
            platform = self._platform_id_lookup[platform]
        if not (platform, name) in names:
            return None

        if platform == 3: # setup for microsoft
            encodings = self._microsoft_encoding_lookup
            if not languages:
                # Default to english languages for microsoft
                languages = (0x409,0x809,0xc09,0x1009,0x1409,0x1809)
        elif platform == 1: # setup for macintosh
            # NOTE(review): double leading underscore triggers name
            # mangling, unlike the single underscore used for the
            # microsoft lookup above -- confirm the attribute really is
            # declared with this name.
            encodings = self.__macintosh_encoding_lookup
            if not languages:
                # Default to english for macintosh
                languages = (0,)

        # NOTE(review): for any platform id other than 3 or 1,
        # 'encodings' is never bound and the loop below raises NameError
        # -- confirm callers only pass microsoft/macintosh.
        for record in names[(platform, name)]:
            # record layout (from usage): (encoding_id, language_id, raw bytes)
            if record[1] in languages and record[0] in encodings:
                decoder = codecs.getdecoder(encodings[record[0]])
                return decoder(record[2])[0]
        return None
def clean_path(path):
    """Expand a leading '~' and interpret backslash escapes in *path*."""
    expanded = os.path.expanduser(path)
    cleaned, _length = codecs.getdecoder("unicode_escape")(expanded)
    return cleaned
Пример #38
0
def _unescape(text):
    """Unescape unicode character codes within a string.
  """
    pattern = r'\\{1,2}u[0-9a-fA-F]{4}'
    decode = lambda x: codecs.getdecoder('unicode_escape')(x.group())[0]
    return re.sub(pattern, decode, text)
Пример #39
0
import codecs
import re
import unicodedata

# Cache the codec lookups and unicodedata helpers once at import time.
_utf8_encoder = codecs.getencoder("utf-8")
_utf8_decoder = codecs.getdecoder("utf-8")
_unicode_normalize = unicodedata.normalize
_unicode_category = unicodedata.category

MATCH_OPEN_TAG = re.compile(r"\<([^\/]+?)\>")
MATCH_CLOSE_TAG = re.compile(r"\<(\/.+?)\>")


def enc_utf8(s):
    """Encode text *s* to UTF-8 bytes."""
    encoded, _length = _utf8_encoder(s)
    return encoded


def dec_utf8(s):
    """Decode UTF-8 bytes *s* back to text."""
    decoded, _length = _utf8_decoder(s)
    return decoded


def normalize_token(t):
    """Fold *t* into a normalized key: map the copyright sign to 'c',
    NFKD-decompose, then drop combining marks (category 'Mn')."""
    decomposed = _unicode_normalize(u"NFKD", t.replace(u"\u00A9", u"c"))
    return u"".join(ch for ch in decomposed if _unicode_category(ch) != "Mn")
Пример #40
0
# -*- coding: utf-8 -*-
from __future__ import absolute_import

import codecs
import os
import sys
import warnings
import re
from collections import OrderedDict

__escape_decoder = codecs.getdecoder('unicode_escape')
# Raw string: the original non-raw literal relied on invalid escape
# sequences ('\$', '\{', '\}'), which raise DeprecationWarning and are a
# syntax error in future Python versions.  The pattern itself is unchanged.
__posix_variable = re.compile(r'\$\{[^\}]*\}')


def decode_escaped(escaped):
    """Interpret backslash escape sequences (\\n, \\t, \\uXXXX, ...) in
    *escaped* and return the resulting string."""
    return __escape_decoder(escaped)[0]


def load_dotenv(dotenv_path):
    """Read a .env file and load its values into ``os.environ``.

    Variables already present in the environment are left untouched.
    Returns True on success, or None when the file does not exist.
    """
    if not os.path.exists(dotenv_path):
        warnings.warn("Not loading %s - it doesn't exist." % dotenv_path)
        return None
    for key, value in dotenv_values(dotenv_path).items():
        os.environ.setdefault(key, value)
    return True


def get_key(dotenv_path, key_to_get):
Пример #41
0
    def download_part(self, url, output, key, media_sequence, tasks):
        """Download one encrypted media segment and append it, decrypted,
        to the file *output*; returns the filename.

        The stream is decrypted chunk-by-chunk with AES-CBC using
        key.key_value and either the playlist-supplied IV (key.iv) or,
        when absent, the media sequence number as a 16-byte IV
        (presumably the HLS default -- TODO confirm).  Progress bars for
        the overall download and for this part are printed as a side
        effect; per-part byte counters are kept in self.part_offset /
        self.part_size keyed by the sequence number.
        """
        if key.iv is not None:
            iv = str(key.iv)[2:]  # drop the '0x' prefix
        else:
            iv = "%032x" % media_sequence
        backend = default_backend()
        decode_hex = codecs.getdecoder("hex_codec")
        aes = Cipher(algorithms.AES(key.key_value),
                     modes.CBC(decode_hex(iv)[0]),
                     backend=backend)
        decryptor = aes.decryptor()
        self.part_offset[str(media_sequence)] = 0
        with self.session.get(url, stream=True) as response:
            response.raise_for_status()
            filename = output
            self.part_size[str(
                media_sequence)] = response.headers['Content-Length']
            with open(filename, 'ab') as f_handle:
                for chunk in response.iter_content(chunk_size=blocksize):
                    self.offset += len(chunk)
                    self.part_offset[str(media_sequence)] += len(chunk)
                    total_size = 0

                    # The total size is summed on a background thread; until
                    # every 'compute_total_size_thread_' finishes we render an
                    # indeterminate overall bar.
                    finished_calculating_ = True
                    for i in threading.enumerate():
                        if i.name == 'compute_total_size_thread_':
                            finished_calculating_ = finished_calculating_ and not i.is_alive(
                            )
                    if not finished_calculating_:
                        self.progress_bar_print[0] = progress_bar_(
                            self.offset,
                            self.offset + 1,
                            size_adj(self.offset, 'harddisk'),
                            '    @ ' + str(
                                size_adj(
                                    self.offset /
                                    (time.process_time() - self.start_t),
                                    'internet')),
                            text_end_lenght=17,
                            center_bgc='',
                            defult_bgc='')
                    else:
                        for part_size in self.part_size.values():
                            total_size += int(part_size)
                        self.progress_bar_print[0] = progress_bar_(
                            self.offset,
                            total_size,
                            size_adj(self.offset, 'harddisk') + '/' +
                            size_adj(total_size, 'harddisk'),
                            '%' + str(round(self.offset * 100 / total_size)) +
                            ' @ ' + str(
                                size_adj(
                                    self.offset /
                                    (time.process_time() - self.start_t),
                                    'internet')),
                            text_end_lenght=17)
                    # Per-part progress line: slot 0 is the overall bar, this
                    # task's line is indexed by its position in self.tasks1.
                    self.progress_bar_print[
                        self.tasks1.index(threading.currentThread()) +
                        1] = progress_bar_(
                            self.part_offset[str(media_sequence)],
                            int(response.headers['Content-Length']),
                            'Part#' + str(media_sequence),
                            '%' + str(
                                round(
                                    self.part_offset[str(media_sequence)] * 100
                                    / int(response.headers['Content-Length']),
                                    2)) + ' ',
                            text_end_lenght=17)
                    # self.printing is a crude mutual-exclusion flag so that
                    # concurrent parts don't interleave their console output.
                    if not self.printing:
                        self.printing = True
                        print('\n'.join(self.progress_bar_print) +
                              '\033[A' * len(self.progress_bar_print) + '\x0d')
                        self.printing = False
                    if chunk:
                        f_handle.write(decryptor.update(chunk))
                decryptor.finalize()
            return filename
Пример #42
0
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals

import codecs
import fileinput
import io
import os
import re
import sys
from subprocess import Popen, PIPE, STDOUT
import warnings
from collections import OrderedDict

from .compat import StringIO

# 'string_escape' is a Python-2-only codec (this module imports from
# __future__ and is py2 code); it does not exist on Python 3.
__escape_decoder = codecs.getdecoder('string_escape')
# Raw string: the original non-raw literal relied on invalid escape
# sequences ('\$', '\{', '\}'), which emit DeprecationWarnings.  The
# compiled pattern itself is unchanged.
__posix_variable = re.compile(r'\$\{[^\}]*\}')


def decode_escaped(escaped):
    """Interpret backslash escape sequences in *escaped* and return the
    resulting string."""
    return __escape_decoder(escaped)[0]


def parse_line(line):
    line = line.strip()

    # Ignore lines with `#` or which doesn't have `=` in it.
    if not line or line.startswith('#') or '=' not in line:
        return None, None

    k, v = line.split('=', 1)
Пример #43
0
def index():
    """Flask view: accept a double-wrapped XML payload via POST and store
    its fields as one row of the DB_TABLE PostgreSQL table.

    GET returns a usage hint.  POST parses the outer XML, unescapes and
    parses the inner document, validates tag names/values against the
    module regexes, creates the table on first use, rejects duplicate
    rows, and inserts the values.  All outcomes are reported as a
    "<RESPONSE>...</RESPONSE>" string.
    """
    if request.method == 'GET':
        return "<RESPONSE>Send XML data</RESPONSE>"

    if request.method == 'POST':
        # Database connection
        try:
            conn = psycopg2.connect(
                    database=DB_NAME,
                    user=DB_USER,
                    password=DB_PASS,
                    host=DB_HOST
                )
            cursor = conn.cursor()
        except Exception:
            return "<RESPONSE>Error connecting to database</RESPONSE>"

        data = request.data
        if type(data) == bytes:
            data = request.data.decode('utf-8')

        # Strip any XML declaration ("<?xml ... ?>") before parsing.
        declaration = data.find("?>")
        if declaration != -1:
            data = data[declaration + 2:]

        try:
            # The payload is double-wrapped: the inner document arrives
            # escaped inside the outer one, so parse, unescape, re-parse.
            dataTag = ET.fromstring(data)
            data = dataTag[0][0][0].text
            data = codecs.getdecoder("unicode_escape")(data)[0]
            dataTag = ET.fromstring(data)
            dataTag = dataTag[0]

            # Validate tag names (they become SQL column identifiers, which
            # cannot be bound as parameters, so the regex check is the guard).
            for x in dataTag:
                if not re.match(VALID_IDENTIFIER, x.tag):
                    return "<RESPONSE>Invalid XML (Tag name)</RESPONSE>"

            # Validate tag values
            for x in dataTag:
                if x.text == None:
                    x.text = ""
                if not re.match(VALID_VALUE, x.text):
                    return "<RESPONSE>Invalid XML (Tag value)</RESPONSE>"

            # Create table if not exists
            cursor.execute("select * from information_schema.tables where table_name=%s", (DB_TABLE,))
            exists = bool(cursor.rowcount)
            if not exists:
                tags = ",".join(['"{0}" character varying'.format(x.tag) for x in dataTag])
                query = 'CREATE TABLE public."{0}" ({1}) WITH (OIDS=FALSE);'.format(DB_TABLE, tags)
                cursor.execute(query)
                conn.commit()

            values = [x.text for x in dataTag]

            # Check for duplicates in DB.  Values are passed as bind
            # parameters -- the original interpolated them into the SQL
            # string, which allowed SQL injection -- and the joiner is
            # " AND " (the original joined with a bare "AND", producing
            # '...'AND"..." with no separating whitespace).
            cond = " AND ".join('"{0}" = %s'.format(x.tag) for x in dataTag)
            query = 'SELECT * FROM "{0}" WHERE {1} LIMIT 1;'.format(DB_TABLE, cond)
            cursor.execute(query, values)
            res = cursor.fetchall()
            conn.commit()
            if len(res) != 0:
                return "<RESPONSE>Dataset already in database</RESPONSE>"

            tags = ",".join('"{0}"'.format(x.tag) for x in dataTag)
            placeholders = ",".join(['%s'] * len(values))
            query = 'INSERT INTO "{0}"({1}) VALUES({2});'.format(DB_TABLE, tags, placeholders)

            # Insert in DB (values again passed as bind parameters)
            cursor.execute(query, values)
            conn.commit()

        except Exception:
            return "<RESPONSE>Invalid XML</RESPONSE>"
        return "<RESPONSE>OK</RESPONSE>"
Пример #44
0
class Filter(callbacks.Plugin):
    """This plugin offers several commands which transform text in some way.
    It also provides the capability of using such commands to 'filter' the
    output of the bot -- for instance, you could make everything the bot says
    be in leetspeak, or Morse code, or any number of other kinds of filters.
    Not very useful, but definitely quite fun :)"""
    def __init__(self, irc):
        # Stash the bound super() proxy; the attribute name is
        # class-private (mangled to _Filter__parent) and reused elsewhere.
        self.__parent = super(Filter, self)
        self.__parent.__init__(irc)
        # Per-channel list of active output-filter methods.
        self.outFilters = ircutils.IrcDict()

    def outFilter(self, irc, msg):
        """Apply every filter registered for the message's channel to the
        text of outgoing PRIVMSG/NOTICE messages, preserving CTCP ACTIONs."""
        if msg.command in ('PRIVMSG', 'NOTICE'):
            if msg.channel in self.outFilters:
                if ircmsgs.isAction(msg):
                    s = ircmsgs.unAction(msg)
                else:
                    s = msg.args[1]
                methods = self.outFilters[msg.channel]
                for filtercommand in methods:
                    # Each filter "replies" into a proxy; the proxy's .s
                    # captures the transformed text for the next filter.
                    myIrc = MyFilterProxy()
                    filtercommand(myIrc, msg, [s])
                    s = myIrc.s
                if ircmsgs.isAction(msg):
                    msg = ircmsgs.action(msg.args[0], s, msg=msg)
                else:
                    msg = ircmsgs.IrcMsg(msg=msg, args=(msg.args[0], s))
        return msg

    # Whitelist of filter commands that 'outfilter' below may register;
    # anything else is rejected.
    _filterCommands = [
        'jeffk', 'leet', 'rot13', 'hexlify', 'binary', 'scramble', 'morse',
        'reverse', 'colorize', 'squish', 'supa1337', 'stripcolor', 'aol',
        'rainbow', 'spellit', 'hebrew', 'undup', 'uwu', 'gnu', 'shrink',
        'uniud', 'capwords', 'caps', 'vowelrot', 'stripformatting'
    ]

    @internationalizeDocstring
    def outfilter(self, irc, msg, args, channel, command):
        """[<channel>] [<command>]

        Sets the outFilter of this plugin to be <command>.  If no command is
        given, unsets the outFilter.  <channel> is only necessary if the
        message isn't sent in the channel itself.
        """
        if command:
            # Only whitelisted, enabled commands may become filters.
            if not self.isDisabled(command) and \
               command in self._filterCommands:
                method = getattr(self, command)
                self.outFilters.setdefault(channel, []).append(method)
                irc.replySuccess()
            else:
                irc.error(_('That\'s not a valid filter command.'))
        else:
            # No command given: clear every filter for the channel.
            self.outFilters[channel] = []
            irc.replySuccess()

    outfilter = wrap(outfilter, [('checkChannelCapability', 'op'),
                                 additional('commandName')])

    # Removes the ASCII vowels; used by the 'hebrew' filter below.
    _hebrew_remover = utils.str.MultipleRemover('aeiou')

    @internationalizeDocstring
    def hebrew(self, irc, msg, args, text):
        """<text>

        Removes all the vowels from <text>.  (If you're curious why this is
        named 'hebrew' it's because I (jemfinch) thought of it in Hebrew class,
        and printed Hebrew often elides the vowels.)
        """
        irc.reply(self._hebrew_remover(text))

    hebrew = wrap(hebrew, ['text'])

    def _squish(self, text):
        # Drop every space character (other whitespace is untouched).
        return ''.join(text.split(' '))

    @internationalizeDocstring
    def squish(self, irc, msg, args, text):
        """<text>

        Removes all the spaces from <text>.
        """
        irc.reply(self._squish(text))

    squish = wrap(squish, ['text'])

    @internationalizeDocstring
    def undup(self, irc, msg, args, text):
        """<text>

        Returns <text>, with all consecutive duplicated letters removed.
        """
        # Guard against empty input: the original indexed text[0]
        # unconditionally and raised IndexError on "".
        if not text:
            irc.reply(text)
            return
        L = [text[0]]
        for c in text:
            # The first character compares equal to L[-1] and is skipped,
            # so seeding L with text[0] does not duplicate it.
            if c != L[-1]:
                L.append(c)
        irc.reply(''.join(L))

    undup = wrap(undup, ['text'])

    @internationalizeDocstring
    def binary(self, irc, msg, args, text):
        """<text>

        Returns the binary representation of <text>.
        """
        # Work on the UTF-8 byte values of the text, on both Pythons.
        if minisix.PY3:
            if isinstance(text, str):
                bytes_ = text.encode()
            else:
                bytes_ = text
        else:
            if isinstance(text, unicode):
                text = text.encode()
            bytes_ = map(ord, text)
        L = []
        for i in bytes_:
            # A byte value is at most 255.  (The original asserted
            # i <= 256, an off-by-one that would have let 256 through and
            # emitted nine digits.)
            assert i < 256
            # Zero-padded, eight digits per byte, most significant bit
            # first -- same output as the original manual bit loop.
            L.append('{0:08b}'.format(i))
        irc.reply(''.join(L))

    binary = wrap(binary, ['text'])

    def unbinary(self, irc, msg, args, text):
        """<text>

        Returns the character representation of binary <text>.
        Assumes ASCII, 8 digits per character.
        """
        bits = self._squish(text)  # Strip spaces.
        try:
            chars = [chr(int(bits[start:start + 8], 2))
                     for start in range(0, len(bits), 8)]
            irc.reply(''.join(chars))
        except ValueError:
            irc.errorInvalid('binary string', bits)

    unbinary = wrap(unbinary, ['text'])

    # Look the codec up once, at class-creation time.
    _hex_encoder = staticmethod(codecs.getencoder('hex_codec'))

    def hexlify(self, irc, msg, args, text):
        """<text>

        Returns a hexstring from the given string; a hexstring is a string
        composed of the hexadecimal value of each character in the string
        """
        raw = text.encode('utf8')
        hexed = self._hex_encoder(raw)[0]
        irc.reply(hexed.decode('utf8'))

    hexlify = wrap(hexlify, ['text'])

    # Counterpart of _hex_encoder above.
    _hex_decoder = staticmethod(codecs.getdecoder('hex_codec'))

    @internationalizeDocstring
    def unhexlify(self, irc, msg, args, text):
        """<hexstring>

        Returns the string corresponding to <hexstring>.  Obviously,
        <hexstring> must be a string of hexadecimal digits.
        """
        try:
            raw = self._hex_decoder(text.encode('utf8'))[0]
            irc.reply(raw.decode('utf8', 'replace'))
        except TypeError:
            irc.error(_('Invalid input.'))

    unhexlify = wrap(unhexlify, ['text'])

    _rot13_encoder = codecs.getencoder('rot-13')

    @internationalizeDocstring
    def rot13(self, irc, msg, args, text):
        """<text>

        Rotates <text> 13 characters to the right in the alphabet.  Rot13 is
        commonly used for text that simply needs to be hidden from inadvertent
        reading by roaming eyes, since it's easily reversible.
        """
        # rot-13 is a text-to-text codec; Python 2 needs unicode input.
        if minisix.PY2:
            text = text.decode('utf8')
        rotated, _length = self._rot13_encoder(text)
        irc.reply(rotated)

    rot13 = wrap(rot13, ['text'])

    # One-for-one character substitutions, applied after the regex passes.
    _leettrans = utils.str.MultipleReplacer(
        dict(list(zip('oOaAeElBTiIts', '004433187!1+5'))))
    # Word/sequence-level substitutions, applied in this order first.
    _leetres = [
        (re.compile(r'\b(?:(?:[yY][o0O][oO0uU])|u)\b'), 'j00'),
        (re.compile(r'fear'), 'ph33r'),
        (re.compile(r'[aA][tT][eE]'), '8'),
        (re.compile(r'[aA][tT]'), '@'),
        (re.compile(r'[sS]\b'), 'z'),
        (re.compile(r'x'), '><'),
    ]

    @internationalizeDocstring
    def leet(self, irc, msg, args, text):
        """<text>

        Returns the l33tspeak version of <text>
        """
        for (r, sub) in self._leetres:
            text = re.sub(r, sub, text)
        text = self._leettrans(text)
        irc.reply(text)

    leet = wrap(leet, ['text'])

    # (letters, replacement) pairs; every letter in the first element is
    # replaced by the second.  Applied in order after _leetres.
    _supaleetreplacers = [
        ('xX', '><'),
        ('kK', '|<'),
        ('rR', '|2'),
        ('hH', '|-|'),
        ('L', '|_'),
        ('uU', '|_|'),
        ('O', '()'),
        ('nN', '|\\|'),
        ('mM', '/\\/\\'),
        ('G', '6'),
        ('Ss', '$'),
        ('i', ';'),
        ('aA', '/-\\'),
        ('eE', '3'),
        ('t', '+'),
        ('T', '7'),
        ('l', '1'),
        ('D', '|)'),
        ('B', '|3'),
        ('I', ']['),
        ('Vv', '\\/'),
        ('wW', '\\/\\/'),
        ('d', 'c|'),
        ('b', '|>'),
        ('c', '<'),
        ('h', '|n'),
    ]

    @internationalizeDocstring
    def supa1337(self, irc, msg, args, text):
        """<text>

        Replies with an especially k-rad translation of <text>.
        """
        # Same regex passes as 'leet', then the heavier replacements above.
        for (r, sub) in self._leetres:
            text = re.sub(r, sub, text)
        for (letters, replacement) in self._supaleetreplacers:
            for letter in letters:
                text = text.replace(letter, replacement)
        irc.reply(text)

    supa1337 = wrap(supa1337, ['text'])

    # Captures (first letter, middle letters, last letter) of each word.
    _scrambleRe = re.compile(r'(?:\b|(?![a-zA-Z]))([a-zA-Z])([a-zA-Z]*)'
                             r'([a-zA-Z])(?:\b|(?![a-zA-Z]))')

    @internationalizeDocstring
    def scramble(self, irc, msg, args, text):
        """<text>

        Replies with a string where each word is scrambled; i.e., each internal
        letter (that is, all letters but the first and last) are shuffled.
        """
        def _subber(m):
            # Shuffle only the middle of the word, in place.
            L = list(m.group(2))
            random.shuffle(L)
            return '%s%s%s' % (m.group(1), ''.join(L), m.group(3))

        s = self._scrambleRe.sub(_subber, text)
        irc.reply(s)

    scramble = wrap(scramble, ['text'])

    # Character -> Morse sequence table used by 'morse' and, inverted,
    # by 'unmorse'.
    _morseCode = {
        "A": ".-",
        "B": "-...",
        "C": "-.-.",
        "D": "-..",
        "E": ".",
        "F": "..-.",
        "G": "--.",
        "H": "....",
        "I": "..",
        "J": ".---",
        "K": "-.-",
        "L": ".-..",
        "M": "--",
        "N": "-.",
        "O": "---",
        "P": ".--.",
        "Q": "--.-",
        "R": ".-.",
        "S": "...",
        "T": "-",
        "U": "..-",
        "V": "...-",
        "W": ".--",
        "X": "-..-",
        "Y": "-.--",
        "Z": "--..",
        "0": "-----",
        "1": ".----",
        "2": "..---",
        "3": "...--",
        "4": "....-",
        "5": ".....",
        "6": "-....",
        "7": "--...",
        "8": "---..",
        "9": "----.",
        ".": ".-.-.-",
        ",": "--..--",
        ":": "---...",
        "?": "..--..",
        "'": ".----.",
        "-": "-....-",
        "/": "-..-.",
        '"': ".-..-.",
        "@": ".--.-.",
        "=": "-...-"
    }
    # Reverse mapping: Morse sequence -> character.
    _revMorseCode = dict([(y, x) for (x, y) in _morseCode.items()])
    # Matches one run of dots/dashes (a single Morse symbol).
    _unmorsere = re.compile('([.-]+)')

    @internationalizeDocstring
    def unmorse(self, irc, msg, args, text):
        """<Morse code text>

        Does the reverse of the morse command.
        """
        # Some renderings use '_' for dashes; accept both.
        text = text.replace('_', '-')

        def decodeSymbol(match):
            symbol = match.group(1)
            return self._revMorseCode.get(symbol, symbol)

        text = self._unmorsere.sub(decodeSymbol, text)
        # Double spaces separated words, single spaces separated letters:
        # protect word breaks with NUL, drop letter breaks, restore words.
        text = text.replace('  ', '\x00')
        text = text.replace(' ', '')
        text = text.replace('\x00', ' ')
        irc.reply(text)

    unmorse = wrap(unmorse, ['text'])

    @internationalizeDocstring
    def morse(self, irc, msg, args, text):
        """<text>

        Gives the Morse code equivalent of a given string.
        """
        # Characters without a Morse equivalent pass through unchanged.
        encoded = ' '.join(self._morseCode.get(ch, ch) for ch in text.upper())
        irc.reply(encoded)

    morse = wrap(morse, ['text'])

    @internationalizeDocstring
    def reverse(self, irc, msg, args, text):
        """<text>

        Reverses <text>.
        """
        irc.reply(''.join(reversed(text)))

    reverse = wrap(reverse, ['text'])

    @internationalizeDocstring
    def _color(self, c, fg=None):
        """Prefix character *c* with an IRC color code (random 2..15 when
        *fg* is not given); spaces are returned unchanged."""
        if c == ' ':
            return c
        if fg is None:
            fg = random.randint(2, 15)
        # Two-digit color code avoids ambiguity with following digits.
        fg = str(fg).zfill(2)
        return '\x03%s%s' % (fg, c)

    @internationalizeDocstring
    def colorize(self, irc, msg, args, text):
        """<text>

        Returns <text> with each character randomly colorized.
        """
        if minisix.PY2:
            text = text.decode('utf-8')
        # Strip existing color codes so ours are unambiguous.
        text = ircutils.stripColor(text)
        L = [self._color(c) for c in text]
        if minisix.PY2:
            L = [c.encode('utf-8') for c in L]
        # Trailing \x03 resets the color at the end of the message.
        irc.reply('%s%s' % (''.join(L), '\x03'))

    colorize = wrap(colorize, ['text'])

    @internationalizeDocstring
    def rainbow(self, irc, msg, args, text):
        """<text>

        Returns <text> colorized like a rainbow.
        """
        if minisix.PY2:
            text = text.decode('utf-8')
        # Strip existing color codes, then cycle through the palette,
        # one color per character.
        text = ircutils.stripColor(text)
        colors = utils.iter.cycle([
            '05', '04', '07', '08', '09', '03', '11', '10', '12', '02', '06',
            '13'
        ])
        L = [self._color(c, fg=next(colors)) for c in text]
        if minisix.PY2:
            L = [c.encode('utf-8') for c in L]
        # Trailing \x03 resets the color.
        irc.reply(''.join(L) + '\x03')

    rainbow = wrap(rainbow, ['text'])

    @wrap(['text'])
    def stripformatting(self, irc, msg, args, text):
        """<text>

        Strips bold, underline, and colors from <text>."""
        irc.reply(ircutils.stripFormatting(text))

    @internationalizeDocstring
    def stripcolor(self, irc, msg, args, text):
        """<text>

        Returns <text> stripped of all color codes.
        """
        irc.reply(ircutils.stripColor(text))

    stripcolor = wrap(stripcolor, ['text'])

    @internationalizeDocstring
    def aol(self, irc, msg, args, text):
        """<text>

        Returns <text> as if an AOL user had said it.
        """
        # Applied strictly in order: e.g. ' too ' must be rewritten
        # before ' to ', and ', ' before ','.
        substitutions = (
            (' you ', ' u '),
            (' are ', ' r '),
            (' love ', ' <3 '),
            (' luv ', ' <3 '),
            (' too ', ' 2 '),
            (' to ', ' 2 '),
            (' two ', ' 2 '),
            ('fore', '4'),
            (' for ', ' 4 '),
            ('be', 'b'),
            ('four', ' 4 '),
            (' their ', ' there '),
            (', ', ' '),
            (',', ' '),
            ("'", ''),
            ('one', '1'),
        )
        for old, new in substitutions:
            text = text.replace(old, new)
        smiley = utils.iter.choice(['<3', ':)', ':-)', ':D', ':-D'])
        text += smiley * 3
        irc.reply(text)

    aol = wrap(aol, ['text'])

    @internationalizeDocstring
    def jeffk(self, irc, msg, args, text):
        """<text>

        Returns <text> as if JeffK had said it himself.
        """
        # Pick a random element of L.
        def randomlyPick(L):
            return utils.iter.choice(L)

        # re.sub callback: re-join the match groups around either a kept
        # quote character or nothing at all.
        def quoteOrNothing(m):
            return randomlyPick(['"', '']).join(m.groups())

        # Build a re.sub callback that expands the replacement template s
        # with the given probability, otherwise leaves the match unchanged.
        def randomlyReplace(s, probability=0.5):
            def f(m):
                if random.random() < probability:
                    return m.expand(s)
                else:
                    return m.group(0)

            return f

        # re.sub callback for sentence ends: 85% of the time replace '.'
        # with a burst of 1-4 '!', otherwise keep the period.
        def randomExclaims(m):
            if random.random() < 0.85:
                return ('!' * random.randrange(1, 5)) + m.group(1)
            else:
                return '.' + m.group(1)

        # re.sub callback: always swap/shuffle the two matched characters.
        def randomlyShuffle(m):
            L = list(m.groups())
            random.shuffle(L)
            return ''.join(L)

        # re.sub callback: shuffle the two matched characters 40% of the time.
        def lessRandomlyShuffle(m):
            L = list(m.groups())
            if random.random() < .4:
                random.shuffle(L)
            return ''.join(L)

        # Possibly append a trailing laugh (and sometimes an insult) to text.
        def randomlyLaugh(text, probability=.3):
            if random.random() < probability:
                if random.random() < .5:
                    # NOTE(review): missing comma after 'fagot' -- the two
                    # adjacent string literals concatenate to 'fagot jerk',
                    # so this list has 6 entries, not 7.
                    insult = utils.iter.choice([
                        ' fagot1', ' fagorts', ' jerks', 'fagot'
                        ' jerk', 'dumbshoes', ' dumbshoe'
                    ])
                else:
                    insult = ''
                laugh1 = utils.iter.choice(['ha', 'hah', 'lol', 'l0l', 'ahh'])
                laugh2 = utils.iter.choice(['ha', 'hah', 'lol', 'l0l', 'ahh'])
                laugh1 = laugh1 * random.randrange(1, 5)
                laugh2 = laugh2 * random.randrange(1, 5)
                exclaim = utils.iter.choice(
                    ['!', '~', '!~', '~!!~~', '!!~', '~~~!'])
                exclaim += utils.iter.choice(
                    ['!', '~', '!~', '~!!~~', '!!~', '~~~!'])
                if random.random() < 0.5:
                    exclaim += utils.iter.choice(
                        ['!', '~', '!~', '~!!~~', '!!~', '~~~!'])
                laugh = ''.join([' ', laugh1, laugh2, insult, exclaim])
                text += laugh
            return text

        # 3% of the time short-circuit with "NO YUO" plus a guaranteed laugh.
        if random.random() < .03:
            irc.reply(randomlyLaugh('NO YUO', probability=1))
            return
        # Misspellings applied to every occurrence.
        alwaysInsertions = {
            r'er\b': 'ar',
            r'\bthe\b': 'teh',
            r'\byou\b': 'yuo',
            r'\bis\b': 'si',
            r'\blike\b': 'liek',
            r'[^e]ing\b': 'eing',
        }
        for (r, s) in alwaysInsertions.items():
            text = re.sub(r, s, text)
        # Misspellings applied with 50% probability per match.
        # NOTE(review): the key r'i' appears twice; in a dict literal the
        # second value ('io') silently overwrites the first ('ui'), so the
        # 'ui' replacement never applies.
        randomInsertions = {
            r'i': 'ui',
            r'le\b': 'al',
            r'i': 'io',
            r'l': 'll',
            r'to': 'too',
            r'that': 'taht',
            r'[^s]c([ei])': r'sci\1',
            r'ed\b': r'e',
            r'\band\b': 'adn',
            r'\bhere\b': 'hear',
            r'\bthey\'re': 'their',
            r'\bthere\b': 'they\'re',
            r'\btheir\b': 'there',
            r'[^e]y': 'ey',
        }
        for (r, s) in randomInsertions.items():
            text = re.sub(r, randomlyReplace(s), text)
        text = re.sub(r'(\w)\'(\w)', quoteOrNothing, text)
        text = re.sub(r'\.(\s+|$)', randomExclaims, text)
        # Garble adjacent vowel pairs and adjacent consonant pairs.
        text = re.sub(r'([aeiou])([aeiou])', randomlyShuffle, text)
        text = re.sub(r'([bcdfghkjlmnpqrstvwxyz])([bcdfghkjlmnpqrstvwxyz])',
                      lessRandomlyShuffle, text)
        text = randomlyLaugh(text)
        # 40% chance of SHOUTING the whole thing.
        if random.random() < .4:
            text = text.upper()
        irc.reply(text)

    jeffk = wrap(jeffk, ['text'])

    # Keeping these separate so people can just replace the alphabets for
    # whatever their language of choice
    # Phonetic spelling of each lowercase ASCII letter; each value is
    # wrapped in _() so translations can localize it.
    _spellLetters = {
        'a': _('ay'),
        'b': _('bee'),
        'c': _('see'),
        'd': _('dee'),
        'e': _('ee'),
        'f': _('eff'),
        'g': _('gee'),
        'h': _('aych'),
        'i': _('eye'),
        'j': _('jay'),
        'k': _('kay'),
        'l': _('ell'),
        'm': _('em'),
        'n': _('en'),
        'o': _('oh'),
        'p': _('pee'),
        'q': _('cue'),
        'r': _('arr'),
        's': _('ess'),
        't': _('tee'),
        'u': _('you'),
        'v': _('vee'),
        'w': _('double-you'),
        'x': _('ecks'),
        'y': _('why'),
        'z': _('zee')
    }
    # Uppercase letters share the lowercase spellings; list() snapshots the
    # items so the dict is not mutated while being iterated.
    for (k, v) in list(_spellLetters.items()):
        _spellLetters[k.upper()] = v
    # Spoken names for ASCII punctuation characters.
    _spellPunctuation = {
        '!': _('exclamation point'),
        '"': _('quote'),
        '#': _('pound'),
        '$': _('dollar sign'),
        '%': _('percent'),
        '&': _('ampersand'),
        '\'': _('single quote'),
        '(': _('left paren'),
        ')': _('right paren'),
        '*': _('asterisk'),
        '+': _('plus'),
        ',': _('comma'),
        '-': _('minus'),
        '.': _('period'),
        '/': _('slash'),
        ':': _('colon'),
        ';': _('semicolon'),
        '<': _('less than'),
        '=': _('equals'),
        '>': _('greater than'),
        '?': _('question mark'),
        '@': _('at'),
        '[': _('left bracket'),
        '\\': _('backslash'),
        ']': _('right bracket'),
        '^': _('caret'),
        '_': _('underscore'),
        '`': _('backtick'),
        '{': _('left brace'),
        '|': _('pipe'),
        '}': _('right brace'),
        '~': _('tilde')
    }
    # Spoken names for the decimal digits.
    _spellNumbers = {
        '0': _('zero'),
        '1': _('one'),
        '2': _('two'),
        '3': _('three'),
        '4': _('four'),
        '5': _('five'),
        '6': _('six'),
        '7': _('seven'),
        '8': _('eight'),
        '9': _('nine')
    }

    @internationalizeDocstring
    def spellit(self, irc, msg, args, text):
        """<text>

        Returns <text>, phonetically spelled out.
        """
        # Build the active replacement table from the enabled categories.
        replacements = {}
        if self.registryValue('spellit.replaceLetters'):
            replacements.update(self._spellLetters)
        if self.registryValue('spellit.replaceNumbers'):
            replacements.update(self._spellNumbers)
        if self.registryValue('spellit.replacePunctuation'):
            replacements.update(self._spellPunctuation)

        # Each replaced character gets a leading space so the spelled-out
        # words stay separated; the final strip drops any leading space.
        pieces = []
        for ch in text:
            spelled = replacements.get(ch)
            if spelled is None:
                pieces.append(ch)
            else:
                pieces.append(' ')
                pieces.append(spelled)
        irc.reply(''.join(pieces).strip())

    spellit = wrap(spellit, ['text'])

    @internationalizeDocstring
    def gnu(self, irc, msg, args, text):
        """<text>

        Returns <text> as GNU/RMS would say it.
        """
        # Prefix every whitespace-separated word with "GNU/".
        prefixed = ('GNU/' + word for word in text.split())
        irc.reply(' '.join(prefixed))

    gnu = wrap(gnu, ['text'])

    @internationalizeDocstring
    def shrink(self, irc, msg, args, text):
        """<text>

        Returns <text> with each word longer than
        supybot.plugins.Filter.shrink.minimum being shrunken (i.e., like
        "internationalization" becomes "i18n").
        """
        minimum = self.registryValue('shrink.minimum', msg.channel,
                                     irc.network)
        pattern = re.compile(r'[A-Za-z]{%s,}' % minimum)

        def abbreviate(match):
            # first letter + count of interior letters + last letter
            word = match.group(0)
            return '%s%d%s' % (word[0], len(word) - 2, word[-1])

        irc.reply(pattern.sub(abbreviate, text))

    shrink = wrap(shrink, ['text'])

    # TODO: 2,4,;
    # XXX suckiest: B,K,P,Q,T
    # alternatives: 3: U+2107
    # Maps each printable ASCII character to a 180-degree-rotated
    # look-alike for upside-down text.
    # FIX(review): the original table had a mangled pair -- an
    # empty-string key '' mapped to 'n' (unreachable, since uniud() looks
    # up single characters) and 'n' mapped to '' (rendered as U+FFFD).
    # Restored the intended 'u' <-> 'n' rotation pair.
    _uniudMap = {
        ' ': ' ',
        '0': '0',
        '@': '@',
        '!': '\u00a1',
        '1': '1',
        'A': '\u2200',
        '"': '\u201e',
        '2': '\u2681',
        'B': 'q',
        '#': '#',
        '3': '\u0190',
        'C': '\u0186',
        '$': '$',
        '4': '\u2683',
        'D': '\u15e1',
        '%': '%',
        '5': '\u1515',
        'E': '\u018e',
        '&': '\u214b',
        '6': '9',
        'F': '\u2132',
        "'": '\u0375',
        '7': 'L',
        'G': '\u2141',
        '(': ')',
        '8': '8',
        'H': 'H',
        ')': '(',
        '9': '6',
        'I': 'I',
        '*': '*',
        ':': ':',
        'J': '\u148b',
        '+': '+',
        ';': ';',
        'K': '\u029e',
        ',': '\u2018',
        '<': '>',
        'L': '\u2142',
        '-': '-',
        '=': '=',
        'M': '\u019c',
        '.': '\u02d9',
        '>': '<',
        'N': 'N',
        '/': '/',
        '?': '\u00bf',
        'O': 'O',
        'P': 'd',
        '`': '\u02ce',
        'p': 'd',
        'Q': 'b',
        'a': '\u0250',
        'q': 'b',
        'R': '\u1d1a',
        'b': 'q',
        'r': '\u0279',
        'S': 'S',
        'c': '\u0254',
        's': 's',
        'T': '\u22a5',
        'd': 'p',
        't': '\u0287',
        'U': '\u144e',
        'e': '\u01dd',
        'u': 'n',
        'V': '\u039b',
        'f': '\u214e',
        'v': '\u028c',
        'W': 'M',
        'g': '\u0253',
        'w': '\u028d',
        'X': 'X',
        'h': '\u0265',
        'x': 'x',
        'Y': '\u2144',
        'i': '\u1d09',
        'y': '\u028e',
        'Z': 'Z',
        'j': '\u027f',
        'z': 'z',
        '[': ']',
        'k': '\u029e',
        '{': '}',
        '\\': '\\',
        'l': '\u05df',
        '|': '|',
        ']': '[',
        'm': '\u026f',
        '}': '{',
        '^': '\u2335',
        'n': 'u',
        '~': '~',
        '_': '\u203e',
        'o': 'o',
    }

    @internationalizeDocstring
    def uniud(self, irc, msg, args, text):
        """<text>

        Returns <text> rotated 180 degrees. Only really works for ASCII
        printable characters.
        """
        flipped = []
        width = 0
        for ch in text:
            mapped = self._uniudMap.get(ch)
            if mapped is not None:
                # Empty mappings render as the Unicode replacement char.
                flipped.append(mapped if mapped else '\ufffd')
                width += 1
            elif ch == '\t':
                # Expand tabs to the next 8-column stop before reversing.
                pad = 8 - width % 8
                flipped.append(' ' * pad)
                width += pad
            elif ord(ch) >= 32:
                # Pass other printable characters through unchanged.
                flipped.append(ch)
                width += 1
        flipped.reverse()
        irc.reply('%s \x02 \x02' % ''.join(flipped))

    uniud = wrap(uniud, ['text'])

    def capwords(self, irc, msg, args, text):
        """<text>

        Capitalises the first letter of each word.
        """
        irc.reply(string.capwords(text))

    capwords = wrap(capwords, ['text'])

    def caps(self, irc, msg, args, text):
        """<text>

        EVERYONE LOVES CAPS LOCK.
        """
        shouted = text.upper()
        irc.reply(shouted)

    caps = wrap(caps, ['text'])

    # Single-pass replacer mapping each vowel to the next one
    # (a->e, e->i, i->o, o->u, u->a), for both cases.
    _vowelrottrans = utils.str.MultipleReplacer(
        dict(list(zip('aeiouAEIOU', 'eiouaEIOUA'))))

    def vowelrot(self, irc, msg, args, text):
        """<text>

        Returns <text> with vowels rotated
        """
        irc.reply(self._vowelrottrans(text))

    vowelrot = wrap(vowelrot, ['text'])

    # Single-pass replacer turning l/r (both cases) into w/W.
    _uwutrans = utils.str.MultipleReplacer(dict(list(zip('lrLR', 'wwWW'))))

    def uwu(self, irc, msg, args, text):
        """<text>

        Returns <text> in uwu-speak.
        """
        reply = self._uwutrans(text)
        # 4-in-14 chance of tacking on a random uwu-style suffix.
        suffix = random.choice([''] * 10 + [' uwu', ' UwU', ' owo', ' OwO'])
        irc.reply(reply + suffix)

    uwu = wrap(uwu, ['text'])
Пример #45
0
import codecs
import hashlib
import random
import unittest

from mozharness.mozilla.merkle import InclusionProof, MerkleTree

# Hex (de)serializers: each returns a (result, length_consumed) tuple.
decode_hex = codecs.getdecoder("hex_codec")
encode_hex = codecs.getencoder("hex_codec")

# Pre-computed tree on 7 inputs
#
#         ______F_____
#        /            \
#     __D__           _E_
#    /     \         /   \
#   A       B       C     |
#  / \     / \     / \    |
# 0   1   2   3   4   5   6
# Hash function used for every node of the test Merkle tree.
hash_fn = hashlib.sha256

# Seven fixed 16-byte leaves; [0] keeps only the decoded bytes from the
# (bytes, length) tuple returned by the decoder.
data = [
    decode_hex("fbc459361fc111024c6d1fd83d23a9ff")[0],
    decode_hex("ae3a44925afec860451cd8658b3cadde")[0],
    decode_hex("418903fe6ef29fc8cab93d778a7b018b")[0],
    decode_hex("3d1c53c00b2e137af8c4c23a06388c6b")[0],
    decode_hex("e656ebd8e2758bc72599e5896be357be")[0],
    decode_hex("81aae91cf90be172eedd1c75c349bf9e")[0],
    decode_hex("00c262edf8b0bc345aca769e8733e25e")[0],
]
Пример #46
0
    def __init__(self,
                 url,
                 persistent=True,
                 timeout=None,
                 ssl_key=None,
                 ssl_cert=None,
                 post_headers=None):
        """
            url -- URI pointing to the SOLR instance. Examples:

                http://localhost:8080/solr
                https://solr-server/solr

                Your python install must be compiled with SSL support for the
                https:// schemes to work. (Most pre-packaged pythons are.)

            persistent -- Keep a persistent HTTP connection open.
                Defaults to true

            timeout -- Timeout, in seconds, for the server to response.
                By default, use the python default timeout (of none?)

            ssl_key, ssl_cert -- If using client-side key files for
                SSL authentication,  these should be, respectively,
                your PEM key file and certificate file

            post_headers -- Optional dict of extra headers merged into the
                XML POST headers. (The default was changed from a mutable
                ``{}`` literal to ``None`` to avoid the shared
                mutable-default-argument pitfall; caller-visible behaviour
                is unchanged.)
        """
        if post_headers is None:
            post_headers = {}

        self.scheme, self.host, self.path = urlparse.urlparse(url, 'http')[:3]
        self.url = url

        assert self.scheme in ('http', 'https')

        self.persistent = persistent
        self.reconnects = 0
        self.timeout = timeout
        self.ssl_key = ssl_key
        self.ssl_cert = ssl_cert

        kwargs = {}

        # httplib only grew a timeout kwarg in Python 2.6.
        if self.timeout and _python_version >= 2.6 and _python_version < 3:
            kwargs['timeout'] = self.timeout

        if self.scheme == 'https':
            self.conn = httplib.HTTPSConnection(self.host,
                                                key_file=ssl_key,
                                                cert_file=ssl_cert,
                                                **kwargs)
        else:
            self.conn = httplib.HTTPConnection(self.host, **kwargs)

        # this is int, not bool!
        self.batch_cnt = 0
        self.response_version = 2.2
        self.encoder = codecs.getencoder('utf-8')

        # Responses from Solr will always be in UTF-8
        self.decoder = codecs.getdecoder('utf-8')

        # Set timeout, if applicable (pre-2.6 fallback: set it directly
        # on the connected socket).
        if self.timeout and _python_version < 2.6:
            self.conn.connect()
            if self.scheme == 'http':
                self.conn.sock.settimeout(self.timeout)
            elif self.scheme == 'https':
                self.conn.sock.sock.settimeout(self.timeout)

        self.xmlheaders = {'Content-Type': 'text/xml; charset=utf-8'}
        self.xmlheaders.update(post_headers)
        if not self.persistent:
            self.xmlheaders['Connection'] = 'close'

        self.form_headers = {
            'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
        }

        if not self.persistent:
            self.form_headers['Connection'] = 'close'
Пример #47
0
# -*- coding:utf-8 -*-

import socket
import base64
import random
import requests
import time
import binascii
import codecs

# Module-level hex decoder: decode_hex(s) -> (bytes, length_consumed).
decode_hex = codecs.getdecoder('hex_codec')


def str2hex(x):
    """Return the lowercase hex representation of the UTF-8 encoding of *x*."""
    return x.encode('utf-8').hex()


def h2bin(x):
    """Decode a hex string to bytes, ignoring spaces and newlines."""
    cleaned = x.replace(' ', '').replace('\n', '')
    return decode_hex(cleaned)[0]


def random_str(len):
    """Return a random string of the given length drawn from A-H and 0-9.

    Improvements over the original: builds the string with a single
    str.join instead of quadratic ``+=`` concatenation, and drops the
    redundant ``str()`` wrapper. The parameter keeps its original name
    ``len`` for backward compatibility with keyword callers, even though
    it shadows the builtin inside this function.
    """
    alphabet = "ABCDEFGH1234567890"
    return ''.join(random.choice(alphabet) for _ in range(len))


def get_ver_ip(ip):
    csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
Пример #48
0
#!/usr/bin/env python
# MIME Header Parsing - Chapter 9
# mime_parse_headers.py
# This program requires Python 2.2.2 or above

import sys, email, codecs
from email import Header

# NOTE(review): Python 2-only script (print statement, email.Header module,
# and the mixed tab/space indentation below); it will not run on Python 3.
# Decodes each MIME header of a message read from stdin and re-encodes the
# parts as ISO-8859-1 before printing them.
msg = email.message_from_file(sys.stdin)
for header, value in msg.items():
    headerparts = Header.decode_header(value)
    headerval = []
    for part in headerparts:
        data, charset = part
	if charset is None:
	    charset = 'ascii'
	dec = codecs.getdecoder(charset)
	enc = codecs.getencoder('iso-8859-1')
	data = enc(dec(data)[0])[0]
	headerval.append(data)
    print "%s: %s" % (header, " ".join(headerval))

Пример #49
0
 def _decode(t, coding):
     return codecs.getdecoder(coding)(t)[0]
Пример #50
0
    def parseString(self, cssText, encoding=None):
        """Parse a CSS stylesheet, reporting constructs as events on
        self._handler (startDocument, importStyle, namespaceDeclaration,
        startSelector, property, endSelector, ignorableAtRule, comment,
        endDocument) and problems on self._errorHandler.

        cssText may be a str, in which case it is decoded with the
        'css' codec using *encoding*.
        """
        if isinstance(cssText, str):
            cssText = codecs.getdecoder('css')(cssText, encoding=encoding)[0]

        tokens = self._tokenizer.tokenize(cssText, fullsheet=True)

        def COMMENT(val, line, col):
            # Strip the '/*' and '*/' delimiters before reporting.
            self._handler.comment(val[2:-2], line, col)

        def EOF(val, line, col):
            self._handler.endDocument(val, line, col)

        def simple(t):
            # Consume trivial tokens (comment, whitespace, EOF); returns
            # True when the token was handled here.
            map = {'COMMENT': COMMENT,
                   'S': lambda val, line, col: None,
                   'EOF': EOF}
            type_, val, line, col = t
            if type_ in map:
                map[type_](val, line, col)
                return True
            else:
                return False

        # START PARSING
        t = next(tokens)
        type_, val, line, col = t

        encoding = 'utf-8'
        if 'CHARSET_SYM' == type_:
            # @charset "encoding";
            # S
            encodingtoken = next(tokens)
            semicolontoken = next(tokens)
            # NOTE(review): type_ is still 'CHARSET_SYM' at this point, so
            # this condition can never be true (dead code); the effective
            # check on encodingtoken follows below.
            if 'STRING' == type_:
                encoding = helper.stringvalue(val)
            # ;
            if 'STRING' == encodingtoken[0] and semicolontoken:
                encoding = helper.stringvalue(encodingtoken[1])
            else:
                self._errorHandler.fatal('Invalid @charset')

            t = next(tokens)
            type_, val, line, col = t

        self._handler.startDocument(encoding)

        while True:
            start = (line, col)
            try:
                if simple(t):
                    pass

                elif 'ATKEYWORD' == type_ or type_ in ('PAGE_SYM', 'MEDIA_SYM', 'FONT_FACE_SYM'):
                    # Unhandled at-rule: collect it verbatim up to its
                    # terminating ';' or balanced '{...}' block.
                    atRule = [val]
                    braces = 0
                    while True:
                        # read till end ;
                        # TODO: or {}
                        t = next(tokens)
                        type_, val, line, col = t
                        atRule.append(val)
                        if ';' == val and not braces:
                            break
                        elif '{' == val:
                            braces += 1
                        elif '}' == val:
                            braces -= 1
                            if braces == 0:
                                break

                    self._handler.ignorableAtRule(''.join(atRule), *start)

                elif 'IMPORT_SYM' == type_:
                    # import URI or STRING media? name?
                    uri, media, name = None, None, None
                    while True:
                        t = next(tokens)
                        type_, val, line, col = t
                        if 'STRING' == type_:
                            uri = helper.stringvalue(val)
                        elif 'URI' == type_:
                            uri = helper.urivalue(val)
                        elif ';' == val:
                            break

                    if uri:
                        self._handler.importStyle(uri, media, name)
                    else:
                        self._errorHandler.error('Invalid @import'
                                                 ' declaration at %r'
                                                 % (start,))

                elif 'NAMESPACE_SYM' == type_:
                    # @namespace prefix? uri ;
                    prefix, uri = None, None
                    while True:
                        t = next(tokens)
                        type_, val, line, col = t
                        if 'IDENT' == type_:
                            prefix = val
                        elif 'STRING' == type_:
                            uri = helper.stringvalue(val)
                        elif 'URI' == type_:
                            uri = helper.urivalue(val)
                        elif ';' == val:
                            break
                    if uri:
                        self._handler.namespaceDeclaration(prefix, uri, *start)
                    else:
                        self._errorHandler.error('Invalid @namespace'
                                                 ' declaration at %r'
                                                 % (start,))

                else:
                    # CSSSTYLERULE
                    selector = []
                    selectors = []
                    while True:
                        # selectors[, selector]* {
                        if 'S' == type_:
                            selector.append(' ')
                        elif simple(t):
                            pass
                        elif ',' == val:
                            selectors.append(''.join(selector).strip())
                            selector = []
                        elif '{' == val:
                            selectors.append(''.join(selector).strip())
                            self._handler.startSelector(selectors, *start)
                            break
                        else:
                            selector.append(val)

                        t = next(tokens)
                        type_, val, line, col = t

                    # Declaration block: loop over declarations until '}'.
                    end = None
                    while True:
                        # name: value [!important][;name: value [!important]]*;?
                        name, value, important = None, [], False

                        while True:
                            # name:
                            t = next(tokens)
                            type_, val, line, col = t
                            if 'S' == type_:
                                pass
                            elif simple(t):
                                pass
                            elif 'IDENT' == type_:
                                if name:
                                    self._errorHandler.error('more than one property name', t)
                                else:
                                    name = val
                            elif ':' == val:
                                if not name:
                                    self._errorHandler.error('no property name', t)
                                break
                            elif ';' == val:
                                self._errorHandler.error('premature end of property', t)
                                end = val
                                break
                            elif '}' == val:
                                if name:
                                    self._errorHandler.error('premature end of property', t)
                                end = val
                                break
                            else:
                                self._errorHandler.error('unexpected property name token %r' % val, t)

                        while not ';' == end and not '}' == end:
                            # value !;}
                            t = next(tokens)
                            type_, val, line, col = t

                            if 'S' == type_:
                                value.append(' ')
                            elif simple(t):
                                pass
                            elif '!' == val or ';' == val or '}' == val:
                                value = ''.join(value).strip()
                                if not value:
                                    self._errorHandler.error('premature end of property (no value)', t)
                                end = val
                                break
                            else:
                                value.append(val)

                        while '!' == end:
                            # !important
                            t = next(tokens)
                            type_, val, line, col = t

                            if simple(t):
                                pass
                            elif 'IDENT' == type_ and not important:
                                important = True
                            elif ';' == val or '}' == val:
                                end = val
                                break
                            else:
                                self._errorHandler.error('unexpected priority token %r' % val)

                        if name and value:
                            self._handler.property(name, value, important)

                        if '}' == end:
                            self._handler.endSelector(selectors, line=line, col=col)
                            break
                        else:
                            # reset
                            end = None

                    else:
                        # NOTE(review): while-else on a 'while True' loop;
                        # only runs if the loop ends without break, which
                        # cannot happen here (dead code).
                        self._handler.endSelector(selectors, line=line, col=col)

                t = next(tokens)
                type_, val, line, col = t

            except StopIteration:
                # Tokenizer exhausted: parsing is complete.
                break
Пример #51
0
    pass


class InvalidRegistryValue(RegistryException):
    """Registry-specific error; judging by its name, raised when a value
    is not valid for a registry entry (confirm at the raise sites)."""
    pass


class NonExistentRegistryEntry(RegistryException, AttributeError):
    """Raised when a looked-up registry entry does not exist."""
    # If we use hasattr() on a configuration group/value, Python 3 calls
    # __getattr__ and looks for an AttributeError, so __getattr__ has to
    # raise an AttributeError if a registry entry does not exist.
    pass


# Escape codec used to (de)serialize registry string values:
# 'string_escape' on Python 2, 'unicode_escape' on Python 3.
ENCODING = 'string_escape' if minisix.PY2 else 'unicode_escape'
decoder = codecs.getdecoder(ENCODING)
encoder = codecs.getencoder(ENCODING)

# Prefer a monotonic clock (immune to wall-clock adjustments) when available.
if hasattr(time, 'monotonic'):
    monotonic_time = time.monotonic
else:
    # fallback for python < 3.3
    monotonic_time = time.time

# Case-insensitive (but case-preserving) cache of registry entries, and the
# last-modified marker for the registry file.
_cache = utils.InsensitivePreservingDict()
_lastModified = 0


def open_registry(filename, clear=False):
    """Initializes the module by loading the registry file into memory."""
    global _lastModified
Пример #52
0
from tempfile import mkstemp, NamedTemporaryFile
try:
    import simplejson as json
except ImportError:
    import json
import cPickle as pickle
import glob
from urlparse import urlparse as stdlib_urlparse, ParseResult
import itertools

import eventlet
from eventlet import GreenPool, sleep, Timeout
from eventlet.green import socket, threading
import netifaces
import codecs
# Module-level UTF-8 codec helpers; each call returns (result, length).
utf8_decoder = codecs.getdecoder('utf-8')
utf8_encoder = codecs.getencoder('utf-8')

from logging.handlers import SysLogHandler
import logging

# setup notice level logging

# Register a custom NOTICE level (25, between INFO=20 and WARNING=30) with
# both the logging module and syslog's priority map.
# NOTE(review): logging._levelNames is a private, Python-2-era attribute
# (split into _levelToName/_nameToLevel on Python 3) -- confirm the target
# runtime before reusing this.
NOTICE = 25
logging._levelNames[NOTICE] = 'NOTICE'
SysLogHandler.priority_map['NOTICE'] = 'notice'

# These are lazily pulled from libc elsewhere

_sys_fsync = None
_sys_fallocate = None
Пример #53
0
 def test_decoder_is_searchable_by_name(self):
     """The 'rotunicode' codec must be discoverable via codecs.getdecoder."""
     self.assertIsNotNone(codecs.getdecoder('rotunicode'))
Пример #54
0
 def test_errorcallback_longindex(self):
     # Python 2-only test (u'' literal, sys.maxint): a custom error
     # callback that reports a resume position past the end of the input
     # must make the decoder raise IndexError.
     dec = codecs.getdecoder('euc-kr')
     myreplace  = lambda exc: (u'', sys.maxint+1)
     codecs.register_error('test.cjktest', myreplace)
     self.assertRaises(IndexError, dec,
                       'apple\x92ham\x93spam', 'test.cjktest')
# -*- coding: utf-8 -*-
from __future__ import print_function
import codecs
# import big5uao
import big5uao_3

# a煊b喆c凜凛
# Sample Big5-UAO bytes for the mixed ASCII/CJK string above.
test = b'a\x95\x4Fb\x95\xEDc\xBB\xFE\x81\x60'

dec = codecs.getdecoder('big5uao')
enc = codecs.getencoder('big5uao')

# Codec calls return a (text, length_consumed) tuple.
d0, d1 = dec(test)
print(d0, d1)

# Trailing \x80\x80 is not a valid sequence; errors='replace' substitutes
# U+FFFD instead of raising.
test = b'\x95\x4F\x80\x80'
d0, d1 = dec(test, errors='replace')
print(d0, d1)

# Expected Big5+UAO encoding: bytes 0xa7 0x41 0xa6 0x6e.
s = '你好'  # 2.x u'你好'      3.x '你好'

e = enc(s)
for b in e[0]:
    print('%x ' % b, end='')
print(e[1])

# NOTE(review): U+DDDD is an unpaired surrogate, so this strict encode is
# presumably expected to raise unless the codec maps it -- confirm intent.
s = '\uEEEE\uDDDD'
e = enc(s, errors='strict')
for b in e[0]:
    print('%x ' % b, end='')
Пример #56
0
def uppercase_escape(s):
    """Replace literal \\UXXXXXXXX escape sequences in *s* with the
    characters they denote, leaving everything else untouched."""
    decode = codecs.getdecoder('unicode_escape')
    pattern = r'\\U[0-9a-fA-F]{8}'
    return re.sub(pattern, lambda match: decode(match.group(0))[0], s)
Пример #57
0
def getdecoder(codec):
    """Return the stdlib decoder for *codec*, or a failing stub if unknown.

    codecs.getdecoder raises LookupError for unrecognized codec names, so
    catch exactly that; the original bare ``except:`` would also have
    swallowed unrelated errors (including KeyboardInterrupt/SystemExit).
    """
    try:
        return codecs.getdecoder(codec)
    except LookupError:
        return FailingCodec(codec).fail
Пример #58
0
    def __init__(self,
                 url,
                 persistent=True,
                 timeout=None,
                 ssl_key=None,
                 ssl_cert=None,
                 http_user=None,
                 http_pass=None,
                 post_headers=None,
                 max_retries=3,
                 debug=False):
        """
            url -- URI pointing to the Solr instance. Examples:

                http://localhost:8080/solr
                https://solr-server/solr

                Your python install must be compiled with SSL support for the
                https:// schemes to work. (Most pre-packaged pythons are.)

            persistent -- Keep a persistent HTTP connection open.
                Defaults to true

            timeout -- Timeout, in seconds, for the server to response.
                By default, use the python default timeout (of none?)

            ssl_key, ssl_cert -- If using client-side key files for
                SSL authentication,  these should be, respectively,
                your PEM key file and certificate file.

            http_user, http_pass -- If given, include HTTP Basic authentication 
                in all request headers.

            post_headers -- Optional dict of extra headers merged into the
                XML POST headers. (Default changed from a mutable ``{}``
                literal to ``None`` to avoid the shared
                mutable-default-argument pitfall; caller-visible behaviour
                is unchanged.)
        """
        if post_headers is None:
            post_headers = {}

        self.scheme, self.host, self.path = urlparse.urlparse(url, 'http')[:3]
        self.url = url

        assert self.scheme in ('http', 'https')

        self.persistent = persistent
        self.reconnects = 0
        self.timeout = timeout
        self.ssl_key = ssl_key
        self.ssl_cert = ssl_cert
        self.max_retries = int(max_retries)

        assert self.max_retries >= 0

        kwargs = {}

        # httplib only grew a timeout kwarg in Python 2.6.
        if self.timeout and _python_version >= 2.6 and _python_version < 3:
            kwargs['timeout'] = self.timeout

        if self.scheme == 'https':
            self.conn = httplib.HTTPSConnection(self.host,
                                                key_file=ssl_key,
                                                cert_file=ssl_cert,
                                                **kwargs)
        else:
            self.conn = httplib.HTTPConnection(self.host, **kwargs)

        self.response_version = 2.2
        self.encoder = codecs.getencoder('utf-8')

        # Responses from Solr will always be in UTF-8
        self.decoder = codecs.getdecoder('utf-8')

        # Set timeout, if applicable (pre-2.6 fallback: set it directly on
        # the connected socket).
        if self.timeout and _python_version < 2.6:
            self.conn.connect()
            if self.scheme == 'http':
                self.conn.sock.settimeout(self.timeout)
            elif self.scheme == 'https':
                self.conn.sock.sock.settimeout(self.timeout)

        self.xmlheaders = {'Content-Type': 'text/xml; charset=utf-8'}
        self.xmlheaders.update(post_headers)
        if not self.persistent:
            self.xmlheaders['Connection'] = 'close'

        self.form_headers = {
            'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
        }

        if http_user is not None and http_pass is not None:
            # NOTE(review): str.encode('base64') is Python 2-only; on
            # Python 3 this would need the base64 module.
            http_auth = http_user + ':' + http_pass
            http_auth = 'Basic ' + http_auth.encode('base64').strip()
            self.auth_headers = {'Authorization': http_auth}
        else:
            self.auth_headers = {}

        if not self.persistent:
            self.form_headers['Connection'] = 'close'

        self.debug = debug
        self.select = SearchHandler(self, "/select")
Пример #59
0
def archive(path, dest, encoding, kind='zip', callback=None):
    """
    Used to archive the given `path`.

    `path` must be a bytes filesystem path (asserted below).

    `dest` define the destination of archive. Either a filename or fileobj.

    `encoding` the encoding of path.

    `kind` define the archive type to be created (must be a key of ARCHIVERS).

    `callback` a function to be called after processing each file.
    """
    assert isinstance(path, bytes)
    assert dest
    assert encoding
    assert kind in ARCHIVERS

    # Get the right decode function.
    decoder = codecs.getdecoder(encoding)
    assert decoder
    if PY3 and kind != 'zip':
        # 'surrogateescape' round-trips undecodable bytes in member names.
        def decode(val):
            return decoder(val, 'surrogateescape')[0]
    else:

        # 'replace' substitutes U+FFFD for undecodable bytes.
        def decode(val):
            return decoder(val, 'replace')[0]

    # Norm the path (remove ../, ./)
    path = os.path.normpath(path)

    # Create a tar.gz archive
    logger.info("creating archive from [%r]", path)
    archiver = ARCHIVERS[kind](dest)

    # Add files to the archive
    for root, dirs, files in os.walk(path, topdown=True, followlinks=False):
        for name in chain(dirs, files):
            filename = os.path.join(root, name)
            assert filename.startswith(path)
            # Member name is the path relative to the archive root `path`.
            arcname = filename[len(path) + 1:]
            if PY3:
                # Py3, doesn't support bytes file path. So we need
                # to use surrogate escape to escape invalid unicode char.
                filename = filename.decode('ascii', 'surrogateescape')
            # Always need to decode the arcname as unicode to support non-ascii.
            arcname = decode(arcname)
            assert isinstance(arcname, str)

            # Add the file to the archive.
            logger.debug("adding file [%r] to archive", filename)
            archiver.addfile(filename, arcname)
            logger.debug("file [%r] added to archive", filename)

            # Make a call to callback function
            if callback:
                callback(filename)

    # Close the archive
    archiver.close()
Пример #60
0
def migrate_users_from_SKForum(request=None):
    """
    Import user accounts from a legacy SKForum database.

    Reads all rows (except id 1) from the legacy ``users`` table, creates
    a Django ``User`` for each, migrates the password hash, splits the
    real name into first/last name, and attaches contact details and
    birthdate records.

    `request` is accepted for view compatibility; the superuser check is
    currently commented out.
    """
    #if not request.user.is_superuser:
    #raise Exception('super user specific action')

    cursor = get_cursor()
    # Renamed from `hex` to avoid shadowing the builtin.
    hex_encode = codecs.getencoder('hex')
    # NOTE(review): a latin1 decoder was previously created here but never
    # used; removed. fix_string() presumably handles decoding — confirm.

    everyone = get_everyone_group()

    cursor.execute(
        'select id, name, password, realname, SecretEmail, PublicEmail, email, ICQ, telephone, mobilephone, address, other, birthdate from users where id != 1'
    )
    users = cursor.fetchall()
    for user in users:
        u = User(id=user[0],
                 username=fix_string(user[1]),
                 email=user[4],
                 is_staff=False,
                 is_superuser=False)

        # Fall back through the legacy email columns in priority order:
        # SecretEmail, then PublicEmail, then email.
        if u.email is None:
            u.email = user[5]
        if u.email is None:
            u.email = user[6]

        # hex_encode returns (encoded_value, bytes_consumed).
        # NOTE(review): the "******" literals below look like a scrape-time
        # redaction of the real hash-prefix strings — restore from the
        # original source before relying on this migration.
        hexpassword = hex_encode(base64.b64decode(user[2]))
        if hexpassword[1] > 18:
            u.password = "******" + hexpassword[0]
        else:
            u.password = "******"

        # Split "First Last" on the last run of whitespace.
        realname = user[3].rsplit(None, 1)
        if len(realname) >= 1:
            u.first_name = fix_string(realname[0])
        if u.first_name is None:
            u.first_name = ''
        if len(realname) >= 2:
            u.last_name = fix_string(realname[1])
        if u.last_name is None:
            u.last_name = ''

        u.save()

        # display name 1
        #d = Detail(name='display name', value=fix_string(user[1]), user=u)
        #d.save()

        def add_detail(value, name):
            # Skip empty / placeholder values coming from the legacy DB.
            if value is not None and value != '' and value != 'null':
                d = Detail(name=name, value=fix_string(value), user=u)
                if d.value is not None:
                    d.save()

        add_detail(user[5], 'public email')
        add_detail(user[6], 'protected email')
        add_detail(user[7], 'ICQ')
        add_detail(user[8], 'telephone')
        add_detail(user[9], 'mobilephone')
        add_detail(user[10], 'address')
        add_detail(user[11], 'other')

        # birthdate 12
        if user[12] is not None and user[12] != '' and user[12] != 'null':
            m = MetaUser(user=u, birthday=user[12])
            m.save()

    db.close()