コード例 #1
0
  def __init__(self, stream, fieldnames, encoding='utf-8', **kwds):
    """Initializer.

    Args:
      stream: Stream to write to.
      fieldnames: Fieldnames to pass to the DictWriter.
      encoding: Desired encoding.
      kwds: Additional arguments to pass to the DictWriter.
    """

    writer = codecs.getwriter(encoding)

    # For these four codecs csv's output can be written to the target stream
    # directly (no_recoding=True); for any other encoding, rows are buffered
    # in an in-memory queue as UTF-8 and recoded through a StreamWriter.
    if (writer is encodings.utf_8.StreamWriter or
        writer is encodings.ascii.StreamWriter or
        writer is encodings.latin_1.StreamWriter or
        writer is encodings.cp1252.StreamWriter):
      self.no_recoding = True
      self.encoder = codecs.getencoder(encoding)
      self.writer = csv.DictWriter(stream, fieldnames, **kwds)
    else:
      # NOTE(review): the queue is presumably drained and re-encoded to
      # self.stream by a writerow/flush method elsewhere in this class --
      # not visible here, confirm.
      self.no_recoding = False
      self.encoder = codecs.getencoder('utf-8')
      self.queue = cStringIO.StringIO()
      self.writer = csv.DictWriter(self.queue, fieldnames, **kwds)
      self.stream = writer(stream)
コード例 #2
0
ファイル: plugin.py プロジェクト: Erika-Mustermann/Limnoria
    def encode(self, irc, msg, args, encoding, text):
        """<encoding> <text>

        Returns an encoded form of the given text; the valid encodings are
        available in the documentation of the Python codecs module:
        <http://docs.python.org/library/codecs.html#standard-encodings>.
        """
        # Binary codecs are prefixed with _codec in Python 3.  Use an exact
        # membership test: the previous substring test
        # (encoding in 'base64 bz2 hex quopri uu zlib') wrongly matched any
        # substring, e.g. 'b', 'x q' or 'zli'.
        if encoding in ('base64', 'bz2', 'hex', 'quopri', 'uu', 'zlib'):
            encoding += '_codec'
        if encoding.endswith('_codec'):
            # Binary codecs operate on bytes, not str.
            text = text.encode()

        # Do the encoding
        try:
            encoder = codecs.getencoder(encoding)
        except LookupError:
            irc.errorInvalid(_('encoding'), encoding)
        text = encoder(text)[0]

        # If this is a binary codec, re-encode it with base64 so the
        # result is printable
        if encoding.endswith('_codec') and encoding != 'base64_codec':
            text = codecs.getencoder('base64_codec')(text)[0].decode()

        # Change result into a string
        if sys.version_info[0] < 3 and isinstance(text, unicode):
            text = text.encode('utf-8')
        elif sys.version_info[0] >= 3 and isinstance(text, bytes):
            text = text.decode()

        # Reply
        irc.reply(text.rstrip('\n'))
コード例 #3
0
ファイル: text_decode.py プロジェクト: facelessuser/Rummage
def has_bom(content):
    """Check for `UTF8`, `UTF16`, and `UTF32` BOMs.

    Returns an `Encoding` for the detected BOM, or `None` if the content
    starts with no known BOM.
    """

    bom = None
    # All branches use `startswith`: the previous equality tests
    # (`content == BOM_...`) only matched content that consisted of nothing
    # but the BOM itself.  Order matters: the UTF-32 LE BOM begins with the
    # UTF-16 LE BOM, so UTF-32 must be tested before UTF-16.
    if content.startswith(codecs.BOM_UTF8):
        bom = Encoding('utf-8', codecs.BOM_UTF8)
    elif content.startswith(codecs.BOM_UTF32_BE):
        bom = Encoding('utf-32-be', codecs.BOM_UTF32_BE)
    elif content.startswith(codecs.BOM_UTF32_LE):
        bom = Encoding('utf-32-le', codecs.BOM_UTF32_LE)
    elif content.startswith(BOM_10646_UC4_3412):
        bom = Encoding(ENCODING_10646_UC4_3412, BOM_10646_UC4_3412)
    elif content.startswith(BOM_10646_UC4_2143):
        bom = Encoding(ENCODING_10646_UC4_2143, BOM_10646_UC4_2143)
    elif content.startswith(codecs.BOM_UTF16_BE):
        bom = Encoding('utf-16-be', codecs.BOM_UTF16_BE)
    elif content.startswith(codecs.BOM_UTF16_LE):
        bom = Encoding('utf-16-le', codecs.BOM_UTF16_LE)

    # It is doubtful we have an encoder that can handle these middle endian
    # encodings, but lets give it a try and default to bin if nothing is found.
    # Not sure who'd use these encodings anyways.
    if bom and bom.encode in (ENCODING_10646_UC4_2143, ENCODING_10646_UC4_3412):
        try:
            codecs.getencoder(bom.encode)
        except Exception:
            bom = Encoding('bin', None)

    return bom
コード例 #4
0
ファイル: util.py プロジェクト: nrgaway/qubes-tools
def encoded(u, encoding):
    """
    Encode string u (denoting it is expected to be in Unicode) if there's
    encoding to be done. Tries to mask the difference between Python 2 and 3,
    which have different models of string processing, and different codec APIs
    and quirks. Some Python 3 encoders further require ``bytes`` in, not
    ``str``. These are first encoded into utf-8, encoded, then decoded.
    """
    if not encoding:
        return u
    if not _PY3:
        # Python 2 may not have the best handling of Unicode, but by
        # God its encode operations are straightforward!
        return u.encode(encoding)
    try:
        return getencoder(encoding)(u)[0]
    except LookupError:
        # PY3 requires the lower-level interface for many codecs: bytes-to-
        # bytes codecs are not looked up by nickname (e.g. 'base64'), so we
        # retry under the '<name>_codec' spelling, feeding UTF-8 bytes in.
        name = encoding.replace('-', '')  # base-64 becomes base64 e.g.
        raw = getencoder(name + '_codec')(u.encode('utf-8'))[0]
        # Codecs whose output is textual are decoded back into a str; the
        # rest (bz2, zlib, ...) stay as binary bytes.
        if name in {'base64', 'hex', 'quopri', 'uu'}:
            return raw.decode('utf-8')
        return raw
コード例 #5
0
ファイル: tests.py プロジェクト: ccomb/pyramid_mailer
    def test_attach_as_body_and_html(self):
        """Body and HTML attachments using quoted-printable must round-trip
        through Message.to_message with matching headers and payloads."""
        import codecs
        from pyramid_mailer.message import Attachment
        from pyramid_mailer.message import Message

        charset = 'latin-1'
        raw = b('LaPe\xf1a')
        plain_text = raw.decode(charset)
        html_text = '<p>' + plain_text + '</p>'
        cte = 'quoted-printable'

        msg = Message(subject="testing",
                      sender="*****@*****.**",
                      recipients=["*****@*****.**"],
                      body=Attachment(data=plain_text, transfer_encoding=cte),
                      html=Attachment(data=html_text, transfer_encoding=cte))
        body_part, html_part = msg.to_message().get_payload()

        quopri = codecs.getencoder('quopri_codec')

        self.assertEqual(
            body_part['Content-Type'], 'text/plain')
        self.assertEqual(
            body_part['Content-Transfer-Encoding'], cte)
        self.assertEqual(
            body_part.get_payload(),
            quopri(plain_text.encode(charset))[0].decode('ascii'))

        self.assertEqual(
            html_part['Content-Type'], 'text/html')
        self.assertEqual(
            html_part['Content-Transfer-Encoding'], cte)
        self.assertEqual(
            html_part.get_payload(),
            quopri(html_text.encode(charset))[0].decode('ascii'))
コード例 #6
0
ファイル: deffnet.py プロジェクト: CLPeters/DeFFNetIzer
    def handlestory(self):
        """Fetch metadata for the story ID typed into the 'Story ID' box and
        populate the UI (title, author, chapter count, widget states)."""
        try:
            # Unicode fun stuff: candidate codecs to try, most preferred first.
            self.codecs = ['utf-8', 'latin-1', 'niceutf-8']
            try:
                # Prepend the user-supplied encoding if it names a real codec.
                c = self.entryboxes['Encoding'].get()
                codecs.getencoder(c)
                self.codecs.insert(0,c)
            except LookupError:
                pass

            try:
                # Strip out all non-digit characters.
                self.story = int(re.sub(r'\D',"",self.entryboxes['Story ID'].get()))
            except ValueError:
                self.setstatus("Invalid Story ID", True)
                return

            self.setstatus("Retrieving story info")
            self.soups = {}
            source = self.retrieveChapter(1)

            self.soups[1] = self.makeSoup(source) # Fetch down the first chapter to extract metadata
            self.numchaps, self.title, self.author = self.extractStoryInfo(self.soups[1])

            # Reflect the fetched metadata in the UI and enable chapter entry.
            self.labels['Title']['text'] = self.title
            self.labels['Author']['text'] = self.author
            self.labels['NumChaps']['text'] = self.numchaps
            self.entryboxes['Chapters']['state'] = NORMAL
            self.buttons['Chapters']['state'] = NORMAL

            # Reset chapter/directory inputs; directory stays disabled until
            # chapters are chosen.
            self.entryboxes['Chapters'].delete(0,END)
            self.entryboxes['Directory'].delete(0,END)
            self.entryboxes['Directory']['state'] = DISABLED
            self.buttons['Directory']['state'] = DISABLED

            self.completedshow()
            self.completedenable(False)
            self.completedchecked(False)
            self.oneshotchecked(False)

            self.setstatus("Ready")
        except:
            # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit,
            # and self.story is formatted below even though it may be unset if
            # the failure happened before its assignment -- confirm.
            builder = cStringIO.StringIO()
            builder.write("Platform: %s\nStory: %d\n\n" % (platform(), self.story))

            traceback.print_exc(file=builder)
            e = ExceptionBox(root, "Exception", builder.getvalue())
コード例 #7
0
ファイル: test_codecs.py プロジェクト: Qointum/pypy
 def test_utf_16_encode_decode(self):
     import codecs, sys
     x = '123abc'
     if sys.byteorder == 'big':
         assert codecs.getencoder('utf-16')(x) == (
                 b'\xfe\xff\x001\x002\x003\x00a\x00b\x00c', 6)
         assert codecs.getdecoder('utf-16')(
                 b'\xfe\xff\x001\x002\x003\x00a\x00b\x00c') == (x, 14)
     else:
         assert codecs.getencoder('utf-16')(x) == (
                 b'\xff\xfe1\x002\x003\x00a\x00b\x00c\x00', 6)
         assert codecs.getdecoder('utf-16')(
                 b'\xff\xfe1\x002\x003\x00a\x00b\x00c\x00') == (x, 14)
コード例 #8
0
ファイル: knowledge_base.py プロジェクト: log2timeline/plaso
  def SetCodepage(self, codepage):
    """Sets the codepage.

    Args:
      codepage (str): codepage.

    Raises:
      ValueError: if the codepage is not supported.
    """
    try:
      # Probing for an encoder is the cheapest way to validate the name.
      codecs.getencoder(codepage)
    except LookupError:
      raise ValueError('Unsupported codepage: {0:s}'.format(codepage))
    self._codepage = codepage
コード例 #9
0
ファイル: git_savvy.py プロジェクト: asdlei00/GitSavvy
 def reload_codecs():
     """Reload the codec machinery so encodings provided by other packages
     become importable, then verify the configured fallback encoding loads."""
     savvy_settings = sublime.load_settings("GitSavvy.sublime-settings")
     fallback_encoding = savvy_settings.get("fallback_encoding")
     try:
         # NOTE(review): _multibytecodec is imported (unused here) presumably
         # so CJK codecs resolve after the reloads -- confirm.
         import _multibytecodec, imp, codecs, encodings
         imp.reload(encodings)
         imp.reload(codecs)
         # Raises LookupError if the fallback codec still cannot be found.
         codecs.getencoder(fallback_encoding)
     except (ImportError, LookupError) as e:
         sublime.error_message(
             "You have enabled `load_additional_codecs` mode, but the "
             "`fallback_encoding` codec cannot load.  This probably means "
             "you don't have the Codecs33 package installed, or you've "
             "entered an unsupported encoding.")
コード例 #10
0
ファイル: __init__.py プロジェクト: draftcode/csvwrapper
def writer(csvfile, encoding, errors='strict', dialect='excel', **kwds):
    """Return a csvwriter that writes rows to *csvfile* in *encoding*.

    Rows are produced as UTF-8 by the csv module, decoded from UTF-8 by the
    StreamRecoder, and re-encoded to the target encoding on output.  The
    'undefined' codec is used for the directions that must never be taken.
    """
    recoder = codecs.StreamRecoder(
        csvfile,
        codecs.getencoder('undefined'),
        codecs.getdecoder('utf_8'),
        codecs.getreader('undefined'),
        codecs.getwriter(encoding),
        errors)
    # Only keep a handle for later closing when the target is closable.
    stream = csvfile if hasattr(csvfile, 'close') else None
    inner = csv.writer(recoder, dialect=dialect, **kwds)
    return csvwriter(stream, inner, codecs.getencoder('utf_8'), errors)
コード例 #11
0
ファイル: schema.py プロジェクト: haikoschol/django
 def quote_value(self, value):
     """Return *value* rendered as an inline SQLite SQL literal (Python 2/
     six-compatible variant)."""
     try:
         value = _sqlite3.adapt(value)
     except _sqlite3.ProgrammingError:
         # No adapter registered for this type; quote the raw value below.
         pass
     # Manual emulation of SQLite parameter quoting
     if isinstance(value, type(True)):
         # bool is checked before the integer types since bool subclasses int.
         return str(int(value))
     elif isinstance(value, (Decimal, float)):
         return str(value)
     elif isinstance(value, six.integer_types):
         return str(value)
     elif isinstance(value, six.string_types):
         # Double single quotes per SQL string-literal escaping rules.
         return "'%s'" % six.text_type(value).replace("\'", "\'\'")
     elif value is None:
         return "NULL"
     elif isinstance(value, (bytes, bytearray, six.memoryview)):
         # Bytes are only allowed for BLOB fields, encoded as string
         # literals containing hexadecimal data and preceded by a single "X"
         # character:
         # value = b'\x01\x02' => value_hex = b'0102' => return X'0102'
         value = bytes(value)
         hex_encoder = codecs.getencoder('hex_codec')
         value_hex, _length = hex_encoder(value)
         # Use 'ascii' encoding for b'01' => '01', no need to use force_text here.
         return "X'%s'" % value_hex.decode('ascii')
     else:
         raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value)))
コード例 #12
0
ファイル: FSDigests.py プロジェクト: rstewart2702/misc
 def __initLocals__(self):
     """Initialize instance fields: base-class fields first, then the fields
     specific to ActorObj2."""
     # First, get the fields from the base class initialized:
     super(ActorObj2,self).__initLocals__()
     #
     # Next, initialize the special fields needed for this
     # particular kind of ActorObject, an ActorObj2:
     # ASCII encoder kept per-instance (usage not visible in this chunk).
     self.ucEncoder=codecs.getencoder("ascii")
コード例 #13
0
 def iter(self, fields, files):
     """Yield multipart/form-data body chunks as (bytes, length) pairs.

     fields: mapping of regular form fields (name -> value).
     files: sequence of (name, filename, file object) triples; each file
            object is read fully and closed.
     """
     enc = codecs.getencoder('utf-8')
     delimiter = '--{}\r\n'.format(self.boundary)
     for key, value in fields.items():
         name = self.u(key)
         yield enc(delimiter)
         yield enc(self.u('Content-Disposition: form-data; name="{}"\r\n').format(name))
         yield enc('\r\n')
         if isinstance(value, (int, float)):
             value = str(value)
         yield enc(self.u(value))
         yield enc('\r\n')
     for key, filename, fd in files:
         name = self.u(key)
         fname = self.u(filename)
         yield enc(delimiter)
         yield enc(self.u('Content-Disposition: form-data; name="{}"; filename="{}"\r\n').format(name, fname))
         content_type = mimetypes.guess_type(fname)[0] or 'application/octet-stream'
         yield enc('Content-Type: {}\r\n'.format(content_type))
         yield enc('\r\n')
         with fd:
             payload = fd.read()
             yield (payload, len(payload))
         yield enc('\r\n')
     yield enc('--{}--\r\n'.format(self.boundary))
コード例 #14
0
ファイル: utils.py プロジェクト: PierreBizouard/PyPDF2
def hexencode(b):
    """Return the hexadecimal representation of *b*, on both Python 2
    (via str.encode('hex')) and Python 3 (via the hex_codec codec)."""
    if sys.version_info[0] >= 3:
        import codecs
        return codecs.getencoder('hex_codec')(b)[0]
    return b.encode('hex')
コード例 #15
0
ファイル: utils.py プロジェクト: sececter/pyhwp
def transcode(backend_stream, backend_encoding, frontend_encoding,
              errors='strict'):
    """Wrap *backend_stream* in a StreamRecoder that presents data in
    *frontend_encoding* while storing it in *backend_encoding*."""
    return codecs.StreamRecoder(
        backend_stream,
        codecs.getencoder(frontend_encoding),
        codecs.getdecoder(frontend_encoding),
        codecs.getreader(backend_encoding),
        codecs.getwriter(backend_encoding),
        errors)
コード例 #16
0
 def iter(self, fields, files):
     """
     fields is a sequence of (name, value) elements for regular form fields.
     files is a sequence of (name, file-like) elements for data
     to be uploaded as files.
     Yield body's chunk as bytes
     """
     encoder = codecs.getencoder("utf-8")
     for key, value in fields.iteritems():
         key = self.u(key)
         yield encoder("--{}\r\n".format(self.boundary))
         yield encoder(self.u('Content-Disposition: form-data; name="{}"\r\n').format(key))
         yield encoder("\r\n")
         if isinstance(value, int) or isinstance(value, float):
             value = str(value)
         yield encoder(self.u(value))
         yield encoder("\r\n")
     for key, value in files.iteritems():
         key = self.u(key)
         filename = self.u(value.name)
         yield encoder("--{}\r\n".format(self.boundary))
         yield encoder(self.u('Content-Disposition: form-data; name="{}"; filename="{}"\r\n').format(key, filename))
         yield encoder(
             "Content-Type: {}\r\n".format(mimetypes.guess_type(filename)[0] or "application/octet-stream")
         )
         yield encoder("\r\n")
         buff = value.read()
         yield (buff, len(buff))
         yield encoder("\r\n")
     yield encoder("--{}--\r\b".format(self.boundary))
コード例 #17
0
def writeDoc(x, h):
  """Render a doc page: read source file *x*, inject its sections into the
  module-level `bp` page template, and write the result to *h* as ISO-8859-1.

  NOTE(review): Python 2 style I/O -- the output file is opened in text mode
  and receives encoded bytes; `bp`, `secRe` and `trimWS` are module-level
  helpers not visible in this chunk.
  """
  f = open(x)
  t = f.read()
  f.close()

  doc = bp

  # Get the title from the parsed XHTML source.
  xd = libxml2.parseFile(x)
  ctxt = xd.xpathNewContext()
  ctxt.xpathRegisterNs('html', 'http://www.w3.org/1999/xhtml')

  title = ctxt.xpathEvalExpression('string(/fvdoc//html:div[@id="message"])')

  title = trimWS(title)
  doc = doc.replace('<title></title>', '<title>' + title + '</title>')

  # Splice each section's text into the matching empty docbody div of the
  # template; the empty group () marks the insertion point.
  for (sec, txt) in secRe.findall(t):
    r = re.compile('<h2>' + sec + '</h2>\s*<div class="docbody">\s*()</div>', re.IGNORECASE)
    idx = r.search(doc).start(1)
    doc = doc[:idx] + txt + doc[idx:]

  # Decode the assembled page from UTF-8...
  c = codecs.getdecoder('utf-8')

  doc = c(doc)[0]

  # ...then re-encode as Latin-1, escaping unmappable characters as XML
  # character references.
  c = codecs.getencoder('iso-8859-1')

  f = open(h, 'w')
  f.write(c(doc, 'xmlcharrefreplace')[0])
  f.close()
コード例 #18
0
ファイル: schema.py プロジェクト: cypreess/django
 def quote_value(self, value):
     """Return *value* rendered as an inline SQLite SQL literal.

     The backend "mostly works" without sqlite3's adapter machinery, and
     there are use cases for compiling Python without the sqlite3 libraries
     (e.g. security hardening) -- hence the guarded import below.
     """
     try:
         import sqlite3
         value = sqlite3.adapt(value)
     except ImportError:
         pass
     except sqlite3.ProgrammingError:
         # No adapter registered for this type; quote the raw value below.
         pass
     # Manual emulation of SQLite parameter quoting.  bool must be tested
     # before the numeric types because bool subclasses int.
     if isinstance(value, bool):
         return str(int(value))
     if isinstance(value, (Decimal, float, int)):
         return str(value)
     if isinstance(value, str):
         # Double single quotes per SQL string-literal escaping rules.
         return "'%s'" % value.replace("\'", "\'\'")
     if value is None:
         return "NULL"
     if isinstance(value, (bytes, bytearray, memoryview)):
         # Bytes are only allowed for BLOB fields, encoded as string
         # literals containing hexadecimal data and preceded by a single
         # "X" character: b'\x01\x02' => X'0102'.
         blob_hex, _length = codecs.getencoder('hex_codec')(bytes(value))
         # 'ascii' suffices for hex digits; no need for force_text here.
         return "X'%s'" % blob_hex.decode('ascii')
     raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value)))
コード例 #19
0
 def iter(self, fields, files):
     """Yield multipart/form-data body chunks as (bytes, length) pairs.

     fields: dict of {name: value} regular form fields.
     files: dict of {name: (filename, file object)} uploads; each file
            object is read fully.
     """
     enc = codecs.getencoder('utf-8')
     delimiter = '--{}\r\n'.format(self.boundary)
     for key, value in fields.items():
         name = self.u(key)
         yield enc(delimiter)
         yield enc(self.u('Content-Disposition: form-data; name="{}"\r\n').format(name))
         yield enc('\r\n')
         if isinstance(value, (int, float)):
             value = str(value)
         yield enc(self.u(value))
         yield enc('\r\n')
     for key, (filename, handle) in files.items():
         name = self.u(key)
         fname = self.u(filename)
         yield enc(delimiter)
         yield enc(self.u('Content-Disposition: form-data; name="{}"; filename="{}"\r\n').format(name, fname))
         yield enc('Content-Type: application/octet-stream\r\n')
         yield enc('\r\n')
         payload = handle.read()
         yield (payload, len(payload))
         yield enc('\r\n')
     yield enc('--{}--\r\n'.format(self.boundary))
コード例 #20
0
ファイル: filestructure.py プロジェクト: changwoo/pyhwp
 def recode(backend_stream):
     """Wrap *backend_stream* in a StreamRecoder bridging the frontend and
     backend encodings (frontend_encoding, backend_encoding and errors are
     taken from the enclosing scope)."""
     import codecs
     return codecs.StreamRecoder(
         backend_stream,
         codecs.getencoder(frontend_encoding),
         codecs.getdecoder(frontend_encoding),
         codecs.getreader(backend_encoding),
         codecs.getwriter(backend_encoding),
         errors)
コード例 #21
0
 def __get_geopoint(self):
     """Return (lat, lng) for this event, resolved lazily: memcache first,
     then the Douban API, then the Google Maps geocoding API; ends up as
     (None, None) when every source fails."""
     key = self.id + '_geopoint'
     if not self.__geopoint:
         self.__geopoint = memcache.get(key)
     if not self.__geopoint:
         # try to get geopoint through douban api
         service = DoubanService('04ea268dfa658bd80ebe73a9ef30a388')
         try:
             # don't know why, but failed to get event feed in GAE
             feed = service.GetEvent(self.url)
             for elem in feed.extension_elements:
                 if elem.tag == 'point':
                     self.__geopoint = [float(p) for p in elem.text.split()]
             memcache.set(key, self.__geopoint)
         except:
             # NOTE(review): bare except keeps this best-effort but also
             # hides programming errors.
             pass
     if not self.__geopoint:
         # try to get geopoint through google map api
         location = codecs.getencoder('utf8')(self.location)[0]
         url = 'http://maps.google.com/maps/api/geocode/json?address=%s&sensor=false' % urllib.quote(location)
         try:
             # Google map api might be blocked by GFW in China, try/except to ignore such download failure
             reader = urllib.urlopen(url)
             result = simplejson.load(reader)
             if 'OK' == result['status'] :
                 geopoint = result['results'][0]['geometry']['location']
                 self.__geopoint = (geopoint['lat'], geopoint['lng'])
             memcache.set(key, self.__geopoint)
         except:
             pass
     if not self.__geopoint:
         # failed to get geopoint
         self.__geopoint = (None, None)
     return self.__geopoint
コード例 #22
0
 def test_basics(self):
     """Every supported string encoding must round-trip a short ASCII sample."""
     sample = "abc123"
     for encoding in all_string_encodings:
         encoded, consumed = codecs.getencoder(encoding)(sample)
         self.assertEqual(consumed, len(sample))
         decoded, _size = codecs.getdecoder(encoding)(encoded)
         self.assertEqual(decoded, sample, "%r != %r (encoding=%r)" % (decoded, sample, encoding))
コード例 #23
0
ファイル: loader.py プロジェクト: Aili1004/RateItRepo
  def _GetUtf8Contents(self, file_name):
    """Check for errors in file_name and return a string for csv reader.

    Returns None when the file is missing or contains a NUL byte; utf-16
    input is transcoded to utf-8 so error checking can continue.
    """
    contents = self._FileContents(file_name)
    if not contents:  # Missing file
      return

    # Check for errors that will prevent csv.reader from working
    if len(contents) >= 2 and contents[0:2] in (codecs.BOM_UTF16_BE,
        codecs.BOM_UTF16_LE):
      self._problems.FileFormat("appears to be encoded in utf-16", (file_name, ))
      # Convert and continue, so we can find more errors
      contents = codecs.getdecoder('utf-16')(contents)[0].encode('utf-8')

    null_index = contents.find('\0')
    if null_index != -1:
      # It is easier to get some surrounding text than calculate the exact
      # row_num
      m = re.search(r'.{,20}\0.{,20}', contents, re.DOTALL)
      self._problems.FileFormat(
          "contains a null in text \"%s\" at byte %d" %
          (codecs.getencoder('string_escape')(m.group()), null_index + 1),
          (file_name, ))
      return

    # Strip a single leading UTF-8 byte order mark (otherwise it'll be
    # treated as part of the first column name, causing a mis-parse).
    # Bug fix: lstrip() treats its argument as a *set of characters*, so the
    # previous contents.lstrip(codecs.BOM_UTF8) also removed legitimate
    # leading '\xef'/'\xbb'/'\xbf' bytes that were not part of a BOM.
    if contents.startswith(codecs.BOM_UTF8):
      contents = contents[len(codecs.BOM_UTF8):]
    return contents
コード例 #24
0
ファイル: repl.py プロジェクト: RolandXu/Sublime2
 def __init__(self, encoding, external_id=None, cmd_postfix="\n", suppress_echo=False):
     """Set up per-REPL state.

     encoding: codec name used to build this object's incremental decoder
         and encoder.
     external_id: optional caller-assigned identifier (stored as-is).
     cmd_postfix: string suffix for commands (stored as-is).
     suppress_echo: flag stored for use elsewhere in the class.
     """
     # Plain stored attributes first, then the derived codec objects.
     self.external_id = external_id
     self.cmd_postfix = cmd_postfix
     self.suppress_echo = suppress_echo
     self.id = uuid4().hex
     self.decoder = getincrementaldecoder(encoding)()
     self.encoder = getencoder(encoding)
コード例 #25
0
 def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
     """Python 2 unicode-aware csv writer: rows are staged in an in-memory
     queue and re-encoded to *encoding* when flushed to *f* (flush logic
     lives elsewhere in this class)."""
     # Redirect output to a queue
     self.queue = cStringIO.StringIO()
     self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
     self.stream = f
     self.encoder = codecs.getencoder(encoding)
     self.encoding = encoding
コード例 #26
0
ファイル: textwriter.py プロジェクト: abed-hawa/amara
 def start_document(self):
     """Fill in default output parameters and bind the encoder before any
     text is written."""
     params = self.output_parameters
     params.setdefault('media_type', 'text/plain')
     # The default encoding is actually system-dependent; we'll use UTF-8.
     self._encode = codecs.getencoder(params.setdefault('encoding', 'UTF-8'))
     return
コード例 #27
0
ファイル: ago.py プロジェクト: ivn888/sample-gp-tools
    def iter(self, fields, files):
        """
        Yield bytes for body. See class description for usage.
        """
        enc = codecs.getencoder('utf-8')
        delimiter = '--{}\r\n'.format(self.boundary)
        for key, value in fields.items():
            yield enc(delimiter)
            yield enc(
                self.u('Content-Disposition: form-data; name="{}"\r\n').format(key))
            yield enc('\r\n')
            if isinstance(value, (int, float)):
                value = str(value)
            yield enc(self.u(value))
            yield enc('\r\n')

        for key, value in files.items():
            # Headers are emitted only when a filename is present; the
            # content chunk is emitted independently when present.
            if "filename" in value:
                filename = value.get("filename")
                disposition = 'Content-Disposition: form-data;name=' + \
                              '"{}"; filename="{}"\r\n'.format(key, filename)
                mime = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
                yield enc(delimiter)
                yield enc(disposition)
                yield enc('Content-Type: {}\r\n'.format(mime))
            yield enc('\r\n')
            if "content" in value:
                buff = value.get("content")
                yield (buff, len(buff))
            yield enc('\r\n')

        yield enc('--{}--\r\n'.format(self.boundary))
コード例 #28
0
    def _get_log_name_for_download(self, log_id):
        """Return *log_id* with its obfuscated trailing code decoded.

        Ids whose trailing code (after the last '-') starts with 'x' carry
        three big-endian uint16s in hex, XOR-obfuscated against a fixed key
        table; this de-obfuscates them into a four-hex-digit pair appended
        to the id prefix.  Other ids are returned unchanged.
        """
        # Fixed XOR key table used by the obfuscation scheme.
        table = [
            22136, 52719, 55146, 42104,
            59591, 46934, 9248,  28891,
            49597, 52974, 62844, 4015,
            18311, 50730, 43056, 17939,
            64838, 38145, 27008, 39128,
            35652, 63407, 65535, 23473,
            35164, 55230, 27536, 4386,
            64920, 29075, 42617, 17294,
            18868, 2081
        ]

        code_pos = log_id.rindex("-") + 1
        code = log_id[code_pos:]
        if code[0] == 'x':
            # Three big-endian uint16s follow the 'x' marker as hex digits.
            a, b, c = struct.unpack(">HHH", bytes.fromhex(code[1:]))
            index = 0
            # Logs after 2010-04-11 11:xx select a table offset derived from
            # the id's date digits (lexicographic compare on the prefix).
            if log_id[:12] > "2010041111gm":
                x = int("3" + log_id[4:10])
                y = int(log_id[9])
                index = x % (33 - y)
            first = (a ^ b ^ table[index]) & 0xFFFF
            second = (b ^ c ^ table[index] ^ table[index + 1]) & 0xFFFF
            return log_id[:code_pos] + codecs.getencoder('hex_codec')(struct.pack(">HH", first, second))[0].decode('ASCII')
        else:
            return log_id
コード例 #29
0
def main():
    """Carve files hidden in a corrupted base64-encoded container.

    Reads a user-named file, base64-decodes it, dumps the result as hex to
    base64_decoded.txt, then extracts embedded files by their magic-number
    signatures (JPEG, PDF, PNG, GIF, ZIP/DOCX) using the module-level
    `extract` and `hex_to_ascii` helpers (not visible in this chunk).
    """
    # Taking corrupted base64 decoded file from the user
    file_name=input("Enter the name of file you want to extract data from:  ")
    x=open(file_name, "rb")
    s=x.read()
    d=base64.standard_b64decode(s)
    hexlify=codecs.getencoder('hex')
    z=hexlify(d)[0]

    # Saving the decoded contents to a text file
    # NOTE(review): str(z) on bytes includes the b'...' wrapper in Python 3;
    # confirm that extract() accounts for those extra characters.
    f=open("base64_decoded.txt","w")
    f.write(str(z))
    f.close()

    # Calling extract function: (start_signature, end_signature, in, out)
    a=extract("ffd8","ffd9","base64_decoded.txt","output1.txt")
    b=extract("255044462d312e", "2525454f46","base64_decoded.txt","output2.txt")
    c=extract("89504e470d0a1a0a","49454e44ae426082","base64_decoded.txt","output3.txt")
    d=extract("474946383961","00003b","base64_decoded.txt","output4.txt")
    e=extract("504b0304","504b0506","base64_decoded.txt","output5.txt")

    # a,b,c,d,e hold the extracted file contents in hex form

    # Calling hex_to_ascii function and passing extension
    hex_to_ascii(a,"secret.jpeg","JPEG")
    hex_to_ascii(b,"Confidential.pdf","PDF")
    hex_to_ascii(c,"secure.png","PNG")
    hex_to_ascii(d,"clue.gif","GIF")
    hex_to_ascii(e,"hidden.docx","Docx")
コード例 #30
0
    def get(self, request, project_slug, dataset_slug, **kwargs):
        """Return all messages of a dataset as a CSV attachment (Python 2).

        Columns: id, time, sender, text, codes_str (pipe-joined code names).
        """
        # find our dataset & messages
        # NOTE(review): `dataset` is a QuerySet, not a single object; passing
        # it as filter(dataset=...) relies on Django treating it as a
        # subquery -- confirm this matches the intended lookup.
        dataset = models.Dataset.objects.filter(slug=dataset_slug)
        messages = models.Message.objects.filter(dataset=dataset)

        # begin a streaming response
        response = HttpResponse(content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename="%s-%s.csv"'%(project_slug, dataset_slug)

        # attach a csv writer to that
        csvwriter = csv.writer(response)
        csvwriter.writerow(["id", "time", "sender", "text", "codes_str"])

        # create a utf-8 encoder (Python 2 csv needs pre-encoded byte strings)
        encoder = codecs.getencoder("utf-8")

        # step through messages
        for m in messages:

            # grab all code instances attached to this message
            codes = project_models.CodeInstance.objects.filter(message=m)
            codes_str =  u"|".join([unicode(c.code.name) for c in codes]) if codes is not None and codes.count() > 0 else ""

            # create the column values for our row
            cols = [unicode(m.id), unicode(m.time), unicode(m.sender),  unicode(m.text), codes_str]

            # encode and write them
            encoded_cols = [encoder(c)[0] for c in cols]
            csvwriter.writerow(encoded_cols)

        # return the full response
        return response
コード例 #31
0
    def __init__(self,
                 url,
                 persistent=True,
                 timeout=None,
                 ssl_key=None,
                 ssl_cert=None,
                 post_headers={}):
        """
            url -- URI pointing to the SOLR instance. Examples:

                http://localhost:8080/solr
                https://solr-server/solr

                Your python install must be compiled with SSL support for the
                https:// schemes to work. (Most pre-packaged pythons are.)

            persistent -- Keep a persistent HTTP connection open.
                Defaults to true

            timeout -- Timeout, in seconds, for the server to response.
                By default, use the python default timeout (of none?)

            ssl_key, ssl_cert -- If using client-side key files for
                SSL authentication,  these should be, respectively,
                your PEM key file and certificate file

        """
        # NOTE(review): post_headers={} is a shared mutable default; safe
        # only as long as callers never mutate it.

        self.scheme, self.host, self.path = urlparse.urlparse(url, 'http')[:3]
        self.url = url

        assert self.scheme in ('http', 'https')

        self.persistent = persistent
        self.reconnects = 0
        self.timeout = timeout
        self.ssl_key = ssl_key
        self.ssl_cert = ssl_cert

        kwargs = {}

        # httplib only accepts a timeout kwarg on Python 2.6+; older
        # versions are handled after connecting (see bottom of this method).
        if self.timeout and _python_version >= 2.6 and _python_version < 3:
            kwargs['timeout'] = self.timeout

        if self.scheme == 'https':
            self.conn = httplib.HTTPSConnection(self.host,
                                                key_file=ssl_key,
                                                cert_file=ssl_cert,
                                                **kwargs)
        else:
            self.conn = httplib.HTTPConnection(self.host, **kwargs)

        # this is int, not bool!
        self.batch_cnt = 0
        self.response_version = 2.2
        # Requests are always sent to Solr as UTF-8.
        self.encoder = codecs.getencoder('utf-8')

        # Responses from Solr will always be in UTF-8
        self.decoder = codecs.getdecoder('utf-8')

        # Set timeout, if applicable.
        if self.timeout and _python_version < 2.6:
            self.conn.connect()
            if self.scheme == 'http':
                self.conn.sock.settimeout(self.timeout)
            elif self.scheme == 'https':
                # HTTPS wraps the raw socket; reach through the SSL layer.
                self.conn.sock.sock.settimeout(self.timeout)

        self.xmlheaders = {'Content-Type': 'text/xml; charset=utf-8'}
        self.xmlheaders.update(post_headers)
        if not self.persistent:
            self.xmlheaders['Connection'] = 'close'

        self.form_headers = {
            'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
        }

        if not self.persistent:
            self.form_headers['Connection'] = 'close'
コード例 #32
0
    def test_basic_spans_emit(self):
        """Emitting two single-span batches must produce one Export call
        each, preserving the exporter node and the trace/span ids."""
        hex_encoder = codecs.getencoder('hex')
        client = mock.Mock()
        # Export must return an iterable of responses for the exporter to
        # consume.
        client.Export.return_value = iter([1])

        span_data0 = span_data_module.SpanData(
            name="name0",
            context=span_context_module.SpanContext(
                trace_id='0e0c63257de34c92bf9efcd03927272e'),
            span_id='0e0c63257de34c92',
            parent_span_id=None,
            start_time=None,
            end_time=None,
            attributes=None,
            child_span_count=None,
            stack_trace=None,
            time_events=None,
            links=None,
            status=None,
            same_process_as_parent_span=None,
            span_kind=0)

        span_data1 = span_data_module.SpanData(
            name="name1",
            context=span_context_module.SpanContext(
                trace_id='1e0c63257de34c92bf9efcd03927272e'),
            span_id='1e0c63257de34c92',
            parent_span_id=None,
            start_time=None,
            end_time=None,
            attributes=None,
            child_span_count=None,
            stack_trace=None,
            time_events=None,
            links=None,
            status=None,
            same_process_as_parent_span=None,
            span_kind=0)

        exporter = TraceExporter(service_name=SERVICE_NAME,
                                 client=client,
                                 transport=MockTransport)

        exporter.emit([span_data0])

        # The protobuf request carries binary ids; hex-encode to compare with
        # the string ids used above.
        actual_request0 = list(client.Export.call_args[0][0])[0]
        self.assertEqual(actual_request0.node, exporter.node)

        pb_span0 = actual_request0.spans[0]
        self.assertEqual(pb_span0.name.value, "name0")
        self.assertEqual(
            hex_encoder(pb_span0.trace_id)[0],
            b'0e0c63257de34c92bf9efcd03927272e')
        self.assertEqual(hex_encoder(pb_span0.span_id)[0], b'0e0c63257de34c92')

        exporter.emit([span_data1])

        # Second emit: a second Export call with the second span's ids.
        self.assertEqual(len(client.Export.mock_calls), 2)
        actual_request1 = list(client.Export.call_args[0][0])[0]
        self.assertEqual(actual_request1.node, exporter.node)
        pb_span1 = actual_request1.spans[0]
        self.assertEqual(pb_span1.name.value, "name1")
        self.assertEqual(
            hex_encoder(pb_span1.trace_id)[0],
            b'1e0c63257de34c92bf9efcd03927272e')
        self.assertEqual(hex_encoder(pb_span1.span_id)[0], b'1e0c63257de34c92')
コード例 #33
0
ファイル: xmlEncoding.py プロジェクト: w3c/feedvalidator
        return enc, dec(bs)[0]
    except UnicodeDecodeError as ue:
        salvage = dec(bs, 'replace')[0]
        if 'start' in ue.__dict__:
            # XXX 'start' is in bytes, not characters. This is wrong for multibyte
            #  encodings
            pos = _position(salvage, ue.start)
        else:
            pos = None

        _logEvent(loggedEvents, logging.UnicodeError({"exception": ue}), pos)

        return enc, salvage


_encUTF8 = codecs.getencoder('UTF-8')


def asUTF8(x):
    """Return *x* as UTF-8 encoded bytes suitable for parsing.

    Any XML encoding declaration is removed first, so the parser will
    not be confused by a stale declaration.
    """
    # Strip the <?xml ... encoding="..."?> declaration before encoding.
    stripped = removeDeclaration(str(x))
    encoded, _consumed = _encUTF8(stripped)
    return encoded


if __name__ == '__main__':
    from sys import argv
    from os.path import isfile

    for x in argv[1:]:
        if isfile(x):
コード例 #34
0
 def _bytes_to_hex(bs):
     """Return the hex-digit representation of the bytes *bs* (as bytes)."""
     encode = codecs.getencoder('hex')
     hex_digits, _consumed = encode(bs)
     return hex_digits
コード例 #35
0
            encode=InvertCapsCodec().encode,
            decode=InvertCapsCodec().decode,
            incrementalencoder=InvertCapsIncrementalEncoder,
            incrementaldecoder=InvertCapsIncrementalDecoder,
            streamreader=InvertCapsStreamReader,
            streamwriter=InvertCapsStreamWriter,
        )
    return None


# Register the search function so 'invertcaps' resolves like any other codec.
codecs.register(find_invertcaps)

if __name__ == '__main__':

    # Stateless encoder/decoder
    # getencoder returns a callable: (text) -> (encoded, length_consumed).
    encoder = codecs.getencoder('invertcaps')
    text = 'abcDEF'
    encoded_text, consumed = encoder(text)
    print('Encoded "{}" to "{}", consuming {} characters'.format(
        text, encoded_text, consumed))

    # Stream writer
    # Wrap a BytesIO so encoded output accumulates in memory.
    import io
    buffer = io.BytesIO()
    writer = codecs.getwriter('invertcaps')(buffer)
    print('StreamWriter for io buffer: ')
    print('  writing "abcDEF"')
    writer.write('abcDEF')
    print('  buffer contents: ', buffer.getvalue())

    # Incremental decoder
コード例 #36
0
ファイル: text.py プロジェクト: ciscorn/ldoce5viewer
import codecs
import re
import unicodedata

# Codec and unicodedata lookups hoisted to module level so hot callers
# avoid repeated attribute/registry lookups.
_utf8_encoder = codecs.getencoder("utf-8")
_utf8_decoder = codecs.getdecoder("utf-8")
_unicode_normalize = unicodedata.normalize
_unicode_category = unicodedata.category

# Opening tag: no '/' in the tag body; closing tag: body starts with '/'.
MATCH_OPEN_TAG = re.compile(r"\<([^\/]+?)\>")
MATCH_CLOSE_TAG = re.compile(r"\<(\/.+?)\>")


def enc_utf8(s):
    """Encode the text *s* to UTF-8 bytes."""
    encoded, _length = _utf8_encoder(s)
    return encoded


def dec_utf8(s):
    """Decode the UTF-8 bytes *s* to text."""
    decoded, _length = _utf8_decoder(s)
    return decoded


def normalize_token(t):
    """Normalize token *t* for matching.

    Maps the copyright sign to 'c', then NFKD-decomposes the string and
    drops nonspacing combining marks (so accented letters compare equal
    to their base letters).
    """
    key = t.replace(u"\u00A9", u"c")
    decomposed = _unicode_normalize(u"NFKD", key)
    # Category 'Mn' = nonspacing mark; everything else is kept.
    return u"".join(ch for ch in decomposed if _unicode_category(ch) != "Mn")
コード例 #37
0
def hex(stuff):
    """Return the hexadecimal digit string for the bytes *stuff*.

    NOTE: this shadows the builtin hex(); the name is kept so existing
    callers keep working.
    """
    encode = codecs.getencoder('hex')
    digits, _consumed = encode(stuff)
    return digits.decode("utf-8")
コード例 #38
0
def encrypt_file(file_name, key):
    """Encrypt the file at *file_name* with *key*, writing the result
    to a new file named "enc_<file_name>".

    Relies on an encrypt() helper defined elsewhere in this script.
    Returns None.
    """
    with open(file_name, 'rb') as fo:
        plaintext = fo.read()
    enc = encrypt(plaintext, key)
    with open("enc_"+ file_name, 'wb') as fo:
        fo.write(enc)

#get (p,g,h)
# NOTE(review): the key file is reopened once per value and never closed;
# a single open reading three lines would do the same job.
file = open('PublicKey.txt', 'r');
p = int(file.readlines()[0])
file = open('PublicKey.txt', 'r');
g = int(file.readlines()[1])
file = open('PublicKey.txt', 'r');
h = int(file.readlines()[2])

# hexlify maps raw bytes to their hex-digit representation (bytes, length).
hexlify = codecs.getencoder('hex')
key = hexlify(os.urandom(16))[0];


inpfile = input("Enter the name of the file you wish to encrypt along with the extension (For example: test.jpeg): ")
# NOTE(review): encrypt_file() returns None, so C1 is always None here.
C1 =encrypt_file(inpfile, key) #add user input for asking the filename
Kdash = int(key, 16)
# ElGamal encryption of the symmetric key: C2 = g^r mod p, C3 = K * h^r mod p.
r = random.randint(0,p-2)
C2 = squareAndMultipy(g,r,p)
C3 = squareAndMultipy(h,r,p)
C3 = squareAndMultipy(Kdash*C3,1,p)


target = open("Cipher.txt",'w');
target.write("%s" %C2);
target.write("\n");
コード例 #39
0
ファイル: ConvertToUTF8.py プロジェクト: Jornathon0/sublime3
	def run(self, edit, encoding=None, stamp=None, detect_on_fail=False):
		"""Convert the view's buffer to UTF-8 from *encoding*.

		With an explicit *encoding*, only (re)initializes the encoding
		variables; otherwise reads the file with the previously detected
		origin encoding and replaces the buffer contents in place.
		"""
		view = self.view
		if encoding:
			view.settings().set('force_encoding', encoding)
			origin_encoding = view.settings().get('origin_encoding')
			# convert only when ST can't load file properly
			run_convert = (view.encoding() == view.settings().get('fallback_encoding'))
			if origin_encoding:
				if origin_encoding == encoding:
					return
				view.set_scratch(False)
				run_convert = False
			init_encoding_vars(view, encoding, run_convert, stamp)
			return
		else:
			encoding = view.settings().get('origin_encoding')
		if not encoding:
			return
		file_name = view.file_name()
		if not (file_name and os.path.exists(file_name)):
			return
		# try fast decode
		fp = None
		try:
			fp = codecs.open(file_name, 'rb', encoding, errors='strict')
			contents = fp.read()
		except LookupError as e:
			# The codec is unknown: try reloading the codec machinery in
			# case a codecs package was installed after startup.
			try:
				# reload codecs
				import _multibytecodec, imp, encodings
				imp.reload(encodings)
				imp.reload(codecs)
				codecs.getencoder(encoding)
				view.run_command('reload_with_encoding', {'encoding': encoding})
			except (ImportError, LookupError) as e:
				# ImportError means the codec package is missing entirely;
				# show installation instructions to the user.
				need_codecs = (type(e) == ImportError)
				clean_encoding_vars(view)
				view.window().new_file().run_command('py_instruction', {'encoding': encoding, 'file_name': file_name, 'need_codecs': need_codecs})
			return
		except UnicodeDecodeError as e:
			if detect_on_fail:
				detect(view, file_name, get_setting(view, 'max_detect_lines'))
				return
			# Retry with a superset encoding (e.g. GBK -> GB18030) if known.
			superset = SUPERSETS.get(encoding)
			if superset:
				print('Try encoding {0} instead of {1}.'.format(superset, encoding))
				init_encoding_vars(view, superset, True, stamp)
				return
			if CONFIRM_IS_AVAILABLE:
				# Ask before silently dropping malformed bytes.
				if sublime.ok_cancel_dialog(u'Errors occurred while converting {0} with {1} encoding.\n\n'
						'WARNING: Continue to load this file using {1}, malformed data will be ignored.'
						'\n\nPress "Cancel" to choose another encoding manually.'.format
						(os.path.basename(file_name), encoding)):
					fp.close()
					fp = codecs.open(file_name, 'rb', encoding, errors='ignore')
					contents = fp.read()
				else:
					show_selection(view)
					return
			else:
				view.set_status('origin_encoding', u'Errors occurred while converting {0} with {1} encoding'.format
						(os.path.basename(file_name), encoding))
				show_selection(view)
				return
		finally:
			if fp:
				fp.close()
		encoding_cache.set(file_name, encoding)
		# Normalize line endings to \n before replacing the buffer.
		contents = contents.replace('\r\n', '\n').replace('\r', '\n')
		regions = sublime.Region(0, view.size())
		sel = view.sel()
		# Remember selections as (row, col) pairs and the viewport so both
		# can be restored after the whole-buffer replace.
		rs = [(view.rowcol(x.a), view.rowcol(x.b)) for x in sel]
		vp = view.viewport_position()
		view.set_viewport_position((0, 0), False)
		view.replace(edit, regions, contents)
		sel.clear()
		for x in rs:
			sel.add(self.find_region(x))
		view.set_viewport_position(vp, False)
		stamps[file_name] = stamp
		sublime.status_message('{0} -> UTF8'.format(encoding))
コード例 #40
0
import logging

# Import salt libs
import salt.utils
import salt.utils.files
from salt.utils.odict import OrderedDict
from salt._compat import string_io
from salt.ext.six import string_types

log = logging.getLogger(__name__)

#FIXME: we should make the default encoding of a .sls file a configurable
#       option in the config, and default it to 'utf-8'.
#
SLS_ENCODING = 'utf-8'  # this one has no BOM.
# Encoder looked up once at import time; returns (bytes, length) tuples.
SLS_ENCODER = codecs.getencoder(SLS_ENCODING)


def compile_template(template,
                     renderers,
                     default,
                     blacklist,
                     whitelist,
                     saltenv='base',
                     sls='',
                     input_data='',
                     **kwargs):
    '''
    Take the path to a template and return the high data structure
    derived from the template.
    '''
コード例 #41
0
def rot13(input):
    """Return *input* with every ASCII letter rotated 13 places.

    NOTE: the parameter name shadows the builtin input(); kept for
    interface compatibility.
    """
    encode = codecs.getencoder("rot-13")
    rotated, _length = encode(input)
    return rotated
コード例 #42
0
ファイル: __init__.py プロジェクト: wptoux/stickytape
def _string_escape(string):
    # Escape *string* with the module's _py_string_encoding codec
    # (defined elsewhere in this file; presumably an escape codec such
    # as unicode_escape -- TODO confirm) and wrap the ASCII result in a
    # triple-quoted Python string literal.
    return "'''{0}'''".format(
        codecs.getencoder(_py_string_encoding)(string)[0].decode("ascii"))
コード例 #43
0
# -*- coding: utf-8 -*-
from __future__ import print_function
import codecs
# import big5uao
import big5uao_3

# The test bytes decode to: a煊b喆c凜凛
test = b'a\x95\x4Fb\x95\xEDc\xBB\xFE\x81\x60'

dec = codecs.getdecoder('big5uao')
enc = codecs.getencoder('big5uao')

# Decoder returns (text, bytes_consumed).
d0, d1 = dec(test)
print(d0, d1)

# Exercise replacement handling on input decoded with errors='replace'.
test = b'\x95\x4F\x80\x80'
d0, d1 = dec(test, errors='replace')
print(d0, d1)

# Should encode to the Big5+UAO bytes 0xa7 0x41 0xa6 0x6e
s = '你好'  # Py2: u'你好'      Py3: '你好'

e = enc(s)
for b in e[0]:
    print('%x ' % b, end='')
print(e[1])

# Private-use / lone-surrogate code points, encoded strictly.
s = '\uEEEE\uDDDD'
e = enc(s, errors='strict')
for b in e[0]:
    print('%x ' % b, end='')
コード例 #44
0
ファイル: default.py プロジェクト: darencorp/sudoku_solver
    def __init__(self,
                 convert_unicode=False,
                 encoding='utf-8',
                 paramstyle=None,
                 dbapi=None,
                 implicit_returning=None,
                 supports_right_nested_joins=None,
                 case_sensitive=True,
                 supports_native_boolean=None,
                 empty_in_strategy='static',
                 label_length=None,
                 **kwargs):
        """Construct the dialect, resolving paramstyle, codecs and
        feature flags from the arguments and the DBAPI module.

        Keyword arguments override class-level defaults; unrecognized
        **kwargs are ignored here.
        """

        if not getattr(self, 'ported_sqla_06', True):
            util.warn("The %s dialect is not yet ported to the 0.6 format" %
                      self.name)

        self.convert_unicode = convert_unicode
        self.encoding = encoding
        self.positional = False
        self._ischema = None
        self.dbapi = dbapi
        # Paramstyle preference: explicit argument > DBAPI's declared
        # style > the dialect's class-level default.
        if paramstyle is not None:
            self.paramstyle = paramstyle
        elif self.dbapi is not None:
            self.paramstyle = self.dbapi.paramstyle
        else:
            self.paramstyle = self.default_paramstyle
        if implicit_returning is not None:
            self.implicit_returning = implicit_returning
        self.positional = self.paramstyle in ('qmark', 'format', 'numeric')
        self.identifier_preparer = self.preparer(self)
        self.type_compiler = self.type_compiler(self)
        if supports_right_nested_joins is not None:
            self.supports_right_nested_joins = supports_right_nested_joins
        if supports_native_boolean is not None:
            self.supports_native_boolean = supports_native_boolean
        self.case_sensitive = case_sensitive

        self.empty_in_strategy = empty_in_strategy
        if empty_in_strategy == 'static':
            self._use_static_in = True
        elif empty_in_strategy in ('dynamic', 'dynamic_warn'):
            self._use_static_in = False
            self._warn_on_empty_in = empty_in_strategy == 'dynamic_warn'
        else:
            raise exc.ArgumentError("empty_in_strategy may be 'static', "
                                    "'dynamic', or 'dynamic_warn'")

        if label_length and label_length > self.max_identifier_length:
            raise exc.ArgumentError(
                "Label length of %d is greater than this dialect's"
                " maximum identifier length of %d" %
                (label_length, self.max_identifier_length))
        self.label_length = label_length

        # Cursor description decoding: 'use_encoding' means reuse the
        # connection encoding; otherwise use the explicitly declared one.
        if self.description_encoding == 'use_encoding':
            self._description_decoder = \
                processors.to_unicode_processor_factory(
                    encoding
                )
        elif self.description_encoding is not None:
            self._description_decoder = \
                processors.to_unicode_processor_factory(
                    self.description_encoding
                )
        # Cache the codec functions once for per-statement use.
        self._encoder = codecs.getencoder(self.encoding)
        self._decoder = processors.to_unicode_processor_factory(self.encoding)
コード例 #45
0
def hex_encode(data):
    """Return the hexadecimal digit string (str) for the bytes *data*."""
    encoded, _consumed = getencoder('hex')(data)
    return encoded.decode('ascii')
コード例 #46
0
import codecs
import hashlib
import random
import unittest

from mozharness.mozilla.merkle import InclusionProof, MerkleTree

# Codec helpers; each returns a (result, length_consumed) tuple, hence
# the [0] at call sites below.
decode_hex = codecs.getdecoder("hex_codec")
encode_hex = codecs.getencoder("hex_codec")

# Pre-computed tree on 7 inputs
#
#         ______F_____
#        /            \
#     __D__           _E_
#    /     \         /   \
#   A       B       C     |
#  / \     / \     / \    |
# 0   1   2   3   4   5   6
hash_fn = hashlib.sha256

# Seven 16-byte leaf values for the tree pictured above.
data = [
    decode_hex("fbc459361fc111024c6d1fd83d23a9ff")[0],
    decode_hex("ae3a44925afec860451cd8658b3cadde")[0],
    decode_hex("418903fe6ef29fc8cab93d778a7b018b")[0],
    decode_hex("3d1c53c00b2e137af8c4c23a06388c6b")[0],
    decode_hex("e656ebd8e2758bc72599e5896be357be")[0],
    decode_hex("81aae91cf90be172eedd1c75c349bf9e")[0],
    decode_hex("00c262edf8b0bc345aca769e8733e25e")[0],
]
コード例 #47
0
 def __post_init__(self, encoding):
     # Resolve and cache the codec's encode function once, so later
     # encode calls skip the codec-registry lookup.
     self._encoder = codecs.getencoder(encoding)
コード例 #48
0
class Filter(callbacks.Plugin):
    """This plugin offers several commands which transform text in some way.
    It also provides the capability of using such commands to 'filter' the
    output of the bot -- for instance, you could make everything the bot says
    be in leetspeak, or Morse code, or any number of other kinds of filters.
    Not very useful, but definitely quite fun :)"""
    def __init__(self, irc):
        # Explicit parent reference, the usual Supybot plugin pattern.
        self.__parent = super(Filter, self)
        self.__parent.__init__(irc)
        # Per-channel lists of filter methods applied to outgoing messages.
        self.outFilters = ircutils.IrcDict()

    def outFilter(self, irc, msg):
        """Apply the channel's configured filter chain to outgoing
        PRIVMSG/NOTICE messages; other commands pass through untouched."""
        if msg.command in ('PRIVMSG', 'NOTICE'):
            if msg.channel in self.outFilters:
                # CTCP ACTIONs are unwrapped, filtered, then re-wrapped.
                if ircmsgs.isAction(msg):
                    s = ircmsgs.unAction(msg)
                else:
                    s = msg.args[1]
                methods = self.outFilters[msg.channel]
                # Chain the filters: each one's output feeds the next.
                for filtercommand in methods:
                    myIrc = MyFilterProxy()
                    filtercommand(myIrc, msg, [s])
                    s = myIrc.s
                if ircmsgs.isAction(msg):
                    msg = ircmsgs.action(msg.args[0], s, msg=msg)
                else:
                    msg = ircmsgs.IrcMsg(msg=msg, args=(msg.args[0], s))
        return msg

    # Commands that may be installed as outgoing-message filters via the
    # `outfilter` command below.
    _filterCommands = [
        'jeffk', 'leet', 'rot13', 'hexlify', 'binary', 'scramble', 'morse',
        'reverse', 'colorize', 'squish', 'supa1337', 'stripcolor', 'aol',
        'rainbow', 'spellit', 'hebrew', 'undup', 'uwu', 'gnu', 'shrink',
        'uniud', 'capwords', 'caps', 'vowelrot', 'stripformatting'
    ]

    @internationalizeDocstring
    def outfilter(self, irc, msg, args, channel, command):
        """[<channel>] [<command>]

        Sets the outFilter of this plugin to be <command>.  If no command is
        given, unsets the outFilter.  <channel> is only necessary if the
        message isn't sent in the channel itself.
        """
        if command:
            # Only commands from the whitelist, and not disabled, may be
            # used as filters.
            if not self.isDisabled(command) and \
               command in self._filterCommands:
                method = getattr(self, command)
                self.outFilters.setdefault(channel, []).append(method)
                irc.replySuccess()
            else:
                irc.error(_('That\'s not a valid filter command.'))
        else:
            # No command given: clear the channel's filter chain.
            self.outFilters[channel] = []
            irc.replySuccess()

    outfilter = wrap(outfilter, [('checkChannelCapability', 'op'),
                                 additional('commandName')])

    # Removes all lowercase ASCII vowels in a single pass.
    _hebrew_remover = utils.str.MultipleRemover('aeiou')

    @internationalizeDocstring
    def hebrew(self, irc, msg, args, text):
        """<text>

        Removes all the vowels from <text>.  (If you're curious why this is
        named 'hebrew' it's because I (jemfinch) thought of it in Hebrew class,
        and printed Hebrew often elides the vowels.)
        """
        irc.reply(self._hebrew_remover(text))

    hebrew = wrap(hebrew, ['text'])

    def _squish(self, text):
        # Delete every space character from the text.
        return ''.join(text.split(' '))

    @internationalizeDocstring
    def squish(self, irc, msg, args, text):
        """<text>

        Removes all the spaces from <text>.
        """
        irc.reply(self._squish(text))

    squish = wrap(squish, ['text'])

    @internationalizeDocstring
    def undup(self, irc, msg, args, text):
        """<text>

        Returns <text>, with all consecutive duplicated letters removed.
        """
        # The previous version seeded the accumulator with text[0] and
        # raised IndexError on empty input; guard with `not L` instead.
        L = []
        for c in text:
            if not L or c != L[-1]:
                L.append(c)
        irc.reply(''.join(L))

    undup = wrap(undup, ['text'])

    @internationalizeDocstring
    def binary(self, irc, msg, args, text):
        """<text>

        Returns the binary representation of <text>.
        """
        # Normalize the input into an iterable of byte values (ints 0-255).
        if minisix.PY3:
            if isinstance(text, str):
                bytes_ = text.encode()
            else:
                bytes_ = text
        else:
            if isinstance(text, unicode):
                text = text.encode()
            bytes_ = map(ord, text)
        # format(i, '08b') renders each byte as exactly eight binary
        # digits, replacing the previous manual shift-and-pad loop (which
        # also carried an off-by-one `assert i <= 256`).
        irc.reply(''.join(format(i, '08b') for i in bytes_))

    binary = wrap(binary, ['text'])

    def unbinary(self, irc, msg, args, text):
        """<text>

        Returns the character representation of binary <text>.
        Assumes ASCII, 8 digits per character.
        """
        text = self._squish(text)  # Strip spaces.
        try:
            # Consume the digit string eight bits at a time; int(..., 2)
            # raises ValueError on any non-binary character.
            L = [chr(int(text[i:(i + 8)], 2)) for i in range(0, len(text), 8)]
            irc.reply(''.join(L))
        except ValueError:
            irc.errorInvalid('binary string', text)

    unbinary = wrap(unbinary, ['text'])

    # Cached hex encoder; staticmethod() keeps it from binding as a method.
    _hex_encoder = staticmethod(codecs.getencoder('hex_codec'))

    def hexlify(self, irc, msg, args, text):
        """<text>

        Returns a hexstring from the given string; a hexstring is a string
        composed of the hexadecimal value of each character in the string
        """
        # Encoder works on bytes and returns (bytes, length); the hex
        # digits are pure ASCII so the utf8 decode is safe.
        irc.reply(self._hex_encoder(text.encode('utf8'))[0].decode('utf8'))

    hexlify = wrap(hexlify, ['text'])

    # Cached hex decoder; staticmethod() keeps it from binding as a method.
    _hex_decoder = staticmethod(codecs.getdecoder('hex_codec'))

    @internationalizeDocstring
    def unhexlify(self, irc, msg, args, text):
        """<hexstring>

        Returns the string corresponding to <hexstring>.  Obviously,
        <hexstring> must be a string of hexadecimal digits.
        """
        try:
            irc.reply(
                self._hex_decoder(text.encode('utf8'))[0].decode(
                    'utf8', 'replace'))
        except (TypeError, ValueError):
            # Python 2's hex codec raised TypeError on bad input, but
            # Python 3's raises binascii.Error, a ValueError subclass;
            # catching only TypeError let those escape as tracebacks.
            irc.error(_('Invalid input.'))

    unhexlify = wrap(unhexlify, ['text'])

    # Cached rot13 encoder; it operates on text, not bytes.
    _rot13_encoder = codecs.getencoder('rot-13')

    @internationalizeDocstring
    def rot13(self, irc, msg, args, text):
        """<text>

        Rotates <text> 13 characters to the right in the alphabet.  Rot13 is
        commonly used for text that simply needs to be hidden from inadvertent
        reading by roaming eyes, since it's easily reversible.
        """
        # The rot13 codec requires unicode input on Python 2.
        if minisix.PY2:
            text = text.decode('utf8')
        irc.reply(self._rot13_encoder(text)[0])

    rot13 = wrap(rot13, ['text'])

    # Single-character leet substitutions, applied in one pass.
    _leettrans = utils.str.MultipleReplacer(
        dict(list(zip('oOaAeElBTiIts', '004433187!1+5'))))
    # Multi-character leet substitutions; applied in order before the
    # single-character pass.
    _leetres = [
        (re.compile(r'\b(?:(?:[yY][o0O][oO0uU])|u)\b'), 'j00'),
        (re.compile(r'fear'), 'ph33r'),
        (re.compile(r'[aA][tT][eE]'), '8'),
        (re.compile(r'[aA][tT]'), '@'),
        (re.compile(r'[sS]\b'), 'z'),
        (re.compile(r'x'), '><'),
    ]

    @internationalizeDocstring
    def leet(self, irc, msg, args, text):
        """<text>

        Returns the l33tspeak version of <text>
        """
        # Word-level substitutions first, then single characters.
        for (r, sub) in self._leetres:
            text = re.sub(r, sub, text)
        text = self._leettrans(text)
        irc.reply(text)

    leet = wrap(leet, ['text'])

    # (letters, replacement) pairs: every character in `letters` maps to
    # `replacement`.  Order matters -- earlier entries may emit characters
    # that later entries would otherwise rewrite.
    _supaleetreplacers = [
        ('xX', '><'),
        ('kK', '|<'),
        ('rR', '|2'),
        ('hH', '|-|'),
        ('L', '|_'),
        ('uU', '|_|'),
        ('O', '()'),
        ('nN', '|\\|'),
        ('mM', '/\\/\\'),
        ('G', '6'),
        ('Ss', '$'),
        ('i', ';'),
        ('aA', '/-\\'),
        ('eE', '3'),
        ('t', '+'),
        ('T', '7'),
        ('l', '1'),
        ('D', '|)'),
        ('B', '|3'),
        ('I', ']['),
        ('Vv', '\\/'),
        ('wW', '\\/\\/'),
        ('d', 'c|'),
        ('b', '|>'),
        ('c', '<'),
        ('h', '|n'),
    ]

    @internationalizeDocstring
    def supa1337(self, irc, msg, args, text):
        """<text>

        Replies with an especially k-rad translation of <text>.
        """
        # Same regex pass as `leet`, then the aggressive per-letter map.
        for (r, sub) in self._leetres:
            text = re.sub(r, sub, text)
        for (letters, replacement) in self._supaleetreplacers:
            for letter in letters:
                text = text.replace(letter, replacement)
        irc.reply(text)

    supa1337 = wrap(supa1337, ['text'])

    # Captures (first letter, middle letters, last letter) of each word of
    # three or more letters.
    _scrambleRe = re.compile(r'(?:\b|(?![a-zA-Z]))([a-zA-Z])([a-zA-Z]*)'
                             r'([a-zA-Z])(?:\b|(?![a-zA-Z]))')

    @internationalizeDocstring
    def scramble(self, irc, msg, args, text):
        """<text>

        Replies with a string where each word is scrambled; i.e., each internal
        letter (that is, all letters but the first and last) are shuffled.
        """
        def _subber(m):
            # Shuffle only the middle group; first/last letters stay put.
            L = list(m.group(2))
            random.shuffle(L)
            return '%s%s%s' % (m.group(1), ''.join(L), m.group(3))

        s = self._scrambleRe.sub(_subber, text)
        irc.reply(s)

    scramble = wrap(scramble, ['text'])

    # Character -> Morse sequence; the reverse table below is derived
    # from it, so every sequence here must be unique.
    _morseCode = {
        "A": ".-",
        "B": "-...",
        "C": "-.-.",
        "D": "-..",
        "E": ".",
        "F": "..-.",
        "G": "--.",
        "H": "....",
        "I": "..",
        "J": ".---",
        "K": "-.-",
        "L": ".-..",
        "M": "--",
        "N": "-.",
        "O": "---",
        "P": ".--.",
        "Q": "--.-",
        "R": ".-.",
        "S": "...",
        "T": "-",
        "U": "..-",
        "V": "...-",
        "W": ".--",
        "X": "-..-",
        "Y": "-.--",
        "Z": "--..",
        "0": "-----",
        "1": ".----",
        "2": "..---",
        "3": "...--",
        "4": "....-",
        "5": ".....",
        "6": "-....",
        "7": "--...",
        "8": "---..",
        "9": "----.",
        ".": ".-.-.-",
        ",": "--..--",
        ":": "---...",
        "?": "..--..",
        "'": ".----.",
        "-": "-....-",
        "/": "-..-.",
        '"': ".-..-.",
        "@": ".--.-.",
        "=": "-...-"
    }
    # Morse sequence -> character, for unmorse.
    _revMorseCode = dict([(y, x) for (x, y) in _morseCode.items()])
    # Matches one run of dots/dashes (a single Morse letter).
    _unmorsere = re.compile('([.-]+)')

    @internationalizeDocstring
    def unmorse(self, irc, msg, args, text):
        """<Morse code text>

        Does the reverse of the morse command.
        """
        # Accept '_' as an alternative dash.
        text = text.replace('_', '-')

        def morseToLetter(m):
            # Unknown sequences are passed through unchanged.
            s = m.group(1)
            return self._revMorseCode.get(s, s)

        text = self._unmorsere.sub(morseToLetter, text)
        # In Morse, a double space separates words and a single space
        # separates letters: protect word gaps with NUL, drop letter
        # gaps, then restore the word gaps as single spaces.
        text = text.replace('  ', '\x00')
        text = text.replace(' ', '')
        text = text.replace('\x00', ' ')
        irc.reply(text)

    unmorse = wrap(unmorse, ['text'])

    @internationalizeDocstring
    def morse(self, irc, msg, args, text):
        """<text>

        Gives the Morse code equivalent of a given string.
        """
        # Characters without a Morse mapping are kept verbatim.
        L = []
        for c in text.upper():
            L.append(self._morseCode.get(c, c))
        irc.reply(' '.join(L))

    morse = wrap(morse, ['text'])

    @internationalizeDocstring
    def reverse(self, irc, msg, args, text):
        """<text>

        Reverses <text>.
        """
        irc.reply(''.join(reversed(text)))

    reverse = wrap(reverse, ['text'])

    @internationalizeDocstring
    def _color(self, c, fg=None):
        # Prefix a single character with an IRC color control code
        # (\x03 + two-digit foreground).  Spaces are left uncolored.
        if c == ' ':
            return c
        if fg is None:
            # 2-15 skips white (0) and black (1), which would be invisible
            # on common backgrounds.
            fg = random.randint(2, 15)
        fg = str(fg).zfill(2)
        return '\x03%s%s' % (fg, c)

    @internationalizeDocstring
    def colorize(self, irc, msg, args, text):
        """<text>

        Returns <text> with each character randomly colorized.
        """
        if minisix.PY2:
            text = text.decode('utf-8')
        # Drop any existing color codes so they don't stack with ours.
        text = ircutils.stripColor(text)
        L = [self._color(c) for c in text]
        if minisix.PY2:
            L = [c.encode('utf-8') for c in L]
        # Trailing \x03 resets the color at the end of the message.
        irc.reply('%s%s' % (''.join(L), '\x03'))

    colorize = wrap(colorize, ['text'])

    @internationalizeDocstring
    def rainbow(self, irc, msg, args, text):
        """<text>

        Returns <text> colorized like a rainbow.
        """
        if minisix.PY2:
            text = text.decode('utf-8')
        # Drop any existing color codes so they don't stack with ours.
        text = ircutils.stripColor(text)
        # Cycle through the rainbow palette, one color per character.
        colors = utils.iter.cycle([
            '05', '04', '07', '08', '09', '03', '11', '10', '12', '02', '06',
            '13'
        ])
        L = [self._color(c, fg=next(colors)) for c in text]
        if minisix.PY2:
            L = [c.encode('utf-8') for c in L]
        # Trailing \x03 resets the color at the end of the message.
        irc.reply(''.join(L) + '\x03')

    rainbow = wrap(rainbow, ['text'])

    @wrap(['text'])
    def stripformatting(self, irc, msg, args, text):
        """<text>

        Strips bold, underline, and colors from <text>."""
        irc.reply(ircutils.stripFormatting(text))

    @internationalizeDocstring
    def stripcolor(self, irc, msg, args, text):
        """<text>

        Returns <text> stripped of all color codes.
        """
        irc.reply(ircutils.stripColor(text))

    stripcolor = wrap(stripcolor, ['text'])

    @internationalizeDocstring
    def aol(self, irc, msg, args, text):
        """<text>

        Returns <text> as if an AOL user had said it.
        """
        # Plain sequential replacements; order matters (e.g. ' too ' must
        # be handled before ' to ').
        text = text.replace(' you ', ' u ')
        text = text.replace(' are ', ' r ')
        text = text.replace(' love ', ' <3 ')
        text = text.replace(' luv ', ' <3 ')
        text = text.replace(' too ', ' 2 ')
        text = text.replace(' to ', ' 2 ')
        text = text.replace(' two ', ' 2 ')
        text = text.replace('fore', '4')
        text = text.replace(' for ', ' 4 ')
        text = text.replace('be', 'b')
        text = text.replace('four', ' 4 ')
        text = text.replace(' their ', ' there ')
        text = text.replace(', ', ' ')
        text = text.replace(',', ' ')
        text = text.replace("'", '')
        text = text.replace('one', '1')
        smiley = utils.iter.choice(['<3', ':)', ':-)', ':D', ':-D'])
        text += smiley * 3
        irc.reply(text)

    aol = wrap(aol, ['text'])

    @internationalizeDocstring
    def jeffk(self, irc, msg, args, text):
        """<text>

        Returns <text> as if JeffK had said it himself.
        """
        def randomlyPick(L):
            return utils.iter.choice(L)

        def quoteOrNothing(m):
            # Replace an apostrophe with '"' or drop it entirely.
            return randomlyPick(['"', '']).join(m.groups())

        def randomlyReplace(s, probability=0.5):
            # Build a re.sub callback that applies the replacement only
            # part of the time.
            def f(m):
                if random.random() < probability:
                    return m.expand(s)
                else:
                    return m.group(0)

            return f

        def randomExclaims(m):
            # Replace a sentence-ending period with 1-4 bangs, usually.
            if random.random() < 0.85:
                return ('!' * random.randrange(1, 5)) + m.group(1)
            else:
                return '.' + m.group(1)

        def randomlyShuffle(m):
            L = list(m.groups())
            random.shuffle(L)
            return ''.join(L)

        def lessRandomlyShuffle(m):
            L = list(m.groups())
            if random.random() < .4:
                random.shuffle(L)
            return ''.join(L)

        def randomlyLaugh(text, probability=.3):
            if random.random() < probability:
                if random.random() < .5:
                    # NOTE(review): 'fagot' ' jerk' concatenates implicitly
                    # into one item -- possibly a missing comma upstream.
                    insult = utils.iter.choice([
                        ' fagot1', ' fagorts', ' jerks', 'fagot'
                        ' jerk', 'dumbshoes', ' dumbshoe'
                    ])
                else:
                    insult = ''
                laugh1 = utils.iter.choice(['ha', 'hah', 'lol', 'l0l', 'ahh'])
                laugh2 = utils.iter.choice(['ha', 'hah', 'lol', 'l0l', 'ahh'])
                laugh1 = laugh1 * random.randrange(1, 5)
                laugh2 = laugh2 * random.randrange(1, 5)
                exclaim = utils.iter.choice(
                    ['!', '~', '!~', '~!!~~', '!!~', '~~~!'])
                exclaim += utils.iter.choice(
                    ['!', '~', '!~', '~!!~~', '!!~', '~~~!'])
                if random.random() < 0.5:
                    exclaim += utils.iter.choice(
                        ['!', '~', '!~', '~!!~~', '!!~', '~~~!'])
                laugh = ''.join([' ', laugh1, laugh2, insult, exclaim])
                text += laugh
            return text

        # Occasionally replace the whole message.
        if random.random() < .03:
            irc.reply(randomlyLaugh('NO YUO', probability=1))
            return
        # Deterministic misspellings, always applied.
        alwaysInsertions = {
            r'er\b': 'ar',
            r'\bthe\b': 'teh',
            r'\byou\b': 'yuo',
            r'\bis\b': 'si',
            r'\blike\b': 'liek',
            r'[^e]ing\b': 'eing',
        }
        for (r, s) in alwaysInsertions.items():
            text = re.sub(r, s, text)
        # Misspellings applied with 50% probability per match.
        randomInsertions = {
            r'i': 'ui',
            r'le\b': 'al',
            r'i': 'io',
            r'l': 'll',
            r'to': 'too',
            r'that': 'taht',
            r'[^s]c([ei])': r'sci\1',
            r'ed\b': r'e',
            r'\band\b': 'adn',
            r'\bhere\b': 'hear',
            r'\bthey\'re': 'their',
            r'\bthere\b': 'they\'re',
            r'\btheir\b': 'there',
            r'[^e]y': 'ey',
        }
        for (r, s) in randomInsertions.items():
            text = re.sub(r, randomlyReplace(s), text)
        text = re.sub(r'(\w)\'(\w)', quoteOrNothing, text)
        text = re.sub(r'\.(\s+|$)', randomExclaims, text)
        # Shuffle adjacent vowel pairs, and (less often) consonant pairs.
        text = re.sub(r'([aeiou])([aeiou])', randomlyShuffle, text)
        text = re.sub(r'([bcdfghkjlmnpqrstvwxyz])([bcdfghkjlmnpqrstvwxyz])',
                      lessRandomlyShuffle, text)
        text = randomlyLaugh(text)
        if random.random() < .4:
            text = text.upper()
        irc.reply(text)

    jeffk = wrap(jeffk, ['text'])

    # Keeping these separate so people can just replace the alphabets for
    # whatever their language of choice
    # Phonetic spellings of the lowercase letters; uppercase variants are
    # generated by the loop below.
    _spellLetters = {
        'a': _('ay'),
        'b': _('bee'),
        'c': _('see'),
        'd': _('dee'),
        'e': _('ee'),
        'f': _('eff'),
        'g': _('gee'),
        'h': _('aych'),
        'i': _('eye'),
        'j': _('jay'),
        'k': _('kay'),
        'l': _('ell'),
        'm': _('em'),
        'n': _('en'),
        'o': _('oh'),
        'p': _('pee'),
        'q': _('cue'),
        'r': _('arr'),
        's': _('ess'),
        't': _('tee'),
        'u': _('you'),
        'v': _('vee'),
        'w': _('double-you'),
        'x': _('ecks'),
        'y': _('why'),
        'z': _('zee')
    }
    # Map each uppercase letter to the same spelling as its lowercase
    # counterpart.  list() protects against mutation during iteration.
    for (k, v) in list(_spellLetters.items()):
        _spellLetters[k.upper()] = v
    _spellPunctuation = {
        '!': _('exclamation point'),
        '"': _('quote'),
        '#': _('pound'),
        '$': _('dollar sign'),
        '%': _('percent'),
        '&': _('ampersand'),
        '\'': _('single quote'),
        '(': _('left paren'),
        ')': _('right paren'),
        '*': _('asterisk'),
        '+': _('plus'),
        ',': _('comma'),
        '-': _('minus'),
        '.': _('period'),
        '/': _('slash'),
        ':': _('colon'),
        ';': _('semicolon'),
        '<': _('less than'),
        '=': _('equals'),
        '>': _('greater than'),
        '?': _('question mark'),
        '@': _('at'),
        '[': _('left bracket'),
        '\\': _('backslash'),
        ']': _('right bracket'),
        '^': _('caret'),
        '_': _('underscore'),
        '`': _('backtick'),
        '{': _('left brace'),
        '|': _('pipe'),
        '}': _('right brace'),
        '~': _('tilde')
    }
    _spellNumbers = {
        '0': _('zero'),
        '1': _('one'),
        '2': _('two'),
        '3': _('three'),
        '4': _('four'),
        '5': _('five'),
        '6': _('six'),
        '7': _('seven'),
        '8': _('eight'),
        '9': _('nine')
    }

    @internationalizeDocstring
    def spellit(self, irc, msg, args, text):
        """<text>

        Returns <text>, phonetically spelled out.
        """
        # Build the substitution table from whichever character classes
        # are enabled in the registry.
        d = {}
        if self.registryValue('spellit.replaceLetters'):
            d.update(self._spellLetters)
        if self.registryValue('spellit.replaceNumbers'):
            d.update(self._spellNumbers)
        if self.registryValue('spellit.replacePunctuation'):
            d.update(self._spellPunctuation)


# A bug in unicode on OSX prevents me from testing this.
##         dd = {}
##         for (c, v) in d.items():
##             dd[ord(c)] = unicode(v + ' ')
##         irc.reply(unicode(text).translate(dd))
        # Characters with a spelling are preceded by a space; characters
        # without one are passed through unchanged.
        out = minisix.io.StringIO()
        write = out.write
        for c in text:
            try:
                c = d[c]
                write(' ')
            except KeyError:
                pass
            write(c)
        irc.reply(out.getvalue().strip())

    spellit = wrap(spellit, ['text'])

    @internationalizeDocstring
    def gnu(self, irc, msg, args, text):
        """<text>

        Returns <text> as GNU/RMS would say it.
        """
        # Prefix every whitespace-separated word with "GNU/".
        words = text.split()
        irc.reply(' '.join('GNU/' + word for word in words))

    gnu = wrap(gnu, ['text'])

    @internationalizeDocstring
    def shrink(self, irc, msg, args, text):
        """<text>

        Returns <text> with each word longer than
        supybot.plugins.Filter.shrink.minimum being shrunken (i.e., like
        "internationalization" becomes "i18n").
        """
        threshold = self.registryValue('shrink.minimum', msg.channel,
                                       irc.network)
        # Match any run of ASCII letters at least `threshold` long.
        pattern = re.compile(r'[A-Za-z]{%s,}' % threshold)

        def numeronym(match):
            # Keep first and last letter; the middle becomes its length.
            word = match.group(0)
            return '%s%d%s' % (word[0], len(word) - 2, word[-1])

        irc.reply(pattern.sub(numeronym, text))

    shrink = wrap(shrink, ['text'])

    # TODO: 2,4,;
    # XXX suckiest: B,K,P,Q,T
    # alternatives: 3: U+2107
    # Maps each ASCII character to its best visual 180-degree rotation.
    # Fix: the 'u'/'n' pair had been corrupted ('' was used as the key for
    # 'u' and as the value for 'n'), leaving 'u' unmapped and 'n' rendered
    # as U+FFFD by uniud's empty-value fallback; 'u' and 'n' rotate into
    # each other.
    _uniudMap = {
        ' ': ' ',
        '0': '0',
        '@': '@',
        '!': '\u00a1',
        '1': '1',
        'A': '\u2200',
        '"': '\u201e',
        '2': '\u2681',
        'B': 'q',
        '#': '#',
        '3': '\u0190',
        'C': '\u0186',
        '$': '$',
        '4': '\u2683',
        'D': '\u15e1',
        '%': '%',
        '5': '\u1515',
        'E': '\u018e',
        '&': '\u214b',
        '6': '9',
        'F': '\u2132',
        "'": '\u0375',
        '7': 'L',
        'G': '\u2141',
        '(': ')',
        '8': '8',
        'H': 'H',
        ')': '(',
        '9': '6',
        'I': 'I',
        '*': '*',
        ':': ':',
        'J': '\u148b',
        '+': '+',
        ';': ';',
        'K': '\u029e',
        ',': '\u2018',
        '<': '>',
        'L': '\u2142',
        '-': '-',
        '=': '=',
        'M': '\u019c',
        '.': '\u02d9',
        '>': '<',
        'N': 'N',
        '/': '/',
        '?': '\u00bf',
        'O': 'O',
        'P': 'd',
        '`': '\u02ce',
        'p': 'd',
        'Q': 'b',
        'a': '\u0250',
        'q': 'b',
        'R': '\u1d1a',
        'b': 'q',
        'r': '\u0279',
        'S': 'S',
        'c': '\u0254',
        's': 's',
        'T': '\u22a5',
        'd': 'p',
        't': '\u0287',
        'U': '\u144e',
        'e': '\u01dd',
        'u': 'n',
        'V': '\u039b',
        'f': '\u214e',
        'v': '\u028c',
        'W': 'M',
        'g': '\u0253',
        'w': '\u028d',
        'X': 'X',
        'h': '\u0265',
        'x': 'x',
        'Y': '\u2144',
        'i': '\u1d09',
        'y': '\u028e',
        'Z': 'Z',
        'j': '\u027f',
        'z': 'z',
        '[': ']',
        'k': '\u029e',
        '{': '}',
        '\\': '\\',
        'l': '\u05df',
        '|': '|',
        ']': '[',
        'm': '\u026f',
        '}': '{',
        '^': '\u2335',
        'n': 'u',
        '~': '~',
        '_': '\u203e',
        'o': 'o',
    }

    @internationalizeDocstring
    def uniud(self, irc, msg, args, text):
        """<text>

        Returns <text> rotated 180 degrees. Only really works for ASCII
        printable characters.
        """
        # Rotated characters, collected in original order and reversed at
        # the end; tlen tracks the visible length so far (for tab stops).
        turned = []
        tlen = 0
        for c in text:
            if c in self._uniudMap:
                tmp = self._uniudMap[c]
                if not len(tmp):
                    # Empty map entry: substitute U+FFFD REPLACEMENT CHARACTER.
                    tmp = '\ufffd'
                turned.append(tmp)
                tlen += 1
            elif c == '\t':
                # Expand tabs to the next 8-column stop; a rotated tab has
                # no sensible rendering.
                tablen = 8 - tlen % 8
                turned.append(' ' * tablen)
                tlen += tablen
            elif ord(c) >= 32:
                # Other printable characters pass through; control
                # characters (< 32) are dropped.
                turned.append(c)
                tlen += 1
        # NOTE(review): the trailing " \x02 \x02" appears to be IRC bold
        # toggles guarding trailing whitespace — confirm intent.
        s = '%s \x02 \x02' % ''.join(reversed(turned))
        irc.reply(s)

    uniud = wrap(uniud, ['text'])

    def capwords(self, irc, msg, args, text):
        """<text>

        Capitalises the first letter of each word.
        """
        # string.capwords splits on whitespace, capitalises each piece,
        # and rejoins with single spaces.
        irc.reply(string.capwords(text))

    capwords = wrap(capwords, ['text'])

    def caps(self, irc, msg, args, text):
        """<text>

        EVERYONE LOVES CAPS LOCK.
        """
        shouted = text.upper()
        irc.reply(shouted)

    caps = wrap(caps, ['text'])

    # Single-pass replacer rotating each vowel to the next one
    # (a->e->i->o->u->a), in both cases; used by the vowelrot command.
    _vowelrottrans = utils.str.MultipleReplacer(
        dict(list(zip('aeiouAEIOU', 'eiouaEIOUA'))))

    def vowelrot(self, irc, msg, args, text):
        """<text>

        Returns <text> with vowels rotated
        """
        rotated = self._vowelrottrans(text)
        irc.reply(rotated)

    vowelrot = wrap(vowelrot, ['text'])

    # Single-pass replacer turning l/r (either case) into w/W for uwu-speak.
    _uwutrans = utils.str.MultipleReplacer(dict(list(zip('lrLR', 'wwWW'))))

    def uwu(self, irc, msg, args, text):
        """<text>

        Returns <text> in uwu-speak.
        """
        converted = self._uwutrans(text)
        # Occasionally (4 times in 14) append a cutesy suffix.
        suffixes = [''] * 10 + [' uwu', ' UwU', ' owo', ' OwO']
        converted += random.choice(suffixes)
        irc.reply(converted)

    uwu = wrap(uwu, ['text'])
コード例 #49
0
ファイル: __init__.py プロジェクト: boxed/curia
def migrate_users_from_SKForum(request=None):
    """Copy users from a legacy SKForum database into Django auth users.

    Reads rows from the legacy ``users`` table (skipping id 1), creates a
    Django ``User`` per row, attaches per-user ``Detail`` records for
    contact fields, and a ``MetaUser`` for the birthdate.
    """
    #if not request.user.is_superuser:
    #raise Exception('super user specific action')

    cursor = get_cursor()
    # NOTE(review): these names shadow the builtin hex() and a common
    # module name; they are codec callables returning (result, length).
    hex = codecs.getencoder('hex')
    latin1 = codecs.getdecoder('latin1')

    everyone = get_everyone_group()

    cursor.execute(
        'select id, name, password, realname, SecretEmail, PublicEmail, email, ICQ, telephone, mobilephone, address, other, birthdate from users where id != 1'
    )
    users = cursor.fetchall()
    for user in users:
        # Column order: 0=id, 1=name, 2=password, 3=realname,
        # 4=SecretEmail, 5=PublicEmail, 6=email, 7=ICQ, 8=telephone,
        # 9=mobilephone, 10=address, 11=other, 12=birthdate.
        u = User(id=user[0],
                 username=fix_string(user[1]),
                 email=user[4],
                 is_staff=False,
                 is_superuser=False)

        # Fall back through the three legacy e-mail columns.
        if u.email == None:
            u.email = user[5]
        if u.email == None:
            u.email = user[6]

        # hex(...) returns (hex_string, length); index [1] is the length.
        # NOTE(review): the "******" literals look like scrape/redaction
        # artifacts replacing the original password-format prefixes —
        # confirm against the real source before running.
        hexpassword = hex(base64.b64decode(user[2]))
        if hexpassword[1] > 18:
            u.password = "******" + hexpassword[0]
        else:
            u.password = "******"

        # Split "First Last" on the last whitespace only.
        realname = user[3].rsplit(None, 1)
        if len(realname) >= 1:
            u.first_name = fix_string(realname[0])
        if u.first_name == None:
            u.first_name = ''
        if len(realname) >= 2:
            u.last_name = fix_string(realname[1])
        if u.last_name == None:
            u.last_name = ''

        u.save()

        # display name 1
        #d = Detail(name='display name', value=fix_string(user[1]), user=u)
        #d.save()

        def add_detail(object, name):
            # Save a Detail row only for non-empty, non-'null' values.
            # Closes over `u`, so it attaches to the current user.
            if object != None and object != '' and object != 'null':
                d = Detail(name=name, value=fix_string(object), user=u)
                if d.value != None:
                    d.save()

        add_detail(user[5], 'public email')
        add_detail(user[6], 'protected email')
        add_detail(user[7], 'ICQ')
        add_detail(user[8], 'telephone')
        add_detail(user[9], 'mobilephone')
        add_detail(user[10], 'address')
        add_detail(user[11], 'other')

        # birthdate 12
        if user[12] != None and user[12] != '' and user[12] != 'null':
            m = MetaUser(user=u, birthday=user[12])
            m.save()

    # NOTE(review): `db` is not defined anywhere in this view — presumably
    # a module-level connection; confirm it exists in the full module.
    db.close()
コード例 #50
0
#!/usr/bin/env python
# MIME Header Parsing - Chapter 9
# mime_parse_headers.py
# This program requires Python 2.2.2 or above

import sys, email, codecs
from email import Header

msg = email.message_from_file(sys.stdin)
for header, value in msg.items():
    headerparts = Header.decode_header(value)
    headerval = []
    for part in headerparts:
        data, charset = part
	if charset is None:
	    charset = 'ascii'
	dec = codecs.getdecoder(charset)
	enc = codecs.getencoder('iso-8859-1')
	data = enc(dec(data)[0])[0]
	headerval.append(data)
    print "%s: %s" % (header, " ".join(headerval))

コード例 #51
0

class InvalidRegistryValue(RegistryException):
    """Raised when a value is not valid for a registry variable."""
    pass


class NonExistentRegistryEntry(RegistryException, AttributeError):
    """Raised when a requested registry entry does not exist."""
    # If we use hasattr() on a configuration group/value, Python 3 calls
    # __getattr__ and looks for an AttributeError, so __getattr__ has to
    # raise an AttributeError if a registry entry does not exist.
    pass


# Escape-style codec used to round-trip registry strings through the
# config file; the codec name differs between Python 2 and 3.
ENCODING = 'string_escape' if minisix.PY2 else 'unicode_escape'
decoder = codecs.getdecoder(ENCODING)
encoder = codecs.getencoder(ENCODING)

# Use the monotonic clock when available (Python >= 3.3); otherwise fall
# back to wall-clock time, which can jump on clock adjustments.
monotonic_time = getattr(time, 'monotonic', time.time)

# In-memory registry cache (case-insensitive, case-preserving keys) and
# the mtime of the registry file when it was last loaded.
_cache = utils.InsensitivePreservingDict()
_lastModified = 0


def open_registry(filename, clear=False):
    """Initializes the module by loading the registry file into memory."""
    global _lastModified
    if clear:
コード例 #52
0
def encoded(enc, txt=docText):
    """Encode *txt* with the codec named *enc*.

    Characters the codec cannot represent are emitted as XML character
    references. Returns only the encoded bytes, discarding the consumed
    length that the codecs API also reports.
    """
    encode = codecs.getencoder(enc)
    data, _consumed = encode(txt, 'xmlcharrefreplace')
    return data
コード例 #53
0
ファイル: core.py プロジェクト: tingletech/solrpy
    def __init__(self,
                 url,
                 persistent=True,
                 timeout=None,
                 ssl_key=None,
                 ssl_cert=None,
                 http_user=None,
                 http_pass=None,
                 post_headers={},
                 max_retries=3,
                 debug=False):
        """
            url -- URI pointing to the Solr instance. Examples:

                http://localhost:8080/solr
                https://solr-server/solr

                Your python install must be compiled with SSL support for the
                https:// schemes to work. (Most pre-packaged pythons are.)

            persistent -- Keep a persistent HTTP connection open.
                Defaults to true

            timeout -- Timeout, in seconds, for the server to response.
                By default, use the python default timeout (of none?)

            ssl_key, ssl_cert -- If using client-side key files for
                SSL authentication,  these should be, respectively,
                your PEM key file and certificate file.

            http_user, http_pass -- If given, include HTTP Basic authentication 
                in all request headers.

        """
        # NOTE(review): post_headers={} is a shared mutable default; it is
        # only read (via .update) below, so it is currently harmless.

        self.scheme, self.host, self.path = urlparse.urlparse(url, 'http')[:3]
        self.url = url

        assert self.scheme in ('http', 'https')

        self.persistent = persistent
        self.reconnects = 0
        self.timeout = timeout
        self.ssl_key = ssl_key
        self.ssl_cert = ssl_cert
        self.max_retries = int(max_retries)

        assert self.max_retries >= 0

        kwargs = {}

        # httplib only accepts a timeout kwarg on Python 2.6+; older
        # versions get the timeout set directly on the socket below.
        if self.timeout and _python_version >= 2.6 and _python_version < 3:
            kwargs['timeout'] = self.timeout

        if self.scheme == 'https':
            self.conn = httplib.HTTPSConnection(self.host,
                                                key_file=ssl_key,
                                                cert_file=ssl_cert,
                                                **kwargs)
        else:
            self.conn = httplib.HTTPConnection(self.host, **kwargs)

        self.response_version = 2.2
        # Requests to Solr are encoded as UTF-8.
        self.encoder = codecs.getencoder('utf-8')

        # Responses from Solr will always be in UTF-8
        self.decoder = codecs.getdecoder('utf-8')

        # Set timeout, if applicable.
        if self.timeout and _python_version < 2.6:
            self.conn.connect()
            if self.scheme == 'http':
                self.conn.sock.settimeout(self.timeout)
            elif self.scheme == 'https':
                # HTTPS wraps the raw socket, hence the extra .sock level.
                self.conn.sock.sock.settimeout(self.timeout)

        self.xmlheaders = {'Content-Type': 'text/xml; charset=utf-8'}
        self.xmlheaders.update(post_headers)
        if not self.persistent:
            self.xmlheaders['Connection'] = 'close'

        self.form_headers = {
            'Content-Type': 'application/x-www-form-urlencoded; charset=utf-8'
        }

        # Pre-compute the HTTP Basic auth header if credentials were given.
        # NOTE(review): str.encode('base64') only works on Python 2 —
        # confirm this module targets Python 2 before reusing it.
        if http_user is not None and http_pass is not None:
            http_auth = http_user + ':' + http_pass
            http_auth = 'Basic ' + http_auth.encode('base64').strip()
            self.auth_headers = {'Authorization': http_auth}
        else:
            self.auth_headers = {}

        if not self.persistent:
            self.form_headers['Connection'] = 'close'

        self.debug = debug
        self.select = SearchHandler(self, "/select")
コード例 #54
0
ファイル: utils.py プロジェクト: benhe119/gate
try:
    import simplejson as json
except ImportError:
    import json
import cPickle as pickle
import glob
from urlparse import urlparse as stdlib_urlparse, ParseResult
import itertools

import eventlet
from eventlet import GreenPool, sleep, Timeout
from eventlet.green import socket, threading
import netifaces
import codecs
# Module-level codec helpers; each call returns a (result, length) tuple
# per the codecs API.
utf8_decoder = codecs.getdecoder('utf-8')
utf8_encoder = codecs.getencoder('utf-8')

from logging.handlers import SysLogHandler
import logging

# setup notice level logging

# NOTE(review): logging._levelNames is a private Python 2 attribute
# (Python 3 renamed it); confirm target interpreter before porting.
NOTICE = 25
logging._levelNames[NOTICE] = 'NOTICE'
SysLogHandler.priority_map['NOTICE'] = 'notice'

# These are lazily pulled from libc elsewhere

_sys_fsync = None
_sys_fallocate = None
_posix_fadvise = None
コード例 #55
0
    def _getDataIo(self, file):
        """Load *file* (local path or http(s) URL) and detect its format.

        :param file: file path or URL
        :return: tuple (data_io, format, dialect) where format is one of
            'xls', 'xlsx', 'json', 'csv' or None and dialect is the
            sniffed CSV dialect (None for non-CSV data)
        """
        # NOTE(review): the parameter `file` and locals `bytes`/`buffer`
        # below shadow Python builtins.

        ############
        # get file as io object
        ############

        data = BytesIO()

        # get data from either url or file load in memory
        if file.startswith('http:') or file.startswith('https:'):
            r = requests.get(file, stream=True)
            if r.status_code == 200:
                for chunk in r:
                    data.write(chunk)
            data.seek(0)

        # else read file from local file system
        else:
            try:
                data = open(file, 'rb')
            except Exception as e:
                error = 'Could not load file, possible exception : {exception}'.format(
                    exception=e)
                log.error(error)
                raise ValueError(error)

        dialect = None

        ############
        # check for file type
        ############

        # try to guess if its an excel file
        # Magic-number signatures: xlsx is a ZIP (EOCD / local header),
        # xls is the BIFF record found at offset 512.
        xlsx_sig = b'\x50\x4B\x05\06'
        xlsx_sig2 = b'\x50\x4B\x03\x04'
        xls_sig = b'\x09\x08\x10\x00\x00\x06\x05\x00'

        # different whence, offset, size for different types
        # xls: 8 bytes at absolute offset 512; xlsx: 4 bytes at 22 from EOF.
        excel_meta = [('xls', 0, 512, 8), ('xlsx', 2, -22, 4)]

        for filename, whence, offset, size in excel_meta:

            try:
                data.seek(offset, whence)  # Seek to the offset.
                bytes = data.read(
                    size)  # Capture the specified number of bytes.
                data.seek(0)
                # Hex-encoding raises on non-bytes input; result is unused.
                codecs.getencoder('hex')(bytes)

                if bytes == xls_sig:
                    return data, 'xls', dialect
                elif bytes == xlsx_sig:
                    return data, 'xlsx', dialect

            except:
                # Seek/read can fail on short files; just reset and move on.
                data.seek(0)

        # if not excel it can be a json file or a CSV, convert from binary to stringio

        byte_str = data.read()
        # Move it to StringIO
        try:
            # Handle Microsoft's BOM "special" UTF-8 encoding
            if byte_str.startswith(codecs.BOM_UTF8):
                data = StringIO(byte_str.decode('utf-8-sig'))
            else:
                data = StringIO(byte_str.decode('utf-8'))

        except:
            # NOTE(review): on decode failure `data` is left as the raw
            # bytes object, which later string operations will choke on.
            log.error(traceback.format_exc())
            log.error('Could not load into string')

        # see if its JSON
        buffer = data.read(100)
        data.seek(0)
        text = buffer.strip()
        # analyze first n characters
        if len(text) > 0:
            text = text.strip()
            # it it looks like a json, then try to parse it
            if text.startswith('{') or text.startswith('['):
                try:
                    json.loads(data.read())
                    data.seek(0)
                    return data, 'json', dialect
                except:
                    data.seek(0)
                    return data, None, dialect

        # lets try to figure out if its a csv
        try:
            data.seek(0)
            first_few_lines = []
            i = 0
            # Because of the `i > 0` break, only the first non-empty line
            # is collected and fed to the sniffer.
            for line in data:
                if line in ['\r\n', '\n']:
                    continue
                first_few_lines.append(line)
                i += 1
                if i > 0:
                    break

            accepted_delimiters = [',', '\t', ';']
            dialect = csv.Sniffer().sniff(''.join(first_few_lines[0]),
                                          delimiters=accepted_delimiters)
            data.seek(0)
            # if csv dialect identified then return csv
            if dialect:
                return data, 'csv', dialect
            else:
                return data, None, dialect
        except:
            data.seek(0)
            log.error('Could not detect format for this file')
            log.error(traceback.format_exc())
            # No file type identified
            return data, None, dialect
コード例 #56
0
 def remotePrint(self, message):
   """Send a PRINT message on stdout: the literal tag 'PRINT', an
   8-character decimal byte count, then the UTF-8 payload.

   Fix: the local previously shadowed the builtin `bytes`; renamed.
   """
   with self.jobLock:
     sys.stdout.write('PRINT')
     # Encode first so the length prefix counts bytes, not characters.
     payload = codecs.getencoder('UTF8')(message)[0]
     sys.stdout.write('%8d' % len(payload))
     # NOTE(review): writing bytes to sys.stdout only works on Python 2;
     # Python 3 would need sys.stdout.buffer.write — confirm target runtime.
     sys.stdout.write(payload)
コード例 #57
0
ファイル: main.py プロジェクト: Phillip-May/LoRom-BSX-patcher
# Sum every byte of a 1 MiB ROM image, skipping the SNES header region,
# to produce the 16-bit checksum and its complement.
filesize = 0x00100000
pos = 2
count = 0
checksumtotal = 0

while (count < filesize):
    #only add numbers not part of the header
    # NOTE(review): both branches test the identical range for romtype 0
    # and romtype 1 — likely a copy/paste or scrape error; the HiROM
    # header normally lives at 0xFFB0, not 0x7FB0. Confirm upstream.
    if ((romtype == 0 and (count < 0x07FB0 or count > 0x07FDF))
            or (romtype == 1 and (count < 0x07FB0 or count > 0x07FDF))):
        #parameter here is what offset to seek to
        rawfile.seek(count, 0)
        #parameter defines how many bytes to read
        currentvalue = rawfile.read(1)
        # outputresult is only used by the commented-out debug print below.
        outputresult = 0
        outputresult = codecs.getencoder('hex_codec')(currentvalue)[0]
        checksumtotal = checksumtotal + int.from_bytes(currentvalue,
                                                       byteorder='little')
        #print (outputresult)
    count = count + 1

#cast the result to an unsigned integer
# NOTE(review): this pack result is discarded — it only validates that
# checksumtotal fits an unsigned 32-bit int (raises struct.error if not).
struct.pack('I', checksumtotal)
#truncate to 16 bit and convert to hex string in one operation
checksumoutput = hex(checksumtotal & 0xffff)
inverse = hex(~checksumtotal & 0xffff)

# Split the 16-bit checksum into its high and low bytes.
checksumhighbyte = struct.pack('B', ((checksumtotal & 0xff00) >> 8))
checksumelowbyte = struct.pack('B', (checksumtotal & 0x00ff))

print("Something is going wrong with this")
コード例 #58
0
ファイル: ast_helper.py プロジェクト: sthagen/sast-scan
def decode_bytes(value):
    """Decode *value* as UTF-8; fall back to its hex representation.

    Any decode failure (invalid UTF-8, surrogates, etc.) yields the
    lowercase hex encoding of the raw bytes instead.
    """
    try:
        text = value.decode("utf-8")
    except Exception:
        hexlified = codecs.getencoder("hex_codec")(value)[0]
        text = hexlified.decode("utf-8")
    return text
コード例 #59
0
ファイル: util.py プロジェクト: mutita/E-Tipitaka-for-PC
                    indices[i], indices[-j] = indices[-j], indices[i]
                    yield tuple(pool[i] for i in indices[:r])
                    break
            else:
                return


# Pick the highest-resolution timer per platform.
# Fix: time.clock was removed in Python 3.8; fall back to perf_counter
# where it is missing (identical behavior on Python 2 / <3.8).
if sys.platform == 'win32':
    now = getattr(time, 'clock', time.perf_counter)
else:
    now = time.time

# Note: these functions return a tuple of (text, length), so when you call
# them, you have to add [0] on the end, e.g. str = utf8encode(unicode)[0]

# Codec helpers; each call returns a (text, length_consumed) tuple per
# the codecs API, so callers typically index [0].
utf8encode = codecs.getencoder("utf_8")
utf8decode = codecs.getdecoder("utf_8")

# Functions


def array_to_string(a):
    """Serialize array *a* to big-endian bytes.

    Byteswaps a copy on little-endian hosts so the output is
    endianness-stable across platforms.
    """
    # NOTE(review): array.tostring() was removed in Python 3.9
    # (tobytes() replaces it); this code targets Python 2 — confirm
    # before porting.
    if IS_LITTLE:
        a = copy(a)
        a.byteswap()
    return a.tostring()


def string_to_array(typecode, s):
    """Deserialize bytes *s* into an array of the given *typecode*."""
    # NOTE(review): this looks truncated by extraction — the companion
    # array_to_string byteswaps on little-endian hosts and a return
    # statement is missing here; confirm against the original module.
    a = array(typecode)
    a.fromstring(s)
コード例 #60
0
def signature(transaction_id, signing_key):
    """Return the PKCS#1 v1.5 signature of *transaction_id* as a hex string."""
    raw = PKCS1_v1_5.new(signing_key).sign(transaction_id)
    to_hex = codecs.getencoder("hex")
    hex_bytes, _length = to_hex(raw)
    return hex_bytes.decode()