Exemplo n.º 1
0
 def __init__(self, filename, line, msg, charset, fuzzy, fmt, noqa):  # pylint: disable=too-many-arguments
     """Build a PO message."""
     self.filename = filename
     self.line = line
     # Unescape every raw value in the message dict; on Python 3 the
     # unescaped bytes are decoded using the PO file charset.
     if sys.version_info < (3,):
         # python 2.x
         msg = dict((key, escape_decode(val)[0]) for key, val in msg.items())
     else:
         # python 3.x
         msg = dict((key, escape_decode(val)[0].decode(charset))
                    for key, val in msg.items())
     # Build messages as a list of tuples: (string, translation).
     self.messages = []
     if 'msgid_plural' not in msg:
         self.messages.append((msg.get('msgid', ''), msg.get('msgstr', '')))
     else:
         # Collect every plural translation: msgstr[0], msgstr[1], ...
         index = 0
         key = 'msgstr[{0}]'.format(index)
         while key in msg:
             self.messages.append((msg['msgid_plural'], msg[key]))
             index += 1
             key = 'msgstr[{0}]'.format(index)
     self.fuzzy = fuzzy
     self.fmt = fmt
     self.noqa = noqa
Exemplo n.º 2
0
def esc(el, l):
    """Apply an escape match to a line annotated with '(esc)'"""
    ann = b(' (esc)\n')
    suffix = len(ann)
    # Unescape whichever side carries the annotation before comparing.
    if el.endswith(ann):
        el = codecs.escape_decode(el[:-suffix])[0] + b('\n')
    if l.endswith(ann):
        l = codecs.escape_decode(l[:-suffix])[0] + b('\n')
    return el == l
Exemplo n.º 3
0
    def files_in_archive(self, force_refresh=False):
        """Return (and cache) the list of member paths inside the archive."""
        if self._files_in_archive and not force_refresh:
            return self._files_in_archive

        # Assemble the tar --list command line.
        cmd = [self.cmd_path, '--list', '-C', self.dest]
        if self.zipflag:
            cmd.append(self.zipflag)
        if self.opts:
            cmd.extend(['--show-transformed-names'] + self.opts)
        if self.excludes:
            cmd.extend('--exclude=' + quote(pattern) for pattern in self.excludes)
        cmd.extend(['-f', self.src])
        # Force the C locale so the listing output is parseable.
        locale_env = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C')
        rc, out, err = self.module.run_command(cmd, cwd=self.dest, environ_update=locale_env)
        if rc != 0:
            raise UnarchiveError('Unable to list files in the archive')

        for filename in out.splitlines():
            # Compensate for locale-related problems in gtar output (octal unicode representation) #11348
            filename = to_native(codecs.escape_decode(filename)[0])
            if not filename or filename in self.excludes:
                continue
            # We don't allow absolute filenames.  If the user wants to unarchive rooted in "/"
            # they need to use "dest: '/'".  This follows the defaults for gtar, pax, etc.
            # Allowing absolute filenames here also causes bugs: https://github.com/ansible/ansible/issues/21397
            if filename.startswith('/'):
                filename = filename[1:]
            self._files_in_archive.append(filename)

        return self._files_in_archive
Exemplo n.º 4
0
def raw_bytes(raw):
    """Make a bytes object out of a raw string.

    This is analogous to raw_unicode, but processes byte escape characters
    to produce a bytes object.
    """
    unescaped, _consumed = codecs.escape_decode(raw)
    return unescaped
Exemplo n.º 5
0
 def str_to_re(self, string):
     """Convert plain string to escaped regexp that can be compiled"""
     raw = string.encode('utf-8') if isinstance(string, str) else string
     # Interpret backslash escapes, then anchor the escaped literal.
     unescaped = codecs.escape_decode(raw)[0].decode('utf-8')
     return "^%s$" % re_escape(unescaped)
Exemplo n.º 6
0
def AssocInfoAdHoc():
    """Run an ad-hoc SQL query from the request and render it as an Excel table.

    NOTE(review): the posted 'sqlString' is executed verbatim (SQL injection
    risk) -- presumably this endpoint is restricted to trusted users; confirm.
    """
    # Get encrypted password and decrypt it
    pw_encrypted = open(r'D:\webapps\_server\pyodbc\pw', 'r').read()
    # The key file stores escaped bytes; escape_decode restores the raw key.
    key = codecs.escape_decode(open(r'D:\webapps\_server\pyodbc\key', 'r').read())[0]
    pw = crypto.decrypt_with_key(key, pw_encrypted)
    userid = 'ma17151'

    querystr = request.POST.get('sqlString', '').strip()
    cnxn_string = 'DSN=DSNOGW01;UID=' + userid + ';PWD=' + pw

    cnxn = pyodbc.connect(cnxn_string)
    cursor = cnxn.cursor()

    cursor.execute(querystr)
    cnxn.commit()
    resultset = cursor.fetchall()

    # Get column name of the resultset
    column_name_list = [tuple[0] for tuple in cursor.description]

    cursor.close()
    cnxn.close()

    # Send the response/result as Excel output
    response.content_type = 'application/vnd.ms-excel'
    output = template(r'D:\webapps\_templates\make_table.tpl', rows=resultset, column_names=column_name_list)
    return output
Exemplo n.º 7
0
 def insn__dot_ASCII_N(self, insn, mode, *args):
     """Insert the given text as bytes.

     Each STRING argument is unescaped with codecs.escape_decode and every
     resulting byte is emitted as an "<ordinal> 8bit" word.

     :raises AssemblerError: if any argument is not a STRING token.
     """
     result = []
     for am, value, m in args:
         if am != 'STRING':
             raise AssemblerError('Bad argument (.ASCII only allows strings): %r' % value)
         # escape_decode returns bytes: iterating yields ints on Python 3
         # but 1-char strings on Python 2.  The original ord(x) broke on
         # Python 3 (TypeError on int), so normalize explicitly.
         for x in codecs.escape_decode(m.group('STR'))[0]:
             code = x if isinstance(x, int) else ord(x)
             result.append(u'%s 8bit' % (code,))
     return u' '.join(result)
Exemplo n.º 8
0
def _parse_entry(line, quoted, escaped, get_bytes):
  """
  Parses the next entry from the given space separated content.

  :param str line: content to be parsed
  :param bool quoted: parses the next entry as a quoted value, removing the quotes
  :param bool escaped: unescapes the string
  :param bool get_bytes: provides **bytes** for the entry rather than a **str**

  :returns: **tuple** of the form (entry, remainder)

  :raises:
    * **ValueError** if quoted is True without the next value being quoted
    * **IndexError** if there's nothing to parse from the line
  """

  if line == '':
    raise IndexError('no remaining content to parse')

  next_entry, remainder = '', line

  if quoted:
    # validate and parse the quoted value
    start_quote, end_quote = _get_quote_indices(remainder, escaped)

    if start_quote != 0 or end_quote == -1:
      raise ValueError("the next entry isn't a quoted value: " + line)

    # drop the surrounding quote characters from the entry
    next_entry, remainder = remainder[1:end_quote], remainder[end_quote + 1:]
  else:
    # non-quoted value, just need to check if there's more data afterward
    if ' ' in remainder:
      next_entry, remainder = remainder.split(' ', 1)
    else:
      next_entry, remainder = remainder, ''

  if escaped:
    # Tor does escaping in its 'esc_for_log' function of 'common/util.c'. It's
    # hard to tell what controller functions use this in practice, but direct
    # users are...
    #
    #   * 'COOKIEFILE' field of PROTOCOLINFO responses
    #   * logged messages about bugs
    #   * the 'getinfo_helper_listeners' function of control.c
    #
    # Ideally we'd use "next_entry.decode('string_escape')" but it was removed
    # in python 3.x and 'unicode_escape' isn't quite the same...
    #
    #   https://stackoverflow.com/questions/14820429/how-do-i-decodestring-escape-in-python3

    next_entry = codecs.escape_decode(next_entry)[0]

    # escape_decode hands back bytes; convert to str unless bytes were asked for
    if stem.prereq.is_python_3() and not get_bytes:
      next_entry = stem.util.str_tools._to_unicode(next_entry)  # normalize back to str

  if get_bytes:
    next_entry = stem.util.str_tools._to_bytes(next_entry)

  return (next_entry, remainder.lstrip())
Exemplo n.º 9
0
 def load_string(self):
     """Copied from python 3.4 pickle.Unpickler.load_string"""
     data = self.readline()[:-1]
     # The argument must be wrapped in matching single or double quotes.
     properly_quoted = (len(data) >= 2 and data[0] == data[-1]
                        and data[0] in b"\"'")
     if not properly_quoted:
         raise pickle.UnpicklingError("the STRING opcode argument must be quoted")
     unescaped = codecs.escape_decode(data[1:-1])[0]
     self.append(self._decode_string(unescaped))
Exemplo n.º 10
0
 def load_string(self):
     data = self.readline()[:-1]
     # Reject anything not wrapped in matching single or double quotes.
     if len(data) < 2 or data[0] != data[-1] or data[0] not in b'"\'':
         raise UnpicklingError("the STRING opcode argument must be quoted")
     body = data[1:-1]
     # Interpret escapes, then decode with the unpickler's text settings.
     self.append(codecs.escape_decode(body)[0]
                 .decode(self.encoding, self.errors))
Exemplo n.º 11
0
def _unscramble_name(nameparts):
    """Join name parts, undoing backslash escaping when present."""
    name = ' '.join(nameparts)
    if '\\' not in name:
        # No escape sequences: the joined name is already usable.
        return name

    if name[0] == '"':
        name = name.strip('"')

    # Decode octal/standard escapes into bytes, then back to text.
    return codecs.escape_decode(name)[0].decode()
Exemplo n.º 12
0
 def str_to_re(self, string):
     """Convert plain string to escaped regexp that can be compiled"""
     if isinstance(string, text_type):
         string = string.encode("utf-8")
     # Undo backslash escapes; the mechanism differs between Python majors.
     if not PY3:
         unescaped = string.decode("string_escape")
     else:
         unescaped = codecs.escape_decode(string)[0]
     return "^%s$" % re_escape(unescaped.decode("utf-8"))
Exemplo n.º 13
0
    def __init__(self, name, command, response_size, byte_filter, bit_filter, response_type):
        """Describe a device command and how to interpret its response.

        NOTE(review): byte_filter/bit_filter presumably select which part of
        the raw response to keep -- confirm against the consumer of responses.
        """
        self.name = name
        # Decode any escaped hex chars that need to be sent to the device.
        self.command = codecs.escape_decode(command)[0]
        # Expected number of bytes in the device's reply.
        self.response_size = int(response_size)
        self.byte_filter = byte_filter
        self.bit_filter = bit_filter
        self.response_type = response_type

        # Initialize the response list
        self.responses = []
Exemplo n.º 14
0
def emailCurrentWeather():
    """Fetch current conditions from Wunderground and email them.

    Returns "ERROR" when the SMTP login or the recipient is rejected;
    prints a notice when the API returns no observation.
    NOTE(review): 'your_key'/'your_email' look like placeholders to configure.
    """
    json = request.urlopen('http://api.wunderground.com/api/your_key/conditions/q/OH/Dublin.json')
    parsed_json = simplejson.load(json)

    recipient = ['*****@*****.**']

    # Decrypt the mail password; the key file is stored with escaped bytes.
    key = codecs.escape_decode(open('/home/pi/tmp/key','r').read().strip())[0]
    pw = open('/home/pi/tmp/pw','r').read().strip()
    pwd = crypto.decrypt_with_key(key, pw)
    sender  = 'your_email'
    subject = 'Current Weather Conditions'

    top_border = "Current weather conditions from the Wunderground\n\n"+"Brought to you by the Raspberry Pi!\n"+\
    "*" * 45+"\n"
    bottom_border = "*" * 45+"\n\n"

    if parsed_json['current_observation']:
        # Pull the fields used in the message body.
        city = parsed_json['current_observation']['display_location']['city']
        state = parsed_json['current_observation']['display_location']['state']
        current_temp = parsed_json['current_observation']['temperature_string']
        feels_like = parsed_json['current_observation']['feelslike_string']
        as_of = parsed_json['current_observation']['observation_time']
        rel_humidity = parsed_json['current_observation']['relative_humidity']
        weather = parsed_json['current_observation']['weather']
        forecast_url = parsed_json['current_observation']['forecast_url']

        email = top_border+as_of+"\n"+"City/State: "+city+", "+state+"\n"+bottom_border+ \
        "Current temp: "+current_temp+"\n"+"Feels like: "+feels_like+"\n"+"Rel. humidity: "+rel_humidity+"\n"+ \
        "Weather condition: "+weather+"\n"+"Forecast URL: "+forecast_url

        subject = subject+": "+current_temp

        msg = MIMEText(email)
        msg['Subject'] = subject
        msg['From'] = sender
        msg['To'] = COMMASPACE.join(recipient) # COMMASPACE still works ok with just one recipient
        #msg['CC'] = COMMASPACE.join(recipient)
        #msg['BCC'] = COMMASPACE.join(recipient)
        server = smtplib.SMTP('smtp.gmail.com:587')
        server.starttls()

        try:
            server.login(sender,pwd)
        except smtplib.SMTPAuthenticationError: # Check for authentication error
            return "ERROR"

        try:
            server.sendmail(sender, recipient, msg.as_string())
        except smtplib.SMTPRecipientsRefused:   # Check if recipient's email was accepted by the server
            return "ERROR"
        server.quit()
    else:
        now = datetime.now()
        print("Current weather conditions are not available at this time:",now.strftime("%Y-%m-%d %I:%M%p"))
Exemplo n.º 15
0
 def load_string(self):
     """Read a quoted STRING opcode argument and push the decoded text."""
     orig = self.readline()
     rep = orig[:-1]
     # Strip a matching pair of single or double quotes.
     for quote_char in (b'"', b"'"):
         if not rep.startswith(quote_char):
             continue
         if not rep.endswith(quote_char):
             raise ValueError("insecure string pickle")
         rep = rep[len(quote_char) : -len(quote_char)]
         break
     else:
         raise ValueError("insecure string pickle: %r" % orig)
     decoded = codecs.escape_decode(rep)[0]
     self.append(decoded.decode(self.encoding, self.errors))
Exemplo n.º 16
0
Arquivo: git.py Projeto: GNOME/meld
        def get_real_path(name):
            """Map a path from git output to an absolute path in the repo."""
            name = name.strip()
            if os.name == "nt":
                # Git returns unix-style paths on Windows
                name = os.path.normpath(name)
            # Unicode file names and file names containing quotes are
            # returned by git as quoted strings
            if name[0] == '"':
                quoted = name.encode("latin1")
                name = codecs.escape_decode(quoted[1:-1])[0].decode("utf-8")
            return os.path.abspath(os.path.join(self.location, name))
Exemplo n.º 17
0
def escaped_str_to_bytes(data):
    """
    Take an escaped string and return the unescaped bytes equivalent.

    :param data: escaped text, e.g. "foo\\n"
    :returns: the unescaped bytes
    :raises ValueError: if data is not a str
    """
    if not isinstance(data, str):
        raise ValueError("data must be str")

    # Python 3 only: the six.PY2 'string-escape' branch was dead code here
    # (this file already uses Python-3-only features) and six is a
    # third-party dependency, so it is dropped.
    # This one is difficult - we use an undocumented Python API here
    # as per http://stackoverflow.com/a/23151714/934719
    return codecs.escape_decode(data)[0]
Exemplo n.º 18
0
def escaped_str_to_bytes(data):
    """
    Take an escaped string and return the unescaped bytes equivalent.

    Raises:
        ValueError, if the escape sequence is invalid.
    """
    if isinstance(data, str):
        # This one is difficult - we use an undocumented Python API here
        # as per http://stackoverflow.com/a/23151714/934719
        return codecs.escape_decode(data)[0]
    raise ValueError("data must be str, but is {}".format(data.__class__.__name__))
Exemplo n.º 19
0
def encrypt_with_key(key, privateInfo):
    """ Method to encrypt your message using AES encryption """

    BLOCK_SIZE = 16
    PADDING = '{'

    def pad(message):
        # Right-pad to a whole number of AES blocks.
        return message + (BLOCK_SIZE - len(message) % BLOCK_SIZE) * PADDING

    # Need to use codecs to prevent key from being parsed with double slashes
    cipher = AES.new(codecs.escape_decode(key)[0])
    encoded = base64.b64encode(cipher.encrypt(pad(privateInfo)))
    return encoded.decode('utf-8')
Exemplo n.º 20
0
Arquivo: main.py Projeto: njsmith/zs
def binaryize(arg):
    """Convert a command-line argument to the raw bytes it denotes.

    None passes through unchanged.  arg is unicode: it might have been
    originally unicode (e.g. on windows) or originally binary (e.g. on
    POSIX).  Encoding with surrogateescape recovers the original byte
    string assuming a utf-8 locale (http://legacy.python.org/dev/peps/pep-0383/).
    Backslash escapes in the result are then interpreted.
    """
    if arg is None:
        return None
    # Python 3 only: the six.PY3 guard was removed since this codebase is
    # py3-only and six is a third-party dependency.
    arg = arg.encode("utf-8", "surrogateescape")
    # interpret \ escapes in the byte string.
    return codecs.escape_decode(arg)[0]
Exemplo n.º 21
0
    def generate_suggestions(self):
        """Scan the address space for DTB signature hits and yield candidates.

        Reads memory in SCAN_BLOCKSIZE chunks, finds every occurrence of the
        profile's DTBSignature, treats each hit as an _EPROCESS and yields
        the Idle process' DirectoryTableBase value.
        """
        offset = 0
        data = self.obj_vm.read(offset, constants.SCAN_BLOCKSIZE)
        # The signature is stored escaped; decode it to the raw byte pattern.
        sig = codecs.escape_decode(bytes(str(self.obj_parent.DTBSignature), sys.getdefaultencoding()))[0]
        while data:
            found = data.find(sig, 0)
            while found >= 0:
                proc = obj.Object("_EPROCESS", offset = offset + found,
                                  vm = self.obj_vm)
                # Only hits whose process name contains 'Idle' are yielded.
                if b'Idle' in proc.ImageFileName.v():
                    yield proc.Pcb.DirectoryTableBase.v()
                # Continue searching after this hit within the same chunk.
                found = data.find(sig, found + 1)

            offset += len(data)
            data = self.obj_vm.read(offset, constants.SCAN_BLOCKSIZE)
Exemplo n.º 22
0
 def initialize(self, randgen, max_termlength):
     # pre: validTermLengths(self, randgen, max_termlength)
     # Generate a random value for every initial term and write it to a
     # file named after the term's label.  (Python 2 code: print statement.)
     for i in self.initials:
         if self.verbose:
             print 'Generating random %s for %s' % (i.type, i.label)
         i.value = randgen.generate(i.type, max_termlength)
         # Retry with suffixed labels (base_1, base_2, ...) until a file
         # can be opened for writing.
         # NOTE(review): loops forever if every candidate open() fails.
         while True:
             try:
                 f = open(i.label, 'w')
                 break
             except IOError as e:
                 i.iter = i.iter + 1
                 i.label = i.base + '_' + str(i.iter)
         # Write the unescaped value out.
         f.write(codecs.escape_decode(i.value)[0])
         f.close()
Exemplo n.º 23
0
 def decode(self, lines):
     """Decode a list of tagged text lines into Python values.

     Tags: 'i' = integer literal, 'h' = hex-encoded bytes, 's' = escaped
     string, 'o' = passthrough to stdout (produces no value).  Empty lines
     are skipped; anything else raises ValueError.
     """
     result = []
     for line in lines:
         if not line:
             continue
         tag, payload = line[0], line[1:]
         if tag == 'i':
             result.append(int(payload, 0))
         elif tag == 'h':
             result.append(payload.decode('hex'))
         elif tag == 's':
             result.append(codecs.escape_decode(payload)[0])
         elif tag == 'o':
             sys.stdout.write(payload)
         else:
             raise ValueError('unknown line type: %r' % (line,))
     return result
Exemplo n.º 24
0
 def word_string_literal(self, stack):
     """Put a string on the stack."""
     # Gather words until one terminated by a double quote (character wise
     # reading is emulated on top of whole-word reads).
     collected = []
     word = self.next_word()
     while not word.endswith('"'):
         collected.append(word)
         word = self.next_word()
     if word != '"':
         collected.append(word[:-1])
     text = codecs.escape_decode(u' '.join(collected))[0]
     if self.compiling:
         self.frame.append(self.instruction_literal)
         self.frame.append(text)
     else:
         self.push(text)
Exemplo n.º 25
0
 def word_copy_words(self, stack):
     """Output a string."""
     # Collect words up to the closing double quote (emulates character
     # wise reading).
     collected = []
     while True:
         word = self.next_word()
         if not word.endswith('"'):
             collected.append(word)
             continue
         if word != '"':
             collected.append(word[:-1])
         break
     text = codecs.escape_decode(u' '.join(collected))[0]
     if not self.compiling:
         self.doctree.write(text)
     else:
         self.frame.append(self.instruction_output_text)
         self.frame.append(text)
Exemplo n.º 26
0
    def delete(self, item):
        """Remove an item from this record set; raise RecordsError if absent."""
        if self.type == "TXT":
            # _new_rdata re-escapes escaped characters, so unescape first
            # (otherwise \\n would turn into \\\\n).
            unescaped = codecs.escape_decode(item)[0]
            # Strip surrounding '"' if present; the Dns module adds quotes
            # automatically and breaks when they are already there.
            if unescaped.startswith('"') and unescaped.endswith('"'):
                unescaped = unescaped[1:-1]
            item = unescaped

        rd = _new_rdata(self.type, item)
        try:
            self._rdataset.remove(rd)
        except ValueError:
            raise RecordsError("No such item in record: %s" % item)
Exemplo n.º 27
0
def scan_wifi(iface):
    """Scan iface for wifi networks; return (ssids, encryptions)."""
    # Retry until `iwlist` succeeds; a failure is treated as the link
    # being down, so bring it up and try again.
    while True:
        try:
            with open('/dev/null', 'w') as null:
                scan = subprocess.check_output(['iwlist', iface, 'scanning'],
                                               stderr=null).decode()
        except subprocess.CalledProcessError:
            subprocess.call(['ip', 'link', 'set', iface, 'up'])
        else:
            break
    networks = SSID_REG.findall(scan)
    encryptions = PASSWORD_REG.findall(scan)
    # SSIDs containing escape sequences come back escaped; decode in place.
    for idx, ssid in enumerate(list(networks)):
        if '\\' in ssid:
            networks[idx] = codecs.escape_decode(ssid)[0].decode()
    return networks, encryptions
Exemplo n.º 28
0
def escaped_str_to_bytes(data):
    """
    Take an escaped string and return the unescaped bytes equivalent.
    """
    if not isinstance(data, six.string_types):
        if six.PY2:
            raise ValueError("data must be str or unicode, but is {}".format(data.__class__.__name__))
        raise ValueError("data must be str, but is {}".format(data.__class__.__name__))

    if not six.PY2:
        # This one is difficult - we use an undocumented Python API here
        # as per http://stackoverflow.com/a/23151714/934719
        return codecs.escape_decode(data)[0]

    # Python 2 path: normalize unicode to utf8 bytes, then unescape.
    if isinstance(data, unicode):
        data = data.encode("utf8")
    return data.decode("string-escape")
Exemplo n.º 29
0
def value_from_fsnative(arg, escape):
    """Takes an item from argv and returns a text_type value without
    surrogate escapes or raises ValueError.
    """

    assert isinstance(arg, fsnative)

    if escape:
        raw = fsn2bytes(arg, "utf-8")
        # Interpret backslash escapes; the API differs between Python majors.
        if PY2:
            raw = raw.decode("string_escape")
        else:
            raw = codecs.escape_decode(raw)[0]
        arg = bytes2fsn(raw, "utf-8")

    return fsn2text(arg, strict=True)
Exemplo n.º 30
0
  def _ReadSpecificationFile(self, path):
    """Reads the format specification file.

    Args:
      path (str): path of the format specification file.

    Returns:
      FormatSpecificationStore: format specification store.
    """
    store = specification.FormatSpecificationStore()

    with io.open(
        path, 'rt', encoding=self._SPECIFICATION_FILE_ENCODING) as file_object:
      for raw_line in file_object.readlines():
        text = raw_line.strip()
        # Ignore blank lines and comments.
        if not text or text.startswith('#'):
          continue

        fields = text.split()
        if len(fields) != 3:
          logger.error('[skipping] invalid line: {0:s}'.format(text))
          continue
        identifier, offset, pattern = fields

        try:
          offset = int(offset, 10)
        except ValueError:
          logger.error('[skipping] invalid offset in line: {0:s}'.format(text))
          continue

        try:
          # TODO: find another way to do this that doesn't use an undocumented
          # API.
          pattern = codecs.escape_decode(pattern)[0]
        except ValueError:
          # Raised e.g. when the pattern contains "\xg1".
          logger.error(
              '[skipping] invalid pattern in line: {0:s}'.format(text))
          continue

        format_specification = specification.FormatSpecification(identifier)
        format_specification.AddNewSignature(pattern, offset=offset)
        store.AddSpecification(format_specification)

    return store
Exemplo n.º 31
0
    def get(self):
        """Yield subtitle and video stream objects for an SVT page.

        Tries the newer reduxState JSON embedded in the page first; falls
        back to the escaped stateData blob (unescaped via escape_decode) to
        find a clip/episode id, then queries the video API for streams.
        """
        data = self.get_urldata()
        match = re.search("n.reduxState=(.*);", data)
        if not match:
            # Fallback: the page embeds state as an escaped JSON string.
            match = re.search(r"stateData = JSON.parse\(\"(.*)\"\)\<\/script",
                              data)
            if not match:
                yield ServiceError("Cant find video info.")
                return
            janson = json.loads(
                codecs.escape_decode(match.group(1))[0].decode("utf-8"))
            # Prefer clips; fall back to episodes.
            if janson["recipe"]["content"]["data"]["videoClips"]:
                vid = janson["recipe"]["content"]["data"]["videoClips"][0][
                    "id"]
            else:
                vid = janson["recipe"]["content"]["data"]["videoEpisodes"][0][
                    "id"]
            res = self.http.get(
                "https://api.svt.se/videoplayer-api/video/{}".format(vid))
        else:
            janson = json.loads(match.group(1))
            # Take the svtId of the first article's first media image.
            vid = janson["areaData"]["articles"][list(
                janson["areaData"]
                ["articles"].keys())[0]]["media"][0]["image"]["svtId"]
            res = self.http.get("https://api.svt.se/video/{}".format(vid))

        janson = res.json()
        # Emit websrt subtitles when referenced by the API response.
        if "subtitleReferences" in janson:
            for i in janson["subtitleReferences"]:
                if i["format"] == "websrt" and "url" in i:
                    yield subtitle(copy.copy(self.config),
                                   "wrst",
                                   i["url"],
                                   output=self.output)

        videos = self._get_video(janson)
        yield from videos
Exemplo n.º 32
0
 def OnFind(self, evt):
     """Handle a Find / Find-Next event from the wx find dialog.

     Unescapes the search string (so escaped byte sequences can be typed),
     searches the hex editor buffer forward from the current offset, and
     updates the selection and status bar with the outcome.
     """
     et = evt.GetEventType()
     # Allow escape sequences in the user's search string.
     findstring = codecs.escape_decode(evt.GetFindString())[0]
     # Flag bit 0x4 is treated as case sensitivity -- presumably
     # wx.FR_MATCHCASE; TODO confirm.
     case = bool(evt.GetFlags() & 4)
     
     def abortcheck():
         # Pump the event loop so the user can cancel a long search.
         wx.Yield()
         return self._abort_find
     
     def find_next():
         self._searching = True
         self._abort_find = False
         try:
             offset = self.hexEditor.bin.find(
                 self.hexView.offset + 1,
                 findstring,
                 casesensitive=case,
                 wrap = True,
                 abortcheck=abortcheck
             )
         except StopIteration:
             # Search was aborted via abortcheck.
             self.statusBar.SetStatusText("Search aborted", 0)
             self._find_start = None
         else:
             if offset is not None:
                 self.hexView.setSelection(offset, offset+len(findstring)-1, offset=offset)
                 self.statusBar.SetStatusText("String found at offset 0x%08x"% offset, 0)
                 self._find_start = offset
             else:
                 wx.Bell()
                 self.statusBar.SetStatusText("String not found", 0)
                 self._find_start = None
         self._searching = False
     
     if et == wx.wxEVT_COMMAND_FIND or et == wx.wxEVT_COMMAND_FIND_NEXT:
         self.statusBar.SetStatusText("Searching...", 0)
         find_next()
Exemplo n.º 33
0
def parse_kconfig(file: TextIO, allow_include: bool,
                  config: Dict[str, str]) -> None:
    """Parse kconfig-style lines from *file* into *config* in place.

    Lines matching _config_re add a key/value pair; when allow_include is
    true, lines matching _include_re recursively pull in another file.
    Anything left over after comment stripping is reported on stderr.
    """
    for lineno, line in enumerate(file, 1):
        # _line_re strips trailing comment/whitespace content.
        line = _line_re.match(line.rstrip()).group()
        if not line:
            continue

        config_match = _config_re.fullmatch(line)
        if config_match:
            config[config_match.group(1)] = config_match.group(2)
            continue

        if allow_include:
            include_match = _include_re.fullmatch(line)
            if include_match:
                # Include paths may contain escapes; decode to raw bytes
                # and resolve relative to the including file.
                directory = os.fsencode(os.path.dirname(file.name))
                include_path = os.path.join(
                    directory,
                    codecs.escape_decode(include_match.group(1))[0],
                )
                with open(include_path, "r") as include_file:
                    parse_kconfig(include_file, True, config)
                continue

        print(f"{file.name}:{lineno}:invalid syntax", file=sys.stderr)
Exemplo n.º 34
0
def AssocInfo():
    """Look up associate info via AssocLookUp and render an HTML table.

    NOTE(review): credentials are read/decrypted from files on disk; the
    query parameter is forwarded to AssocLookUp.getResultset -- confirm it
    parameterizes the SQL.
    """
    # Get encrypted password and decrypt it
    pw_encrypted = open(r'D:\webapps\_server\pyodbc\pw', 'r').read()
    # The key file stores escaped bytes; escape_decode restores the raw key.
    key = codecs.escape_decode(open(r'D:\webapps\_server\pyodbc\key', 'r').read())[0]
    pw = crypto.decrypt_with_key(key, pw_encrypted)
    userid = 'ma17151'

    querystr = request.POST.get('qryParameter', '').strip()
    cnxn_string = 'DSN=DSNOGW01;UID=' + userid + ';PWD=' + pw

    cnxn = pyodbc.connect(cnxn_string)
    cursor = cnxn.cursor()

    # Get the resultset
    resultset = AssocLookUp.getResultset(querystr, cursor)

    # Get column name of the resultset
    column_name_list = [tuple[0] for tuple in cursor.description]

    cursor.close()
    cnxn.close()

    output = template(r'D:\webapps\_templates\make_table.tpl', rows=resultset, column_names=column_name_list)
    return output
Exemplo n.º 35
0
 def get(self):
     """Demonstrate unsafe vs. safe deserialization of a request payload.

     SECURITY: the 'pick_infected' and 'yaml_infected' branches execute
     attacker-controlled payloads on purpose (vulnerability demos); they
     must never be exposed outside a sandbox.
     """
     par=serialparm.parse_args()
     payload=par.get('payload','')
     type=par.get('type','')
     try:
         # Bad example 1: deserializing untrusted data directly with pickle
         if type=='pick_infected':
         # Request url: http://0.0.0.0:8888/api/deserialization/?payload=\x80\x03cposix\nsystem\nq\x00X\x06\x00\x00\x00whoamiq\x01\x85q\x02Rq\x03.'&type=pick_infected
             payload_byte=bytes(payload, encoding = "utf-8")
             import codecs
             payload_tuple = codecs.escape_decode(payload_byte, "hex-escape")
             return {"code": 200, "message": "{}".format(pickle.loads(payload_tuple[0]))}
         # Good example 1: use json instead of pickle; the json library is safe
         if type=='pick_safe':
             return {"code": 200, "message": "{}".format(json.loads(payload))}
         # Bad example 2: yaml (>5.1) with UnsafeLoader, or any yaml below 5.1, is exploitable
         if type=='yaml_infected':
         # Request url: http://0.0.0.0:8888/api/deserialization/?payload=%21%21python%2Fobject%2Fnew%3Aos.system+%5B%22whoami%22%5D&type=yaml_infected
             return {"code": 200, "message": "{}".format(yaml.load(payload,Loader=yaml.UnsafeLoader))}
         # Good example 2: yaml (>5.1) with SafeLoader safely handles untrusted data
         if type=='yaml_safe':
             return {"code": 200, "message": "{}".format(yaml.load(payload,Loader=yaml.SafeLoader))}
     except Exception as e:
         return jsonify({"code": "异常", "message": "{}".format(e)})
Exemplo n.º 36
0
    def files_in_archive(self, force_refresh=False):
        """List member paths of the tar archive, caching the result.

        Raises UnarchiveError when the tar list command exits non-zero.
        """
        if self._files_in_archive and not force_refresh:
            return self._files_in_archive

        cmd = [self.cmd_path, '--list', '-C', self.dest]
        if self.zipflag:
            cmd.append(self.zipflag)
        if self.opts:
            cmd.extend(['--show-transformed-names'] + self.opts)
        if self.excludes:
            cmd.extend(['--exclude=' + quote(f) for f in self.excludes])
        cmd.extend(['-f', self.src])
        # Force the C locale so tar's output is stable and parseable.
        rc, out, err = self.module.run_command(cmd,
                                               cwd=self.dest,
                                               environ_update=dict(
                                                   LANG='C',
                                                   LC_ALL='C',
                                                   LC_MESSAGES='C'))
        if rc != 0:
            raise UnarchiveError('Unable to list files in the archive')

        for filename in out.splitlines():
            # Compensate for locale-related problems in gtar output (octal unicode representation) #11348
            # filename = filename.decode('string_escape')
            filename = to_native(codecs.escape_decode(filename)[0])

            if filename and filename not in self.excludes:
                # We don't allow absolute filenames.  If the user wants to unarchive rooted in "/"
                # they need to use "dest: '/'".  This follows the defaults for gtar, pax, etc.
                # Allowing absolute filenames here also causes bugs: https://github.com/ansible/ansible/issues/21397
                if filename.startswith('/'):
                    filename = filename[1:]

                self._files_in_archive.append(filename)

        return self._files_in_archive
Exemplo n.º 37
0
import numpy as np
import _pickle as cPickle
import codecs

# Demo: pickle a nested list, round-trip the pickle bytes through their
# str() representation, and unpickle the reconstructed bytes.

def _show(value):
    print(value)
    print(type(value))

a2 = np.arange(24).reshape(3, 4, 2)
a = [[[2.45, 243], [3.45, 128]], [[1, 67], [8.6, 255]]]

b = cPickle.dumps(a)
_show(b)

# Drop the leading b' and trailing ' from repr(bytes).
s = str(b)[2:-1]
_show(s)

b1 = bytes(s, encoding="gbk")
_show(b1)

# Undo the \xNN escapes introduced by repr to recover the pickle bytes.
b2 = codecs.escape_decode(b1, 'hex-escape')[0]
_show(b2)

a1 = cPickle.loads(b2)
_show(a1)
Exemplo n.º 38
0
 def parsed_separator(self):
     """Return self.separator with backslash escapes interpreted."""
     raw = bytes(self.separator, "utf-8")
     return codecs.escape_decode(raw)[0].decode("utf-8")
Exemplo n.º 39
0
def _parse_entry(line: str, quoted: bool, escaped: bool,
                 get_bytes: bool) -> Tuple[Union[str, bytes], str]:
    """
  Parses the next entry from the given space separated content.

  :param line: content to be parsed
  :param quoted: parses the next entry as a quoted value, removing the quotes
  :param escaped: unescapes the string
  :param get_bytes: provides **bytes** for the entry rather than a **str**

  :returns: **tuple** of the form (entry, remainder)

  :raises:
    * **ValueError** if quoted is True without the next value being quoted
    * **IndexError** if there's nothing to parse from the line
  """

    if line == '':
        raise IndexError('no remaining content to parse')

    next_entry, remainder = '', line

    if quoted:
        # validate and parse the quoted value
        start_quote, end_quote = _get_quote_indices(remainder, escaped)

        if start_quote != 0 or end_quote == -1:
            raise ValueError("the next entry isn't a quoted value: " + line)

        # drop the surrounding quote characters from the entry
        next_entry, remainder = remainder[1:end_quote], remainder[end_quote +
                                                                  1:]
    else:
        # non-quoted value, just need to check if there's more data afterward
        if ' ' in remainder:
            next_entry, remainder = remainder.split(' ', 1)
        else:
            next_entry, remainder = remainder, ''

    if escaped:
        # Tor does escaping in its 'esc_for_log' function of 'common/util.c'. It's
        # hard to tell what controller functions use this in practice, but direct
        # users are...
        #
        #   * 'COOKIEFILE' field of PROTOCOLINFO responses
        #   * logged messages about bugs
        #   * the 'getinfo_helper_listeners' function of control.c
        #
        # Ideally we'd use "next_entry.decode('string_escape')" but it was removed
        # in python 3.x and 'unicode_escape' isn't quite the same...
        #
        #   https://stackoverflow.com/questions/14820429/how-do-i-decodestring-escape-in-python3

        next_entry = codecs.escape_decode(next_entry)[0]  # type: ignore

        # escape_decode hands back bytes; convert unless bytes were requested
        if not get_bytes:
            next_entry = stem.util.str_tools._to_unicode(
                next_entry)  # normalize back to str

    if get_bytes:
        return (stem.util.str_tools._to_bytes(next_entry), remainder.lstrip())
    else:
        return (next_entry, remainder.lstrip())
Exemplo n.º 40
0
 def test_empty(self):
     self.assertEqual(codecs.escape_decode(b""), (b"", 0))
     self.assertEqual(codecs.escape_decode(bytearray()), (b"", 0))
Exemplo n.º 41
0
def load_model_from_pyml_exp_db(args_dict, only_done=True, logs_dir="."):
    """Load the most recently finished model checkpoint recorded in pyml
    experiment databases found under *logs_dir*.

    Parameters
    ----------
    args_dict : dict
        Experiment parameters used to select matching runs (also used to
        derive a fallback output filename for binary-blob models).
    only_done : bool
        If True, only consider experiments whose ``_state`` is ``'done'``.
    logs_dir : str
        Directory scanned for ``log*.db*`` files.

    Returns
    -------
    tuple or None
        ``(model_or_state_dict, path_or_filename)`` on success; ``None``
        when no usable experiment or checkpoint is found.
    """
    # merge database from log files
    logs_dir = os.path.abspath(logs_dir)
    dfs = []
    for pyml_exp_db_path in glob.glob(os.path.join(logs_dir, "log*.db*")):
        logging.info("Loading experiments db %s" % pyml_exp_db_path)
        try:
            df = pyml_db_to_pandas(
                pyml_exp_db_path,
                args_dict=args_dict,
                exclude_args=[
                    "num_bootstraps",
                    "scores_file",
                    "inference",
                    "alpha",  # XXX rm
                    "num_threads",
                    "disable_eval",
                ])
        except (sqlite3.DatabaseError, pd.io.sql.DatabaseError) as e:
            # database is probably empty
            logging.error("DatabaseError: %s" % e)
            continue
        dfs.append(df)
    if not dfs:
        return
    df = pd.concat(dfs)
    df.fillna(value=np.nan, inplace=True)

    # maybe run was aborted
    if only_done:
        df = df.query("_state == 'done'")

    # get model binary string
    if len(df) == 0:
        return

    if "model" not in df.columns:
        # logging.warn is a deprecated alias of logging.warning
        logging.warning("Database %s doesn't have a 'model' column" %
                        (pyml_exp_db_path))
        return

    df = df[~df.model.isnull()]  # only consider experiments with checkpoints
    if len(df) == 0:
        return

    model_token = df.loc[df._end_date == df._end_date.max()].model.tolist()[0]
    try:
        # XXX hack to remove quotes from string
        model_token = model_token[1:-1]
        logging.info("Loading saved model state from %s" % model_token)
        state_dict = torch.load(model_token)
        print(state_dict)
        return state_dict, model_token
    except Exception:
        # The original bare ``except:`` also swallowed KeyboardInterrupt and
        # SystemExit; catch Exception only.  Fall back to interpreting the
        # token as an escaped binary model string stored in the database.
        model_str = codecs.escape_decode(model_token[1:-1])[0][1:]

        copy = {}
        for param, val in args_dict.items():
            if isinstance(val, list):
                val = tuple(val)  # lists are unhashable; freeze for the key
            copy[param] = val
        key = hash(frozenset(copy.items()))
        ofile = "model%s.pkl" % key
        return unserialize_model(model_str), ofile
Exemplo n.º 42
0
def parse_args():
    """
    Argument parsing

    The client works by giving general options, a subcommand
    and then options specific to the subcommand.

    For example:

        clikraken ticker                    # just a subcommand
        clikraken depth --pair XETHZEUR     # subcommand option
        clikraken ohlc -i 15 -s 1508513700
        clikraken --raw olist               # global option
        clikraken place buy 0.1337 10.42    # subcommand argument

    Returns the parsed argparse namespace; prints usage and exits when
    neither a subcommand handler nor a main function was selected.
    """

    # some help strings that are repeated many times
    pairs_help = "comma delimited list of asset pairs"
    pair_help = "asset pair"

    epilog_str = textwrap.dedent("""\
        To get help about a subcommand use: clikraken SUBCOMMAND --help
        For example:
            clikraken place --help

        Current default currency pair: {dp}.

        Create or edit the setting file {usp} to change it.
        If the setting file doesn't exist yet, you can create one by doing:
            clikraken generate_settings > {usp}

        You can also set the CLIKRAKEN_DEFAULT_PAIR environment variable
        which has precedence over the settings from the settings file.
        """.format(dp=gv.DEFAULT_PAIR, usp=gv.USER_SETTINGS_PATH))

    # Top-level parser: these global options apply to every subcommand.
    parser = argparse.ArgumentParser(
        description='clikraken - Command line client for the Kraken exchange',
        epilog=epilog_str,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-V',
                        '--version',
                        action='store_const',
                        const=ck_utils.version,
                        dest='main_func',
                        help='show program version')
    parser.add_argument('--debug', action='store_true', help='debug mode')
    parser.add_argument('--raw',
                        action='store_true',
                        help='output raw json results from the API')
    parser.add_argument('--csv',
                        action='store_true',
                        help='output results from the API as CSV')
    parser.add_argument('--csvseparator',
                        default=';',
                        help='separator character to use with CSV output')
    parser.add_argument(
        '--cron',
        action='store_true',
        help=
        'activate cron mode (tone down errors due to timeouts or unavailable Kraken service)'
    )
    parser.set_defaults(main_func=None)

    # One sub-parser per subcommand; each registers its handler via sub_func.
    subparsers = parser.add_subparsers(dest='subparser_name',
                                       help='available subcommands')

    # Generate setting.ini
    parser_gen_settings = subparsers.add_parser(
        'generate_settings',
        help='[clikraken] Print default settings.ini to stdout',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_gen_settings.set_defaults(
        sub_func=ck_utils.output_default_settings_ini)

    # ----------
    # Public API
    # ----------

    # Asset Pairs
    parser_asset_pairs = subparsers.add_parser(
        'asset_pairs',
        aliases=['ap'],
        help='[public] Get the list of available asset pairs',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_asset_pairs.set_defaults(sub_func=asset_pairs)

    # Ticker
    parser_ticker = subparsers.add_parser(
        'ticker',
        aliases=['t'],
        help='[public] Get the ticker',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_ticker.add_argument('-p',
                               '--pair',
                               default=gv.TICKER_PAIRS,
                               help=pairs_help + " to get info on. ")
    parser_ticker.set_defaults(sub_func=ticker)

    # Market depth (Order book)
    parser_depth = subparsers.add_parser(
        'depth',
        aliases=['d'],
        help='[public] Get the current market depth data',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_depth.add_argument('-p',
                              '--pair',
                              default=gv.DEFAULT_PAIR,
                              help=pair_help)
    parser_depth.add_argument('-c',
                              '--count',
                              type=int,
                              default=7,
                              help="maximum number of asks/bids.")
    parser_depth.set_defaults(sub_func=depth)

    # List of last trades
    parser_last_trades = subparsers.add_parser(
        'last_trades',
        aliases=['lt'],
        help='[public] Get the last trades',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_last_trades.add_argument('-p',
                                    '--pair',
                                    default=gv.DEFAULT_PAIR,
                                    help=pair_help)
    parser_last_trades.add_argument('-s',
                                    '--since',
                                    default=None,
                                    help="return trade data since given id")
    parser_last_trades.add_argument('-c',
                                    '--count',
                                    type=int,
                                    default=15,
                                    help="maximum number of trades.")
    parser_last_trades.set_defaults(sub_func=last_trades)

    # Open High Low Close data
    parser_ohlc = subparsers.add_parser(
        'ohlc',
        aliases=['oh'],
        help='[public] Get the ohlc data',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_ohlc.add_argument('-p',
                             '--pair',
                             default=gv.DEFAULT_PAIR,
                             help=pair_help)
    parser_ohlc.add_argument(
        '-i',
        '--interval',
        default=1,
        help=
        "return ohlc data for interval in minutes; 1, 5, 15, 30, 60, 240, 1440, 10800, 21600."
    )
    parser_ohlc.add_argument('-s',
                             '--since',
                             default=None,
                             help="return ohlc data since given id")
    parser_ohlc.add_argument('-c',
                             '--count',
                             type=int,
                             default=50,
                             help="maximum number of intervals.")
    parser_ohlc.set_defaults(sub_func=ohlc)

    # -----------
    # Private API
    # -----------

    # User balance
    parser_balance = subparsers.add_parser(
        'balance',
        aliases=['bal'],
        help='[private] Get your current balance',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_balance.set_defaults(sub_func=get_balance)

    # User trade balance
    parser_trade_balance = subparsers.add_parser(
        'trade_balance',
        aliases=['tbal'],
        help='[private] Get your current trade balance',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_trade_balance.set_defaults(sub_func=get_trade_balance)

    # Place an order
    parser_place = subparsers.add_parser(
        'place',
        aliases=['p'],
        help='[private] Place an order',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_place.add_argument('type', choices=['sell', 'buy'])
    parser_place.add_argument('volume', type=Decimal)
    parser_place.add_argument('price', default=None, nargs='?')
    parser_place.add_argument('-l',
                              '--leverage',
                              default="none",
                              help='leverage for margin trading')
    parser_place.add_argument('-p',
                              '--pair',
                              default=gv.DEFAULT_PAIR,
                              help=pair_help)
    parser_place.add_argument(
        '-t',
        '--ordertype',
        choices=['market', 'limit'],
        default='limit',
        help="order type. Currently implemented: [limit, market].")
    parser_place.add_argument('-s',
                              '--starttm',
                              default=0,
                              help="scheduled start time")
    parser_place.add_argument('-e',
                              '--expiretm',
                              default=0,
                              help="expiration time")
    parser_place.add_argument('-q',
                              '--viqc',
                              action='store_true',
                              help="volume in quote currency")
    parser_place.add_argument('-v',
                              '--validate',
                              action='store_true',
                              help="validate inputs only. do not submit order")
    parser_place.set_defaults(sub_func=place_order)

    # cancel an order
    parser_cancel = subparsers.add_parser(
        'cancel',
        aliases=['x'],
        help='[private] Cancel orders',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_cancel.add_argument('order_ids',
                               type=str,
                               nargs='+',
                               help="transaction ids")
    parser_cancel.set_defaults(sub_func=cancel_order)

    # List of open orders
    parser_olist = subparsers.add_parser(
        'olist',
        aliases=['ol'],
        help='[private] Get a list of your open orders',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_olist.add_argument('-p', '--pair', default=None, help=pair_help)
    parser_olist.add_argument(
        '-i',
        '--txid',
        default=None,
        help=
        'comma delimited list of transaction ids to query info about (20 maximum)'
    )
    parser_olist.set_defaults(sub_func=list_open_orders)

    # List of open positions
    parser_oplist = subparsers.add_parser(
        'positions',
        aliases=['pos'],
        help='[private] Get a list of your open positions',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_oplist.set_defaults(sub_func=list_open_positions)

    # List of closed orders
    parser_clist = subparsers.add_parser(
        'clist',
        aliases=['cl'],
        help='[private] Get a list of your closed orders',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_clist.add_argument('-p', '--pair', default=None, help=pair_help)
    parser_clist.add_argument(
        '-i',
        '--txid',
        default=None,
        help=
        'comma delimited list of transaction ids to query info about (20 maximum)'
    )
    parser_clist.set_defaults(sub_func=list_closed_orders)

    # Get ledgers info
    parser_ledgers = subparsers.add_parser(
        'ledgers',
        aliases=['lg'],
        help='[private] Get ledgers info',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_ledgers.add_argument(
        '-a',
        '--asset',
        default='all',
        help='comma delimited list of assets to restrict output to')
    parser_ledgers.add_argument(
        '-t',
        '--type',
        default='all',
        help=
        'type of ledger to retrieve. Possible values: all|deposit|withdrawal|trade|margin'
    )
    parser_ledgers.add_argument(
        '-s',
        '--start',
        default=None,
        help='starting unix timestamp or ledger id of results (exclusive)')
    parser_ledgers.add_argument(
        '-e',
        '--end',
        default=None,
        help='ending unix timestamp or ledger id of results (exclusive)')
    parser_ledgers.add_argument('-o',
                                '--ofs',
                                default=None,
                                help='result offset')
    parser_ledgers.add_argument(
        '-i',
        '--id',
        default=None,
        help=
        'comma delimited list of ledger ids to query info about (20 maximum)')
    parser_ledgers.set_defaults(sub_func=get_ledgers)

    # Get trades info
    parser_trades = subparsers.add_parser(
        'trades',
        aliases=['tr'],
        help='[private] Get trades history',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_trades.add_argument(
        '-t',
        '--type',
        default='all',
        help=
        'type of trade. Values: all|any position|closed position|closing position|no position'
    )
    # TODO: trades parameter
    parser_trades.add_argument(
        '-s',
        '--start',
        default=None,
        help='starting unix timestamp or trade tx id of results (exclusive)')
    parser_trades.add_argument(
        '-e',
        '--end',
        default=None,
        help='ending unix timestamp or trade tx id of results (exclusive)')
    parser_trades.add_argument('-o',
                               '--ofs',
                               default=None,
                               help='result offset')
    parser_trades.add_argument(
        '-i',
        '--id',
        default=None,
        help=
        'comma delimited list of transaction ids to query info about (20 maximum)'
    )
    parser_trades.add_argument('-p', '--pair', default=None, help=pair_help)
    parser_trades.set_defaults(sub_func=trades)

    # User Funding

    # Deposit Methods
    parser_deposit_methods = subparsers.add_parser(
        'deposit_methods',
        aliases=['dm'],
        help='[private] Get deposit methods',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_deposit_methods.add_argument('-a',
                                        '--asset',
                                        default=gv.DEFAULT_ASSET,
                                        help='asset being deposited')
    parser_deposit_methods.set_defaults(sub_func=get_deposit_methods)

    # Deposit Addresses
    parser_deposit_addresses = subparsers.add_parser(
        'deposit_addresses',
        aliases=['da'],
        help='[private] Get deposit addresses',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser_deposit_addresses.add_argument('-a',
                                          '--asset',
                                          default=gv.DEFAULT_ASSET,
                                          help='asset being deposited')
    parser_deposit_addresses.add_argument('-m',
                                          '--method',
                                          default=None,
                                          help='name of the deposit method')
    parser_deposit_addresses.add_argument(
        '-n',
        '--new',
        action='store_true',
        help="whether or not to generate a new address")
    parser_deposit_addresses.add_argument('-1',
                                          '--one',
                                          action='store_true',
                                          help="return just one address")
    parser_deposit_addresses.set_defaults(sub_func=get_deposit_addresses)

    args = parser.parse_args()

    # make sure that either sub_func or main_func is defined
    # otherwise just print usage and exit
    # (this weird construction is a hack to work around Python bug #9351 https://bugs.python.org/issue9351)
    if all(
        [vars(args).get(f, None) is None for f in ['sub_func', 'main_func']]):
        parser.print_usage()
        sys.exit(0)

    # Propagate cron mode to the shared global-variables module.
    gv.CRON = args.cron

    # Trick from https://stackoverflow.com/a/37059682/862188
    # in order to be able to parse things like "\t" or "\\" for example
    separator = codecs.escape_decode(bytes(args.csvseparator,
                                           "utf-8"))[0].decode("utf-8")
    gv.CSV_SEPARATOR = separator

    return args
Exemplo n.º 43
0
    "--extrusion",
    metavar=('EXTRUSION'),
    dest='extrusion',
    type=float,
    default=0.,
    help="EXTRUSION = (height of patch - box height) / 2, default 0.")
parser.add_argument("-d",
                    "--delimiter",
                    metavar=('DELIMITER'),
                    dest='inputdelimiter',
                    default=',',
                    help="delimiter to seperate coordinates, default ','.")

args = parser.parse_args()

# Interpret escape sequences typed on the command line (e.g. "\t"): round-trip
# through bytes so escape_decode can process them, then decode back to str.
delimiter = codecs.escape_decode(bytes(args.inputdelimiter,
                                       "utf-8"))[0].decode("utf-8")
# http://stackoverflow.com/questions/4020539/process-escape-sequences-in-a-string-in-python#answer-37059682
# NOTE(review): inputfileobj is never explicitly closed in the visible code —
# consider a context manager; confirm downstream lifetime.
inputfileobj = open(args.MASKFILENAME, 'r')
filereader = csv.reader(inputfileobj, delimiter=delimiter)
linenum = 0  # current CSV line number, used in error messages below
for coordquad in filereader:
    linenum += 1
    if (len(coordquad) < 4):
        raise (Exception("Error: incomplete quadruple detected at line %d." %
                         linenum))
    else:
        rtx = min(float(coordquad[0]), float(coordquad[2]))
        rty = min(float(coordquad[1]), float(coordquad[3]))
        rtw = abs(float(coordquad[0]) - float(coordquad[2]))
        rth = abs(float(coordquad[1]) - float(coordquad[3]))
        # rotate 180: (900,900) - All cood
Exemplo n.º 44
0
def unescapestr(s):
    """Resolve backslash escape sequences in *s* and return the raw bytes."""
    decoded, _consumed = codecs.escape_decode(s)
    return decoded
Exemplo n.º 45
0
    #     #define PROT_GROWSDOWN 0x01000000
    #     #define PROT_GROWSUP 0x02000000
    PROT_NONE = 0x0
    PROT_READ = 0x1
    PROT_WRITE = 0x2
    PROT_EXEC = 0x4

    # Machine code of an empty C function, generated with gcc
    # Disassembly:
    #     55        push   %ebp
    #     89 e5     mov    %esp,%ebp
    #     5d        pop    %ebp
    #     c3        ret
    #code = "\x48\x31\xc0\x50\x48\xbf\x2f\x62\x69\x6e\x2f\x2f\x73\x68\x57\xb0\x3b\x48\x89\xe7\x48\x31\xf6\x48\x31\xd2\x0f\x05"

    code = codecs.escape_decode(sys.argv[1])[0]

    # Get the address of the code
    addr = addressof(cast(c_char_p(code), POINTER(c_char)).contents)

    # Get the start of the page containing the code and set the permissions
    pagesize = 0x1000
    pagestart = addr & ~(pagesize - 1)
    if mprotect(pagestart, pagesize, PROT_READ | PROT_WRITE | PROT_EXEC):
        raise RuntimeError("Failed to set permissions using mprotect()")

    # Generate ctypes function object from code
    functype = CFUNCTYPE(None)
    f = functype(addr)

    # Call the function
Exemplo n.º 46
0
 def test_empty_escape_decode(self):
     """Decoding an empty string yields an empty result and zero bytes consumed."""
     # assertEquals is a deprecated alias; use assertEqual.
     # NOTE(review): the ("", 0) expectation looks Python 2-era — on Python 3,
     # escape_decode returns bytes, i.e. (b"", 0).  Confirm target runtime.
     self.assertEqual(codecs.escape_decode(""), ("", 0))
Exemplo n.º 47
0
 def OnToolSearchUpClick(self, evt):
     """Handle the search-up tool: escape-decode the query and search backward."""
     query = codecs.escape_decode(self.ctrlSearch.GetSearchString())[0]
     if not query:
         wx.MessageBox("Please enter string to search", "Error", wx.ICON_ERROR)
         return
     self.DoSearch(query, False)
Exemplo n.º 48
0
def unescape(value):
    """Decode backslash escapes in *value*, returning the result as UTF-8 text."""
    raw_bytes = codecs.escape_decode(value)[0]
    return raw_bytes.decode('utf-8')
Exemplo n.º 49
0
 def print(self, data):
     """Escape-decode *data* (a str) to raw bytes and forward them to _print."""
     raw = codecs.escape_decode(data.encode("utf-8"))[0]
     self._print(raw)
Exemplo n.º 50
0
def decoded(string):
    """Process backslash escapes in *string* while keeping backslash-quote pairs intact."""
    # Doubling the backslash before a quote makes escape_decode emit it literally.
    protected = string.replace(r'\"', r'\\"')
    return codecs.escape_decode(bytes(protected, "utf-8"))[0].decode("utf-8")
Exemplo n.º 51
0
def decode_escaped_string(text, encoding='utf-8'):
    """Resolve backslash escapes in *text*, then decode the bytes as *encoding*."""
    raw, _ = codecs.escape_decode(text)
    return raw.decode(encoding)
Exemplo n.º 52
0
# to a string.  The function must return a value that is compatible with
# FieldDescriptor.default_value and therefore a unicode string.
# Maps each concrete field class to a function converting a field default
# value to its string form (Python 2 era: uses the ``unicode`` builtin).
# BytesField defaults are backslash-escaped via the codecs escape codec.
_DEFAULT_TO_STRING_MAP = {
    messages.IntegerField: unicode,
    messages.FloatField: unicode,
    messages.BooleanField: lambda value: value and u'true' or u'false',
    messages.BytesField: lambda value: codecs.escape_encode(value)[0],
    messages.StringField: lambda value: value,
    messages.EnumField: lambda value: unicode(value.number),
}

# Inverse of _DEFAULT_TO_STRING_MAP: parses the string form back into a
# Python value (escape_decode undoes the escape_encode above).
_DEFAULT_FROM_STRING_MAP = {
    messages.IntegerField: int,
    messages.FloatField: float,
    messages.BooleanField: lambda value: value == u'true',
    messages.BytesField: lambda value: codecs.escape_decode(value)[0],
    messages.StringField: lambda value: value,
    messages.EnumField: int,
}


class EnumValueDescriptor(messages.Message):
  """Enum value descriptor.

  Fields:
    name: Name of enumeration value.
    number: Number of enumeration value.
  """

  # NOTE(review): presumably mirrors protobuf's EnumValueDescriptorProto
  # message shape — confirm against the field definitions that follow.
  # TODO(rafek): Why are these listed as optional in descriptor.proto.
  # Harmonize?
Exemplo n.º 53
0
 def string_to_bytes(string):
     """Return the UTF-8 encoding of *string* as bytes.

     The previous implementation hex-encoded the string, rebuilt a
     backslash-x escape representation, then ran it through
     codecs.escape_decode (a non-public API) — a roundabout equivalent of
     ``string.encode("utf-8")`` that additionally raised ValueError on the
     empty string (it produced a dangling ``\\x`` escape).
     """
     return string.encode("utf-8")
Exemplo n.º 54
0
    def generate(
        self,
        prompt: str = "",
        temperature: float = 0.7,
        max_tokens: int = 32,
        stop: str = "",
        model: str = "davinci",
        bg: tuple = (31, 36, 40),
        accent: tuple = (0, 64, 0),
        pngquant: bool = False,
        output_txt: str = None,
        output_img: str = None,
        include_prompt: bool = True,
        include_coloring: bool = True,
        watermark: str = "Generated using GPT-3 via OpenAI's API",
    ):
        """Stream a completion from the OpenAI API and render it.

        Tokens are printed one-by-one to a rich Console with a background
        color derived from each token's log-probability, then the transcript
        is exported as an HTML-rendered PNG (when an imgmaker is configured)
        and appended to a plain-text file.

        Args:
            prompt: text the model continues from.
            temperature: sampling temperature sent to the API.
            max_tokens: maximum number of tokens to generate.
            stop: single stop sequence; output ends when it is produced.
            model: OpenAI engine name interpolated into the request URL.
            bg: RGB background color tuple for the rendering.
            accent: RGB accent color used for probability coloring.
            pngquant: compress the rendered PNG with pngquant.
            output_txt: optional explicit text output path (default under txt_output/).
            output_img: optional explicit image output path (default under img_output/).
            include_prompt: echo the prompt (bold) before the completion.
            include_coloring: color tokens by probability (via derive_token_bg).
            watermark: caption passed through to the image template.
        """

        # NOTE(review): assert is stripped under `python -O`; raising TypeError
        # would be a sturdier input guard.
        assert isinstance(stop, str), "stop is not a str."

        data = {
            "prompt": prompt,
            "max_tokens": max_tokens,
            "temperature": temperature,
            "stop": stop,
            "stream": True,
            "logprobs": 1,
        }

        console = Console(record=True)
        console.clear()

        if include_prompt:
            prompt_text = Text(prompt, style="bold", end="")
            console.print(prompt_text, end="")

        # Stream server-sent events; each chunk is one JSON payload.
        with httpx.stream(
            "POST",
            f"https://api.openai.com/v1/engines/{model}/completions",
            headers=self.headers,
            data=json.dumps(data),
            timeout=None,
        ) as r:
            for chunk in r.iter_text():
                text = chunk[6:]  # JSON chunks are prepended with "data: "
                if len(text) < 10 and "[DONE]" in text:
                    break

                temp_token = None
                logprobs = json.loads(text)["choices"][0]["logprobs"]
                tokens = logprobs["tokens"]
                token_logprobs = logprobs["token_logprobs"]
                for i in range(len(tokens)):
                    token = tokens[i]
                    log_prob = token_logprobs[i]

                    if token == stop or token == "<|endoftext|>":
                        break

                    if token.startswith("bytes:") and not temp_token:
                        # We need to hold the 2-byte token to the next 1-byte token
                        # to get the full bytestring to decode
                        #
                        # The API-returned tokens are in the form:
                        # "bytes:\xe2\x80" and "bytes:\x9d"
                        temp_token = token[6:]
                        temp_prob = log_prob
                    else:
                        if temp_token:
                            bytestring = temp_token + token[6:]

                            # https://stackoverflow.com/a/37059682/9314418
                            token = codecs.escape_decode(bytestring, "utf-8")[0].decode(
                                "utf-8"
                            )
                            temp_token = None
                            log_prob = temp_prob  # the true prob is the first one
                        text = Text(
                            token,
                            style=f"on {self.derive_token_bg(log_prob, bg, accent, include_coloring,)}",
                            end="",
                        )
                        console.print(text, end="")

        # Export the generated text as HTML.
        raw_html = self.replace_hex_colors(
            console.export_html(inline_styles=True, code_format="{code}", clear=False)
        )

        # Render the HTML as an image
        prompt_hash = hashlib.sha256(bytes(prompt, "utf-8")).hexdigest()[0:8]
        temp_string = str(temperature).replace(".", "_")
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")

        if output_img:
            img_file_name = output_img
        else:
            if not os.path.exists("img_output"):
                os.makedirs("img_output")
            img_file_name = f"img_output/{timestamp}__{prompt_hash}__{temp_string}.png"

        if self.imgmaker:
            self.imgmaker.generate(
                "dark.html",
                {
                    "html": raw_html.replace("\n", "</br>"),
                    "accent": f"rgb({accent[0]},{accent[1]},{accent[2]})",
                    "watermark": watermark,
                },
                width=450,
                height=600,
                downsample=False,
                output_file=img_file_name,
                use_pngquant=pngquant,
            )

        # Save the generated text to a plain-text file
        if output_txt:
            txt_file_name = output_txt
        else:
            if not os.path.exists("txt_output"):
                os.makedirs("txt_output")
            txt_file_name = f"txt_output/{prompt_hash}__{temp_string}.txt"

        with open(txt_file_name, "a", encoding="utf-8") as f:
            f.write(console.export_text() + "\n" + "=" * 20 + "\n")

        console.line()
Exemplo n.º 55
0
 def decode(self, input, final=False):
     """Decode *input*, resolving backslash escapes per the configured error policy."""
     decoded_bytes, _consumed = codecs.escape_decode(input, self.errors)
     return decoded_bytes
Exemplo n.º 56
0
    def parse(self):
        isdir = os.path.isdir(self._path)
        if isdir:
            props = os.path.join(self._path, "font.props")
            if os.path.isfile(props):
                with open(props) as fd:
                    data = fd.readlines()
            else:
                raise Exception("Not an SFD directory")
        else:
            with open(self._path) as fd:
                data = fd.readlines()

        font = self._font
        info = font.info

        charData = None
        offsetMetrics = []

        i = 0
        while i < len(data):
            line = data[i]
            i += 1

            if ":" in line:
                key, value = [v.strip() for v in line.split(":", 1)]
            else:
                key = line.strip()
                value = None

            if i == 1:
                if key != "SplineFontDB":
                    raise Exception("Not an SFD file.")
                version = float(value)
                if version != 3.0:
                    raise Exception("Unsupported SFD version: %f" % version)

            elif key == "FontName":
                info.postscriptFontName = value
            elif key == "FullName":
                info.postscriptFullName = value
            elif key == "FamilyName":
                info.familyName = value
            elif key == "DefaultBaseFilename":
                pass  # info.XXX = value
            elif key == "Weight":
                info.postscriptWeightName = value
            elif key == "Copyright":
                # Decode escape sequences.
                info.copyright = codecs.escape_decode(value)[0].decode("utf-8")
            elif key == "Comments":
                info.note = value
            elif key == "UComments":
                old = info.note
                info.note = SFDReadUTF7(value)
                if old:
                    info.note += "\n" + old
            elif key == "FontLog":
                if not info.note:
                    info.note = ""
                else:
                    info.note = "\n"
                info.note += "Font log:\n" + SFDReadUTF7(value)
            elif key == "Version":
                info.versionMajor, info.versionMinor = parseVersion(value)
            elif key == "ItalicAngle":
                info.italicAngle = info.postscriptSlantAngle = float(value)
            elif key == "UnderlinePosition":
                info.postscriptUnderlinePosition = float(value)
            elif key == "UnderlineWidth":
                info.postscriptUnderlineThickness = float(value)
            elif key in "Ascent":
                info.ascender = int(value)
            elif key in "Descent":
                info.descender = -int(value)
            elif key == "sfntRevision":
                pass  # info.XXX = int(value, 16)
            elif key == "WidthSeparation":
                pass  # XXX = float(value) # auto spacing
            elif key == "LayerCount":
                self._layers = int(value) * [None]
                self._layerType = int(value) * [None]
            elif key == "Layer":
                m = LAYER_RE.match(value)
                idx, quadratic, name, _ = m.groups()
                idx = int(idx)
                quadratic = bool(int(quadratic))
                name = SFDReadUTF7(name)
                if idx == 1:
                    self._layers[idx] = font.layers.defaultLayer
                else:
                    self._layers[idx] = name
                self._layerType[idx] = quadratic
            elif key == "DisplayLayer":
                pass  # XXX default layer
            elif key == "DisplaySize":
                pass  # GUI
            elif key == "AntiAlias":
                pass  # GUI
            elif key == "FitToEm":
                pass  # GUI
            elif key == "WinInfo":
                pass  # GUI
            elif key == "Encoding":
                pass  # XXX encoding = value
            elif key == "CreationTime":
                v = datetime.utcfromtimestamp(int(value))
                info.openTypeHeadCreated = v.strftime("%Y/%m/%d %H:%M:%S")
            elif key == "ModificationTime":
                pass  # XXX
            elif key == "FSType":
                v = int(value)
                v = [bit for bit in range(16) if v & (1 << bit)]
                info.openTypeOS2Type = v
            elif key == "PfmFamily":
                pass  # info.XXX = value
            elif key in ("TTFWeight", "PfmWeight"):
                info.openTypeOS2WeightClass = int(value)
            elif key == "TTFWidth":
                info.openTypeOS2WidthClass = int(value)
            elif key == "Panose":
                v = value.split()
                info.openTypeOS2Panose = [int(n) for n in v]
            elif key == "LineGap":
                info.openTypeHheaLineGap = int(value)
            elif key == "VLineGap":
                info.openTypeVheaVertTypoLineGap = int(value)
            elif key == "HheadAscent":
                info.openTypeHheaAscender = int(value)
            elif key == "HheadDescent":
                info.openTypeHheaDescender = int(value)
            elif key == "OS2TypoLinegap":
                info.openTypeOS2TypoLineGap = int(value)
            elif key == "OS2Vendor":
                info.openTypeOS2VendorID = value.strip("'")
            elif key == "OS2FamilyClass":
                v = int(value)
                info.openTypeOS2FamilyClass = (v >> 8, v & 0xff)
            elif key == "OS2Version":
                pass  # XXX
            elif key == "OS2_WeightWidthSlopeOnly":
                if int(value):
                    if not info.openTypeOS2Selection:
                        info.openTypeOS2Selection = []
                    info.openTypeOS2Selection += [8]
            elif key == "OS2_UseTypoMetrics":
                if not info.openTypeOS2Selection:
                    info.openTypeOS2Selection = []
                info.openTypeOS2Selection += [7]
            elif key == "OS2CodePages":
                pass  # XXX
            elif key == "OS2UnicodeRanges":
                pass  # XXX
            elif key == "OS2TypoAscent":
                info.openTypeOS2TypoAscender = int(value)
            elif key == "OS2TypoDescent":
                info.openTypeOS2TypoDescender = int(value)
            elif key == "OS2WinAscent":
                info.openTypeOS2WinAscent = int(value)
            elif key == "OS2WinDescent":
                info.openTypeOS2WinDescent = int(value)
            elif key in self._OFFSET_METRICS:
                if int(value):
                    offsetMetrics.append(self._OFFSET_METRICS[key])
            elif key == "OS2SubXSize":
                info.openTypeOS2SubscriptXSize = int(value)
            elif key == "OS2SubYSize":
                info.openTypeOS2SubscriptYSize = int(value)
            elif key == "OS2SubXOff":
                info.openTypeOS2SubscriptXOffset = int(value)
            elif key == "OS2SubYOff":
                info.openTypeOS2SubscriptYOffset = int(value)
            elif key == "OS2SupXSize":
                info.openTypeOS2SuperscriptXSize = int(value)
            elif key == "OS2SupYSize":
                info.openTypeOS2SuperscriptYSize = int(value)
            elif key == "OS2SupXOff":
                info.openTypeOS2SuperscriptXOffset = int(value)
            elif key == "OS2SupYOff":
                info.openTypeOS2SuperscriptYOffset = int(value)
            elif key == "OS2StrikeYSize":
                info.openTypeOS2StrikeoutSize = int(value)
            elif key == "OS2StrikeYPos":
                info.openTypeOS2StrikeoutPosition = int(value)
            elif key == "OS2CapHeight":
                info.capHeight = int(value)
            elif key == "OS2XHeight":
                info.xHeight = int(value)
            elif key == "UniqueID":
                info.postscriptUniqueID = int(value)
            elif key == "LangName":
                self._parseNames(value)
            elif key == "GaspTable":
                self._parseGaspTable(value)
            elif key == "BeginPrivate":
                section, i = self._getSection(data, i, "EndPrivate", value)
                self._parsePrivateDict(section)
            elif key == "BeginChars":
                charData, i = self._getSection(data, i, "EndChars")
            elif key == "Grid":
                grid, i = self._getSection(data, i, "EndSplineSet")
                self._parseGrid(grid)
            elif key == "KernClass2":
                i = self._parseKernClass(data, i, value)
            elif key == "Lookup":
                self._parseLookup(value)
            elif key == "AnchorClass2":
                self._parseAnchorClass(value)
            elif key == "XUID":
                pass  # XXX
            elif key == "UnicodeInterp":
                pass  # XXX
            elif key == "NameList":
                pass  # XXX
            elif key == "DEI":
                pass
            elif key == "EndSplineFont":
                break

        #else:
        #    print(key, value)

        for idx, name in enumerate(self._layers):
            if not isinstance(name, (str, unicode)):
                continue
            if idx not in (0, 1) and self._layers.count(name) != 1:
                # FontForge layer names are not unique, make sure ours are.
                name += "_%d" % idx
            self._layers[idx] = font.newLayer(name)

        if isdir:
            assert charData is None
            import glob
            charData = []
            for filename in glob.iglob(os.path.join(self._path, '*.glyph')):
                with open(filename) as fp:
                    charData += fp.readlines()

        self._parseChars(charData)

        # We can’t insert the references while parsing the glyphs since
        # FontForge uses glyph indices so we need to know the glyph order
        # first.
        self._processReferences()

        # Same for kerning.
        self._processKerns()

        # We process all kern classes together so we can detect UFO group
        # overlap issue and act accordingly.
        subtables = []
        for lookup in self._gposLookups:
            for subtable in self._gposLookups[lookup]:
                if subtable in self._kernClasses:
                    subtables.append(self._kernClasses[subtable])
        processKernClasses(self._font, subtables)

        # Need to run after parsing glyphs so that we can calculate font
        # bounding box.
        self._fixOffsetMetrics(offsetMetrics)

        self._writeGSUBGPOS(isgpos=False)
        self._writeGSUBGPOS(isgpos=True)
        self._writeGDEF()

        # FontForge does not have an explicit UPEM setting, it is the sum of its
        # ascender and descender.
        info.unitsPerEm = info.ascender - info.descender

        # Fallback for missing styleName.
        # FontForge does more magic in its _GetModifiers functions, but this is
        # a stripped down version.
        if info.styleName is None:
            value = "Regular"
            if info.postscriptFontName and "-" in info.postscriptFontName:
                value = info.postscriptFontName.split("-", 1)[1]
            elif info.postscriptWeightName:
                value = info.postscriptWeightName
            info.styleName = value
Exemplo n.º 57
0
    def get_song(self, videoId: str) -> Dict:
        """Fetch metadata for a song or video from YouTube's get_video_info endpoint.

        :param videoId: Video id
        :return: Dictionary with song metadata (videoDetails of the player
            response, augmented with ``provider``, ``artists``, ``copyright``,
            ``release``, ``production`` and ``streamingData`` when the
            description is YouTube auto-generated). If the endpoint returns no
            ``player_response``, the raw parsed query dict is returned instead.
        """
        url = "https://www.youtube.com/get_video_info"
        query = {"video_id": videoId, "hl": self.language, "el": "detailpage"}
        response = requests.get(url, query, headers=self.headers, proxies=self.proxies)

        parsed = parse_qs(response.text)
        if 'player_response' not in parsed:
            # Endpoint did not hand back a player response; surface what we got.
            return parsed

        player = json.loads(parsed['player_response'][0])
        meta = player['videoDetails']

        if meta['shortDescription'].endswith("Auto-generated by YouTube."):
            # Auto-generated descriptions have a fixed paragraph layout:
            # provider / artists / (album) / copyright / release / production.
            try:
                paragraphs = [
                    codecs.escape_decode(part)[0].decode('utf-8')
                    for part in meta['shortDescription'].split('\n\n')
                ]
                meta['provider'] = paragraphs[0].replace('Provided to YouTube by ', '')
                meta['artists'] = paragraphs[1].split(' · ')[1:]
                meta['copyright'] = paragraphs[3]
                if len(paragraphs) < 5:
                    meta['release'] = None
                else:
                    meta['release'] = paragraphs[4].replace('Released on: ', '')
                if len(paragraphs) < 6:
                    meta['production'] = None
                else:
                    meta['production'] = paragraphs[5].split('\n')
            except (KeyError, IndexError):
                # Description did not match the expected layout; keep whatever
                # fields were filled in so far (best effort, same as before).
                pass

        meta['streamingData'] = player['streamingData']
        return meta
Exemplo n.º 58
0
def parse_txt(mnemo, attrib, txt, loc_db):
    """Parse an assembly listing. Returns an AsmCfg instance

    @mnemo: architecture used
    @attrib: architecture attribute
    @txt: assembly listing
    @loc_db: the LocationDB instance used to handle labels of the listing

    """

    C_NEXT = asmblock.AsmConstraint.c_next
    C_TO = asmblock.AsmConstraint.c_to

    # First pass: tokenize the listing into a flat list of LocKey (labels),
    # AsmRaw (data), Directive* markers and instruction objects.
    lines = []
    # parse each line
    for line in txt.split('\n'):
        # empty
        if EMPTY_RE.match(line):
            continue
        # comment
        if COMMENT_RE.match(line):
            continue
        # labels to forget
        if FORGET_LABEL_RE.match(line):
            continue
        # label beginning with .L
        match_re = LABEL_RE.match(line)
        if match_re:
            label_name = match_re.group(1)
            label = loc_db.get_or_create_name_location(label_name)
            lines.append(label)
            continue
        # directive
        if DIRECTIVE_START_RE.match(line):
            match_re = DIRECTIVE_RE.match(line)
            directive = match_re.group(1)
            if directive in ['text', 'data', 'bss']:
                continue
            if directive in ['string', 'ascii']:
                # XXX HACK: un-escape \n and \r before handing the quoted
                # payload to escape_decode
                line = line.replace(r'\n', '\n').replace(r'\r', '\r')
                raw = line[line.find(r'"') + 1:line.rfind(r'"')]
                raw = codecs.escape_decode(raw)[0]
                if directive == 'string':
                    # .string is NUL-terminated, .ascii is not
                    raw += b"\x00"
                lines.append(asmblock.AsmRaw(raw))
                continue
            if directive == 'ustring':
                # XXX HACK
                line = line.replace(r'\n', '\n').replace(r'\r', '\r')
                raw = line[line.find(r'"') + 1:line.rfind(r'"')] + "\x00"
                raw = codecs.escape_decode(raw)[0]
                # Interleave a NUL after each byte (UTF-16LE-style widening).
                # join() instead of repeated bytes += avoids quadratic cost.
                out = b''.join(raw[i:i + 1] + b'\x00' for i in range(len(raw)))
                lines.append(asmblock.AsmRaw(out))
                continue
            if directive in declarator:
                data_raw = line[match_re.end():].split(' ', 1)[1]
                data_raw = data_raw.split(',')
                size = declarator[directive]
                expr_list = []

                # parse each comma-separated element into a sized expression
                for element in data_raw:
                    element = element.strip()
                    element_parsed = base_expr.parseString(element)[0]
                    element_expr = asm_ast_to_expr_with_size(
                        element_parsed, loc_db, size)
                    expr_list.append(element_expr)

                raw_data = asmblock.AsmRaw(expr_list)
                raw_data.element_size = size
                lines.append(raw_data)
                continue
            if directive == 'comm':
                # TODO
                continue
            if directive == 'split':  # custom command
                lines.append(DirectiveSplit())
                continue
            if directive == 'dontsplit':  # custom command
                lines.append(DirectiveDontSplit())
                continue
            if directive == "align":
                align_value = int(line[match_re.end():], 0)
                lines.append(DirectiveAlign(align_value))
                continue
            if directive in [
                    'file', 'intel_syntax', 'globl', 'local', 'type', 'size',
                    'align', 'ident', 'section'
            ]:
                continue
            if directive[0:4] == 'cfi_':
                continue

            raise ValueError("unknown directive %s" % directive)

        # label (safety net; same handling as the .L case above. Use the plain
        # string name — encoding it to bytes would make loc_db create a second,
        # distinct location for a label already registered as str)
        match_re = LABEL_RE.match(line)
        if match_re:
            label_name = match_re.group(1)
            label = loc_db.get_or_create_name_location(label_name)
            lines.append(label)
            continue

        # code: strip trailing ';' comment and whitespace, then assemble
        if ';' in line:
            line = line[:line.find(';')]
        line = line.strip(' ').strip('\t')
        instr = mnemo.fromstring(line, loc_db, attrib)
        lines.append(instr)

    asmblock.log_asmblock.info("___pre asm oki___")
    # Second pass: group the token list into basic blocks and wire the
    # next/to constraints between them.

    cur_block = None
    state = STATE_NO_BLOC
    i = 0
    asmcfg = asmblock.AsmCFG(loc_db)
    block_to_nlink = None
    delayslot = 0
    while i < len(lines):
        if delayslot:
            delayslot -= 1
            if delayslot == 0:
                # delay slot consumed: the block is finished
                state = STATE_NO_BLOC
        line = lines[i]
        # no current block
        if state == STATE_NO_BLOC:
            if isinstance(line, DirectiveDontSplit):
                block_to_nlink = cur_block
                i += 1
                continue
            elif isinstance(line, DirectiveSplit):
                block_to_nlink = None
                i += 1
                continue
            elif not isinstance(line, LocKey):
                # First line must be a label. If it's not the case, generate
                # it.
                loc = loc_db.add_location()
                cur_block = asmblock.AsmBlock(loc_db,
                                              loc,
                                              alignment=mnemo.alignment)
            else:
                cur_block = asmblock.AsmBlock(loc_db,
                                              line,
                                              alignment=mnemo.alignment)
                i += 1
            # Generate the current block
            asmcfg.add_block(cur_block)
            state = STATE_IN_BLOC
            if block_to_nlink:
                block_to_nlink.addto(
                    asmblock.AsmConstraint(cur_block.loc_key, C_NEXT))
            block_to_nlink = None
            continue

        # in block
        elif state == STATE_IN_BLOC:
            if isinstance(line, DirectiveSplit):
                state = STATE_NO_BLOC
                block_to_nlink = None
            elif isinstance(line, DirectiveDontSplit):
                state = STATE_NO_BLOC
                block_to_nlink = cur_block
            elif isinstance(line, DirectiveAlign):
                cur_block.alignment = line.alignment
            elif isinstance(line, asmblock.AsmRaw):
                cur_block.addline(line)
                block_to_nlink = cur_block
            elif isinstance(line, LocKey):
                # a label inside a block starts a new block; link fallthrough
                if block_to_nlink:
                    cur_block.addto(asmblock.AsmConstraint(line, C_NEXT))
                    block_to_nlink = None
                state = STATE_NO_BLOC
                continue
            # instruction
            elif isinstance(line, instruction):
                cur_block.addline(line)
                block_to_nlink = cur_block
                if not line.breakflow():
                    i += 1
                    continue
                if delayslot:
                    raise RuntimeError("Cannot have breakflow in delayslot")
                if line.dstflow():
                    # add explicit "to" constraints for symbolic destinations
                    for dst in line.getdstflow(loc_db):
                        if not isinstance(dst, ExprId):
                            continue
                        if dst in mnemo.regs.all_regs_ids:
                            continue
                        cur_block.addto(asmblock.AsmConstraint(dst.name, C_TO))

                if not line.splitflow():
                    block_to_nlink = None

                # keep the block open for the instruction's delay slot(s)
                delayslot = line.delayslot + 1
            else:
                raise RuntimeError("unknown class %s" % line.__class__)
        i += 1

    for block in asmcfg.blocks:
        # Fix multiple constraints
        block.fix_constraints()

        # Log block
        asmblock.log_asmblock.info(block)
    return asmcfg
Exemplo n.º 59
0
    def test_escape_decode(self):
        """Sanity checks for codecs.escape_decode.

        NOTE(review): the expected values here are ``str`` objects — this
        matches Python 2 / IronPython semantics; CPython 3's escape_decode
        returns ``bytes``. Confirm the target runtime before porting.
        """
        # Real control characters (\a, \b, ...) in the input pass through
        # unchanged; the reported length counts the input characters.
        value, length = codecs.escape_decode("ab\a\b\t\n\r\f\vba")
        self.assertEqual(value, 'ab\x07\x08\t\n\r\x0c\x0bba')
        self.assertEqual(length, 11)
        
        # A backslash escape sequence ("\\a" is backslash + 'a') decodes to
        # the corresponding control character.
        value, length = codecs.escape_decode("\\a")
        self.assertEqual(value, '\x07')
        self.assertEqual(length, 2)
        
        
        # Mixed input: literal control chars, escape sequences, and
        # backslash followed by a literal control char. Note the expected
        # value has no '\\\t\\\n' run: backslash + real newline is a line
        # continuation and is removed, hence length 47 but a shorter result.
        value, length = codecs.escape_decode("ab\a\b\t\n\r\f\vbaab\\a\\b\\t\\n\\r\\f\\vbaab\\\a\\\b\\\t\\\n\\\r\\\f\\\vba")
        self.assertEqual(value, 'ab\x07\x08\t\n\r\x0c\x0bbaab\x07\x08\t\n\r\x0c\x0bbaab\\\x07\\\x08\\\t\\\r\\\x0c\\\x0bba')
        self.assertEqual(length, 47)
        
        # Backslash followed by a literal BEL: both survive.
        value, length = codecs.escape_decode("\\\a")
        self.assertEqual(value, '\\\x07')
        self.assertEqual(length, 2)

        # errors=None behaves like the default.
        self.assertEqual("abc", codecs.escape_decode("abc", None)[0])
        # 'replace' error handling for truncated/invalid \x escapes:
        # the bad sequence is replaced by '?' and the remainder is kept.
        self.assertEqual("?\\", codecs.escape_decode("\\x", 'replace')[0])
        self.assertEqual("?\\x", codecs.escape_decode("\\x2", 'replace')[0])
        self.assertEqual("?\\x", codecs.escape_decode("\\xI", 'replace')[0])
        self.assertEqual("?\\xI", codecs.escape_decode("\\xII", 'replace')[0])
        self.assertEqual("?\\x1", codecs.escape_decode("\\x1I", 'replace')[0])
        self.assertEqual("?\\xI", codecs.escape_decode("\\xI1", 'replace')[0])
Exemplo n.º 60
0
 def decode_string(text):
     """Decode a string whose '=' characters introduce two-digit hex escapes.

     Each '=' is rewritten to a backslash-x prefix (so 'ab=41' becomes the
     escape sequence 'ab\\x41'), the escapes are resolved to raw bytes, and
     the result is decoded back to str.
     """
     escaped = text.replace('=', '\\x')
     raw_bytes, _ = codecs.escape_decode(escaped)
     return raw_bytes.decode()