from codecs import encode as codecs_encode


def negotiate_sizes(self, options):
    """
    Step through the NAWS handshake.

    Args:
        options (list): The incoming NAWS options.

    """
    if len(options) == 4:
        # NAWS is negotiated with 16bit words: two bytes each for
        # width and height, most significant byte first.
        width = options[0] + options[1]
        self.protocol.protocol_flags["SCREENWIDTH"][0] = int(codecs_encode(width, "hex"), 16)
        height = options[2] + options[3]
        self.protocol.protocol_flags["SCREENHEIGHT"][0] = int(codecs_encode(height, "hex"), 16)
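# A minimal standalone sketch of the 16-bit decode above, assuming each NAWS
# option arrives as a single-byte value; the 80x24 size here is illustrative.
from codecs import encode as codecs_encode

options = [b"\x00", b"\x50", b"\x00", b"\x18"]   # width=80, height=24
width = options[0] + options[1]                  # b"\x00P" (two-byte word)
height = options[2] + options[3]                 # b"\x00\x18"
assert int(codecs_encode(width, "hex"), 16) == 80
assert int(codecs_encode(height, "hex"), 16) == 24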
import io
from codecs import encode as codecs_encode

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes


def get_file_digest(file_path):
    """Return the hex SHA-256 digest of the file at file_path."""
    hash_provider = hashes.Hash(hashes.SHA256(), backend=default_backend())
    # hash in buffer-sized blocks so large files never load fully into memory
    with open(file_path, 'rb') as f:
        for block in iter(lambda: f.read(io.DEFAULT_BUFFER_SIZE), b''):
            hash_provider.update(block)
    hash_bytes = hash_provider.finalize()
    return codecs_encode(hash_bytes, 'hex_codec').decode()
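# Hedged usage sketch: 'example.bin' is a hypothetical path. The result
# should match hashlib's SHA-256 over the same bytes.
import hashlib

digest = get_file_digest('example.bin')
with open('example.bin', 'rb') as f:
    assert digest == hashlib.sha256(f.read()).hexdigest()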
from base64 import b64decode
from codecs import decode as codecs_decode, encode as codecs_encode


def calculate(amount, itemcode):
    """Calculate a chat code embedding `amount` into `itemcode`."""
    # itemcode from base64 to hex
    hex_itemcode = b64decode(itemcode).hex()
    # amount to hex (one byte, zero-padded)
    hex_amount = '{0:02x}'.format(int(amount))
    # splice the amount in as the second byte of the item code
    hexa = hex_itemcode[:2] + hex_amount + hex_itemcode[4:]
    # encode back to base64
    b64 = codecs_encode(codecs_decode(hexa, 'hex'), 'base64').decode()
    # the base64 codec appends a trailing '\n'; strip it
    strb64 = b64.rstrip('\n')
    return "[&" + strb64 + "]"
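# Hedged usage sketch with a made-up item code (bytes 02 01 f5 59 00 00);
# amount=250 (0xfa) replaces the second byte, the stack count.
from base64 import b64encode

fake_itemcode = b64encode(bytes.fromhex('0201f5590000')).decode()  # 'AgH1WQAA'
print(calculate(250, fake_itemcode))  # [&Avr1WQAA]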
def clean_text(text_to_convert, count_unicode=False):
    '''Removes unicode and escape chars from text_to_convert; also counts
    the number of tweets which contained unicode.

    Parameters
    ----------
    text_to_convert: (str) to standardize
    count_unicode: either (True) for tweet text or (False) for a tweet hashtag

    Returns
    -------
    an ascii (str) without unicode and escape chars

    Refer here for more details on tweet formatting:
    https://dev.twitter.com/streaming/overview/processing
    '''
    try:
        # https://docs.python.org/2/howto/unicode.html
        ascii_txt = codecs_encode(text_to_convert, 'ascii')
    except UnicodeEncodeError:
        if count_unicode:
            global unicode_tweets_count  # fine for now
            unicode_tweets_count += 1
        # remove all non-ascii chars from the unicode string
        ascii_txt = codecs_encode(text_to_convert, 'ascii', 'ignore')
    # replace the whitespace chars with a single space
    # ws_replaced_txt = ''.join([' ' if char in chars_to_replace_with_space
    #                            else char for char in ascii_txt])
    ws_replaced_txt = translate(ascii_txt, trans_table)
    return ws_replaced_txt
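# Hedged sketch of the encode step in isolation (unicode_tweets_count,
# translate, and trans_table are module globals defined elsewhere in the
# original): the 'ignore' handler silently drops every non-ascii character.
from codecs import encode as codecs_encode

assert codecs_encode(u'caf\xe9 tweet', 'ascii', 'ignore') == b'caf tweet'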
def parse_xml(pdf):
    # collect one DataFrame row per <text> element in the pdftohtml XML
    x = soup(codecs_encode(pdf, 'utf8', 'ignore')).findAll('page')
    cols = ['page', 'font', 'top', 'left', 'width', 'height', 'text']
    g = pd_DataFrame(columns=cols)
    for pg in x:
        idx = x.index(pg) + 1
        pg = str(pg)
        line_iter = re_findall(r'(<text.*?</text>)', pg)
        for it in line_iter:
            # keys: 'page', then each attribute name, then 'text'
            a = ['page'] + re_findall(r'([a-zA-Z]+)=', it) + ['text']
            text_attrs = it[5:it.find('>')].strip()
            text_contents = str(soup(it).text)
            # values: page number, each numeric attribute, then the text
            b = ([idx] + [int(s) for s in re_findall(r'[0-9]+', text_attrs)]
                 + [text_contents])
            if text_contents.strip() != '':
                g = g.append(dict(zip(a, b)), ignore_index=True)
    return g
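# Hedged sketch of the aliased imports parse_xml appears to assume, plus an
# illustrative call; 'doc.xml' (pdftohtml -xml output) is a hypothetical path.
from codecs import encode as codecs_encode
from re import findall as re_findall
from bs4 import BeautifulSoup as soup
from pandas import DataFrame as pd_DataFrame

with open('doc.xml') as fh:
    df = parse_xml(fh.read())
print(df.head())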
script_file.write(
    "from codecs import decode as codecs_decode\n".encode())
script_file.write(
    "from binascii import unhexlify as binascii_unhexlify\n\n".encode())

# emit the list of bare file names (paths stripped to their last component)
script_file.write("file_names = [".encode())
for path in paths:
    while path.find('/') != -1:
        path = path[path.find('/') + 1:]
    script_file.write(('"' + path + '", ').encode())
script_file.write("]\n\n".encode())

# emit each file's contents, zlib-compressed ('zip' codec) and hex-encoded
script_file.write("byte_files = [".encode())
for file in paths:
    with open(file, 'rb') as reading_file:
        bytes_of_file = reading_file.read()
    bytes_of_file = codecs_encode(bytes_of_file, 'zip').hex()
    script_file.write(('"' + str(bytes_of_file) + '",\n').encode())
script_file.write("]\n\n".encode("utf-8"))

# emit the loop that restores each file, prompting before overwriting
script_file.write("for i in range(len(file_names)):\n".encode())
script_file.write("    if file_exists(file_names[i]):\n".encode())
script_file.write(
    "        if input('File: ' + file_names[i] + ' already exists. Do you want to rewrite it (y/n)?').lower() != 'y':\n"
    .encode())
script_file.write(
    "            print('File: ' + file_names[i] + ' has been skipped!')\n"
    .encode())
script_file.write("            continue\n".encode())
script_file.write(
    "    with open(file_names[i], 'wb') as new_file:\n".encode())
script_file.write(
    # the restore step mirrors the encoding above: unhexlify, then decompress
    "        new_file.write(codecs_decode(binascii_unhexlify(byte_files[i]), 'zip'))\n"
    .encode())
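# Hedged round-trip sketch of the encode/decode pair the generator relies on:
# codecs_encode(data, 'zip').hex() on the packing side is undone in the
# generated script by binascii_unhexlify plus codecs_decode(..., 'zip').
from binascii import unhexlify as binascii_unhexlify
from codecs import decode as codecs_decode, encode as codecs_encode

payload = b'example file contents'
wire = codecs_encode(payload, 'zip').hex()          # compress, then hex
assert codecs_decode(binascii_unhexlify(wire), 'zip') == payload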