def find_binary_exts(buffer):
    """Scan a buffer of unicode code points for '.ext'-like sequences and count them.

    An extension is a dot preceded by a word character, followed by a letter
    and up to four more word characters (1-5 chars total, without the dot).
    Returns a dict mapping extension string -> occurrence count, or None when
    the buffer is empty or no extension was found.
    """
    size = len(buffer)
    if size <= 0:
        return
    array = []
    end = size - 1
    pos = 0
    while pos < end:
        pos += 1  # start checking directly from the second element
        if buffer[pos] != unicode_dot:
            continue
        # the dot must be preceded by a word character to look like 'name.ext'
        if not Text.isword_unicode(buffer[pos - 1]):
            continue
        b = buffer[pos + 1]
        # the first extension character must be a letter
        if not Text.isabc_unicode(b):
            continue
        s = chr(b)
        for i in range(2, min(6, end - pos)):  # extension length at most 5
            b = buffer[pos + i]
            if Text.isword_unicode(b):
                s += chr(b)
            else:
                # non-word char ends the extension; advance pos past it so the
                # consumed span is not rescanned
                pos += i - 1
                break
        array.append(s)
    if not array:
        return
    # tally occurrences of each extension
    ndict = {}
    for s in array:
        if s in ndict:
            ndict[s] += 1
        else:
            ndict[s] = 1
    return ndict
def find_binary_paths(buffer, exts):
    """Find path-like strings in a buffer of unicode code points.

    exts: iterable of extensions, each a sequence of code points (no dot).
    For every dot followed by a known extension, walks backwards collecting
    word/special characters to recover the path body.  Returns a list of
    (start_index, 'path.ext') tuples (empty when nothing matched), or None
    for an empty buffer.
    """
    size = len(buffer)
    if size <= 0:
        return
    array = []
    end = size - 1
    pos = 0
    while pos < end:
        pos += 1  # start checking directly from the second element
        if buffer[pos] != unicode_dot:
            continue
        for ebs in exts:
            # BUGFIX: the original indexed buffer[pos + i + 1] without a bounds
            # check, raising IndexError when a candidate extension would run
            # past the end of the buffer.
            if pos + len(ebs) > end:
                continue
            isext = True
            for i in range(len(ebs)):
                if buffer[pos + i + 1] != ebs[i]:
                    isext = False
                    break
            if not isext:
                continue
            # walk backwards from the dot to collect the path body
            s, p = '', -1
            for i in range(pos - 1, -1, -1):
                b = buffer[i]
                if b in special_unis or Text.isword_unicode(b):
                    s = chr(b) + s
                else:
                    p = i + 1
                    break
            # p stays -1 when the path would start at index 0; such matches
            # are deliberately discarded (p > 0), as in the original.
            if p > 0:
                array.append((p, '%s.%s' % (s, Text.unicodes2str(ebs))))
            pos += len(ebs)
            break
    return array
def parentname(p, sep=None):
    """Return the name of the parent directory component of path *p*.

    sep defaults to os.sep; a single trailing separator on *p* is ignored.
    """
    separator = sep if sep else os.sep
    last = len(p) - 1
    # ignore a trailing separator so 'a/b/c/' behaves like 'a/b/c'
    if p[last] == separator:
        last -= 1
    stop = Text.last_find(p, separator, last)
    start = Text.last_find(p, separator, stop - 1) + 1
    return p[start:stop]
def _out_one(self, build_look_map, is_force): edit_dict = {} # load old sheets if not is_force and os.path.isfile(self._ulo_path): ba = Bit.ByteArray() with open(self._ulo_path, 'rb') as fp: ba.init_buffer(fp.read()) nn = ba.read_u16() for i in range(nn): excel_name, sheet_name = Text.unpack(ba.read_utf8(), '.') sheet_buf = ba.read_bytes(ba.read_u32()) book_look_map = build_look_map.get(excel_name) if not book_look_map: continue # 这边被剔除了 new_sheet_dic = Collect.gen_dict(edit_dict, excel_name) if book_look_map == _Fmt.NO_DIFF_FLAG: new_sheet_dic[sheet_name] = sheet_buf # 这个excel没有变更,全部保留。 continue if sheet_name in book_look_map: new_sheet_dic[sheet_name] = sheet_buf # 只保留记录过的表 # merge sheets for excel_name, build_dict in self._new_dict.items(): Collect.gen_dict(edit_dict, excel_name).update(build_dict) # output sheets sheet_nums = 0 if edit_dict: ba = Bit.ByteArray().init_capacity(1024 * 1024 * 8) ba.set_position(2) for excel_name, sheet_dict in edit_dict.items(): sheet_nums += self._build_buf(excel_name, sheet_dict, ba) # log.i('write:', sheet_nums, excel_name, sheet_name) ba.set_position(0).write_u16(sheet_nums) # 写入表的数量 with open(self._ulo_path, 'wb') as fp: fp.write(ba.slim_buffer()) return sheet_nums
def split_res(self):
    """Unpack 'patch.was' from self.idir into individual files under self.odir.

    Record layout as read here (offsets inferred from this code, not from a
    spec): 32-byte archive header; then per entry: 4 unknown bytes, u32
    payload size, 20 more unknown bytes, u32 name length, the name, 1 extra
    byte (NOTE(review): presumably a NUL terminator — confirm), then the
    payload.  '.luac' entries are written under 'src/', all others 'res/'.
    """
    # with open(os.path.join(self.idir, 'package.was'), 'rb') as fp:
    with open(os.path.join(self.idir, 'patch.was'), 'rb') as fp:
        buffer = fp.read()
    buffer = buffer[32:]  # drop the archive header
    e = len(buffer)
    p = 0
    while p < e:
        p += 4  # skip 4 unknown bytes
        b = buffer[p:p+4]
        fs = Bit.u32_from(b)  # payload size
        p += 24  # skip the size field (4 bytes) plus 20 unknown bytes
        b = buffer[p:p+4]
        ns = Bit.u32_from(b)  # name length
        p += 4
        fn = Text.unicodes2str(buffer[p:p+ns])
        p += ns + 1  # name plus one extra byte
        if fn.endswith('.luac'):
            fn = 'src/' + fn
        else:
            fn = 'res/' + fn
        print(p, fs, fn)
        fb = buffer[p:p+fs]
        p += fs
        fp = os.path.join(self.odir, fn)  # NOTE: 'fp' is reused as a path here
        FS.make_parent(fp)
        with open(fp, 'wb') as fp:
            fp.write(fb)
    print(p, e)
def e(self, *args, sep=None):
    """Log *args* at error level, joined by *sep* (DEFAULT_SEP when None).

    Uses console color attributes on Windows, ANSI red escapes elsewhere.
    Does nothing when the logger level is below ERROR.
    """
    if self._level < ERROR:
        return
    if sep is None:
        sep = DEFAULT_SEP
    msg = (self._tag % 'E') + sep + Text.join(args, sep)
    if Cross.IS_WINDOWS:
        self._windows_print(msg, 0x0c)
    else:
        print('\033[0;31m' + msg + '\033[0m', sep=sep)
def filtered():
    """Drop noisy extension keys from the stored map, then persist it.

    A key is dropped when it is a single character, a two-character key other
    than 'js', mixed-case, or has fewer than 500 recorded occurrences.
    """
    ext_map = read()

    def _is_noise(k):
        # one-line predicate mirroring the original filter chain
        n = len(k)
        if n == 1:
            return True
        if n == 2 and k != 'js':
            return True
        if not Text.islower_str(k) and not Text.isupper_str(k):
            return True
        return ext_map[k] < 500

    # collect first, then delete, so the dict is not mutated while iterating
    for key in [k for k in ext_map if _is_noise(k)]:
        del ext_map[key]
    save(ext_map)
def filtered(self):
    """Recover original names for hashed files and write the hash->name index.

    Reads candidate names from <root>/<name>_files0.txt; for each line,
    repeatedly strips the first character until the MD5 of the remaining text
    matches a file present in self.idir, then records hash -> name and writes
    all matches to <root>/<name>_files.txt as 'hash:name' lines.

    BUGFIX: the original was declared 'def filtered():' yet referenced
    self.root/self.name/self.idir, which raised NameError at runtime; the
    missing 'self' parameter is restored here.
    """
    paths = {}
    with open(os.path.join(self.root, self.name + '_files0.txt'), 'r') as fp:
        for line in fp.readlines():
            line = line.strip()
            while True:
                m = Crypto.md5_bytes(bytes(Text.str2unicodes(line)))
                p = os.path.join(self.idir, m)
                if os.path.isfile(p):
                    paths[m] = line
                    break
                if len(line) > 0:
                    # no match: drop the leading character and retry
                    line = line[1:]
                else:
                    break
    with open(os.path.join(self.root, self.name + '_files.txt'), 'w') as out:
        for k in paths:
            # BUGFIX: the original used writelines() on a single string, which
            # only works by iterating it character by character; write() is the
            # correct call and produces identical output.
            out.write('%s:%s\n' % (k, paths[k]))