def _out_one(self, build_look_map, is_force):
    """Merge freshly built sheets with the previous single .ulo file and rewrite it.

    build_look_map: per-workbook lookup of sheets to keep (or _Fmt.NO_DIFF_FLAG
    when the whole workbook is unchanged).
    is_force: when True, discard the old file entirely and rebuild from scratch.
    Returns the number of sheets written.
    """
    merged = {}
    # Reload previously written sheets unless a full rebuild was requested.
    if not is_force and os.path.isfile(self._ulo_path):
        ba = Bit.ByteArray()
        with open(self._ulo_path, 'rb') as fp:
            ba.init_buffer(fp.read())
        old_count = ba.read_u16()
        for _ in range(old_count):
            excel_name, sheet_name = Text.unpack(ba.read_utf8(), '.')
            sheet_buf = ba.read_bytes(ba.read_u32())
            book_look_map = build_look_map.get(excel_name)
            if not book_look_map:
                continue  # This workbook was removed from the build.
            sheet_dic = Collect.gen_dict(merged, excel_name)
            if book_look_map == _Fmt.NO_DIFF_FLAG:
                # Workbook unchanged: keep every old sheet as-is.
                sheet_dic[sheet_name] = sheet_buf
                continue
            if sheet_name in book_look_map:
                sheet_dic[sheet_name] = sheet_buf  # Keep only sheets still tracked.
    # Overlay the freshly built sheets on top of the retained ones.
    for excel_name, build_dict in self._new_dict.items():
        Collect.gen_dict(merged, excel_name).update(build_dict)
    # Serialize the merged result back to disk.
    sheet_nums = 0
    if merged:
        ba = Bit.ByteArray().init_capacity(1024 * 1024 * 8)
        ba.set_position(2)  # Reserve the u16 sheet-count slot at the front.
        for excel_name, sheet_dict in merged.items():
            sheet_nums += self._build_buf(excel_name, sheet_dict, ba)
        ba.set_position(0).write_u16(sheet_nums)  # Write the sheet count header.
        with open(self._ulo_path, 'wb') as fp:
            fp.write(ba.slim_buffer())
    return sheet_nums
def laya_sk(skpath, imagepath=None, outputdir=None):
    """Split a Laya skeleton (.sk) file's packed atlas into individual PNG frames.

    skpath: path to the .sk file; atlas textures are resolved relative to it.
    imagepath: unused, kept for backward compatibility with existing callers.
    outputdir: destination directory; defaults to '<skname>_images' beside the .sk file.
    """
    from jonlin.utils import Bit
    with open(skpath, 'rb') as fp:
        ba = Bit.ByteArray().init_buffer(fp.read())
    # Header fields are read in order to advance the cursor to the data offsets.
    ani_version = ba.read_utf8()
    ani_classname = ba.read_utf8()
    ani_names = ba.read_utf8()
    ani_count = ba.read_u8()
    pub_data_pos = ba.read_u32()
    ext_data_pos = ba.read_u32()
    ba.set_position(ext_data_pos)
    ext_buffer = ba.read_bytes(ba.get_available())
    ba = Bit.ByteArray().init_buffer(ext_buffer)
    tex_count = ba.read_int()
    # tex_array alternates: atlas file name, frame name, atlas file name, ...
    tex_array = ba.read_utf8().split('\n')
    tex_books = {}   # frame name -> atlas image file
    tex_frames = []  # (name, x, y, w, h, fx, fy, fw, fh)
    for i in range(tex_count):
        tex_name = tex_array[i * 2 + 1]
        tex_books[tex_name] = tex_array[i * 2]
        x = ba.read_float()
        y = ba.read_float()
        w = ba.read_float()
        h = ba.read_float()
        fx = ba.read_float()
        fy = ba.read_float()
        fw = ba.read_float()
        fh = ba.read_float()
        tex_frames.append((tex_name, x, y, w, h, fx, fy, fw, fh))
    # Crop each frame out of its atlas image.
    atlas_root = os.path.dirname(skpath)
    if outputdir is None:
        imagesdir = os.path.join(atlas_root, FS.filename(skpath) + '_images')
    else:
        imagesdir = outputdir
    image_map = {}
    try:
        for src in set(tex_books.values()):
            image_map[src] = Image.open(os.path.join(atlas_root, src))
        for tex_name, x, y, w, h, _fx, _fy, _fw, _fh in tex_frames:
            rect = (x, y, x + w, y + h)
            image = image_map[tex_books[tex_name]]
            item_img = image.crop(rect)
            item_src = os.path.join(imagesdir, tex_name + '.png')
            FS.make_parent(item_src)
            item_img.save(item_src)
    finally:
        # BUG FIX: atlas images were previously left open (file-handle leak);
        # close them even if a crop/save raises.
        for img in image_map.values():
            img.close()
def _gen_buffer(self, sheet_obj, index_info, sheet_area, data_cols, errors):
    """Serialize one sheet into a binary buffer.

    Cell conversion failures are collected into `errors` (list of _CellError)
    instead of aborting the whole sheet. Returns the trimmed byte buffer.
    """
    data_ncol = len(data_cols)
    data_keys = sheet_area.header.keys[1:]
    data_lets = sheet_area.header.lets[1:]
    is_list_t = _Lua2.is_list_table(index_info)
    ba = Bit.ByteArray().init_capacity(index_info.amount * data_ncol * 256)
    # Header: table kind flag, index type, column count, then (name, type) per column.
    ba.write_byte(1 if is_list_t else 0)
    ba.write_byte(self._ulo_type[index_info.let])  # index let type
    ba.write_u8(data_ncol)
    for n in range(data_ncol):
        self._write_string(ba, data_keys[n])
        ba.write_byte(self._ulo_type[data_lets[n]])  # key let type
    # Body: row count, then per row an optional index value followed by its cells.
    ba.write_u16(index_info.amount)
    for m in range(index_info.amount):
        r = index_info.row_ns[m]
        if not is_list_t:
            # Keyed tables carry an explicit index value per row.
            try:
                self._write_cell(ba, index_info.values[m], index_info.let)
            except _CellError as err:
                errors.append(err)
        for n in range(data_ncol):
            t = data_lets[n]
            try:
                v = _Fmt.guess_value(sheet_obj.cell_value(r, data_cols[n]), t)
                self._write_cell(ba, v, t)
            except _CellError as err:
                errors.append(err)
    return ba.slim_buffer()
def pack_all(self, source):
    """Pack every sheet in `source` (sheet-info -> keys mapping) into one binary blob.

    Layout: u16 sheet count, then per sheet its name, payload size, and payload.
    """
    ba = Bit.ByteArray().init_capacity(1024 * 1024 * 2)
    ba.write_u16(len(source))  # number of sheets
    for si, keys in source.items():
        payload = self.pack_one(si, keys)
        length = len(payload)
        ba.write_utf8(si.name)
        ba.write_u32(length)
        ba.write_bytes(payload, length)
    return ba.slim_buffer()
def pack_one(self, si, keys):
    """Serialize one sheet column by column.

    Layout: u16 row count, u8 column count, then per column its field name,
    type tag, and every row's value written with the type's writer.
    """
    ncols = len(keys)
    nrows = len(si.datas)
    ba = Bit.ByteArray().init_capacity(ncols * nrows * 32)
    ba.write_u16(nrows)
    ba.write_u8(ncols)
    for k in keys:
        c = k.i
        t = si.types[c]
        # Array-typed columns ('[]' suffix) are serialized as JSON.
        writer = self._write_json if t.endswith('[]') else self.writer_dict[t]
        ba.write_utf8(k.k)           # field name
        ba.write_u8(self.itypes[t])  # type tag
        for row in si.datas:
            writer(ba, row[c])
    return ba.slim_buffer()
def _out_pkg(self, build_look_map):
    """Write each workbook to its own .ulo file, prune stale files, and return the total sheet count."""
    if not os.path.isdir(self._ulo_path):
        os.makedirs(self._ulo_path)
    # One .ulo file per workbook: u16 sheet count followed by the sheet buffers.
    for excel_name, sheet_dict in self._new_dict.items():
        ba = Bit.ByteArray().init_capacity(1024 * 1024 * 8)
        ba.set_position(2)  # Reserve the sheet-count slot at the front.
        sheet_nums = self._build_buf(excel_name, sheet_dict, ba)
        ba.set_position(0).write_u16(sheet_nums)  # Write the sheet count header.
        with open(os.path.join(self._ulo_path, excel_name + '.ulo'), 'wb') as fp:
            fp.write(ba.slim_buffer())
    # Delete files for workbooks no longer in the build; sum sheet counts of the rest.
    total_nums = 0
    for name in os.listdir(self._ulo_path):
        ulofile = os.path.join(self._ulo_path, name)
        if FS.filename(name) not in build_look_map:
            log.w('删除表文件:', ulofile)
            os.remove(ulofile)
        else:
            # The leading u16 of each file is its sheet count.
            with open(ulofile, 'rb') as fp:
                total_nums += Bit.u16_from(fp.read(2))
    return total_nums