def backup_maps(maps):
    """Copy the given maps to the backup.

    Each P2C in maps is added to BACKUPS['back']; if a map with the same
    path already exists in the backup zip, the user is asked whether to
    overwrite it.  Finishes by refreshing the backup details pane.
    """
    back_zip = BACKUPS['backup_zip']  # type: ZipFile

    # Allow removing old maps when we overwrite objects
    map_dict = {
        p2c.path: p2c
        for p2c in BACKUPS['back']
    }

    # You can't remove files from a zip, so we need to create a new one!
    # Here we'll just add entries into BACKUPS['back'].
    # Also check for overwriting
    for p2c in maps:
        scr_path = p2c.path + '.jpg'
        map_path = p2c.path + '.p2c'
        if (
                map_path in zip_names(back_zip) or
                scr_path in zip_names(back_zip)
                ):
            if not messagebox.askyesno(
                title='Overwrite File?',
                # Fixed: the first literal previously lacked a trailing
                # space, producing "...backup.Do you wish...".
                message='This filename is already in the backup. '
                        'Do you wish to overwrite it? '
                        '({})'.format(p2c.title),
                parent=window,
                icon=messagebox.QUESTION,
            ):
                continue
        new_item = p2c.copy()
        map_dict[p2c.path] = new_item

    BACKUPS['back'] = list(map_dict.values())
    refresh_back_details()
def backup_maps(maps):
    """Copy the given maps to the backup.

    Adds each P2C in maps to BACKUPS['back'], prompting before
    overwriting a map already present in the backup zip, then refreshes
    the backup details pane.
    """
    back_zip = BACKUPS['backup_zip']  # type: ZipFile

    # Allow removing old maps when we overwrite objects
    map_dict = {p2c.filename: p2c for p2c in BACKUPS['back']}

    # You can't remove files from a zip, so we need to create a new one!
    # Here we'll just add entries into BACKUPS['back'].
    # Also check for overwriting
    for p2c in maps:
        scr_path = p2c.filename + '.jpg'
        map_path = p2c.filename + '.p2c'
        if (map_path in zip_names(back_zip) or
                scr_path in zip_names(back_zip)):
            if not messagebox.askyesno(
                title='Overwrite File?',
                # NOTE(review): the concatenated literal is missing a space
                # between "backup." and "Do you wish" - fixing it changes
                # the gettext msgid, so it needs a coordinated catalog
                # update rather than an in-place edit.
                message=_('This filename is already in the backup.'
                          'Do you wish to overwrite it? '
                          '({})').format(p2c.title),
                parent=window,
                icon=messagebox.QUESTION,
            ):
                # User declined - keep the existing backup entry.
                continue
        new_item = p2c.copy()
        map_dict[p2c.filename] = new_item

    BACKUPS['back'] = list(map_dict.values())
    refresh_back_details()
def load_backup(zip_file):
    """Load in a backup file.

    Returns a list of P2C objects, one per '.p2c' entry in the zip.
    """
    # Collect every puzzle entry, trimming the '.p2c' suffix.
    puzzle_names = [
        name[:-4]
        for name in zip_names(zip_file)
        if name.endswith('.p2c')
    ]
    # Constructing a P2C parses its properties file, which is slow -
    # show a loading screen while that happens.
    reading_loader.set_length('READ', len(puzzle_names))
    LOGGER.info('Loading {} maps..', len(puzzle_names))
    maps = []
    with reading_loader:
        for name in puzzle_names:
            puzzle = P2C.from_file(name, zip_file)
            maps.append(puzzle)
            LOGGER.debug(
                'Loading {} map "{}"',
                'coop' if puzzle.is_coop else 'sp',
                puzzle.title,
            )
            reading_loader.step('READ')
    LOGGER.info('Done!')
    return maps
def do_copy(zip_list, done_files):
    """Extract resources from the given packages into the caches.

    zip_list: package paths - real zips, or directories wrapped in
        FakeZip to expose the same API.
    done_files: shared multiprocessing counter, incremented once per
        processed resource file so the parent can show progress.
    """
    cache_path = os.path.abspath('../cache/')
    music_samp = os.path.abspath('../sounds/music_samp/')
    # Wipe output from any previous extraction first.
    shutil.rmtree(cache_path, ignore_errors=True)
    shutil.rmtree(music_samp, ignore_errors=True)

    img_loc = os.path.join('resources', 'bee2')
    music_loc = os.path.join('resources', 'music_samp')
    for zip_path in zip_list:
        if os.path.isfile(zip_path):
            zip_file = ZipFile(zip_path)
        else:
            zip_file = FakeZip(zip_path)
        with zip_file:
            for path in zip_names(zip_file):
                # NOTE(review): normcase() only lowercases/changes seps on
                # Windows; on POSIX, zip entries using '/' won't match
                # img_loc/music_loc built with os.sep - confirm intended.
                loc = os.path.normcase(path)
                if not loc.startswith("resources"):
                    continue
                # Don't re-extract images
                if loc.startswith(img_loc):
                    continue
                if loc.startswith(music_loc):
                    # Music samples go to their own folder, flattened
                    # relative to the music_samp prefix.
                    dest_path = os.path.join(
                        music_samp,
                        os.path.relpath(path, music_loc)
                    )
                    os.makedirs(os.path.dirname(dest_path), exist_ok=True)
                    with zip_open_bin(zip_file, path) as src:
                        with open(dest_path, 'wb') as dest:
                            shutil.copyfileobj(src, dest)
                else:
                    zip_file.extract(path, path=cache_path)
                # Count this file for the progress display.
                with done_files.get_lock():
                    done_files.value += 1
def load_backup(zip_file):
    """Load in a backup file.

    Returns a list of P2C objects parsed from the '.p2c' entries in the
    zip, and schedules a deferred refresh of the game-details pane.
    """
    maps = []
    puzzles = [
        file[:-4]  # Strip extension
        for file in zip_names(zip_file)
        if file.endswith('.p2c')
    ]
    # Each P2C init requires reading in the properties file, so this may take
    # some time. Use a loading screen.
    reading_loader.set_length('READ', len(puzzles))
    LOGGER.info('Loading {} maps..', len(puzzles))
    with reading_loader:
        for file in puzzles:
            new_map = P2C.from_file(file, zip_file)
            maps.append(new_map)
            LOGGER.debug(
                'Loading {} map "{}"',
                'coop' if new_map.is_coop else 'sp',
                new_map.title,
            )
            reading_loader.step('READ')
    LOGGER.info('Done!')

    # It takes a while before the detail headers update positions,
    # so delay a refresh call.
    TK_ROOT.after(500, UI['game_details'].refresh)

    return maps
def do_copy(zip_list, done_files):
    """Extract package resources into the cache and music-sample folders.

    Increments the shared done_files counter once per handled file so
    the parent process can display progress.
    """
    cache_path = os.path.abspath('../cache/')
    music_samp = os.path.abspath('../sounds/music_samp/')
    # Clear output from any previous run, in the same order as before.
    for stale_dir in (cache_path, music_samp):
        shutil.rmtree(stale_dir, ignore_errors=True)

    img_loc = os.path.join('resources', 'bee2')
    music_loc = os.path.join('resources', 'music_samp')
    for zip_path in zip_list:
        # Directories get wrapped so they expose the ZipFile API.
        pak = ZipFile(zip_path) if os.path.isfile(zip_path) else FakeZip(zip_path)
        with pak:
            for entry in zip_names(pak):
                norm = os.path.normcase(entry)
                # Skip non-resources, and images which are extracted
                # elsewhere.
                if not norm.startswith("resources") or norm.startswith(img_loc):
                    continue
                if norm.startswith(music_loc):
                    # Music samples land in their own folder, keeping
                    # their path relative to the music prefix.
                    sample_dest = os.path.join(
                        music_samp,
                        os.path.relpath(entry, music_loc),
                    )
                    os.makedirs(os.path.dirname(sample_dest), exist_ok=True)
                    with zip_open_bin(pak, entry) as src, \
                            open(sample_dest, 'wb') as dest:
                        shutil.copyfileobj(src, dest)
                else:
                    pak.extract(entry, path=cache_path)
                with done_files.get_lock():
                    done_files.value += 1
def parse(cls, data):
    """Parse a PackList definition from the package info block.

    The file list comes either from an inline 'Config' child block, or
    from a 'pack/<name>.cfg' file inside the package zip (one file per
    line).  'AddIfMat' properties list materials that trigger packing.

    Raises FileNotFoundError if the referenced .cfg is not in the zip.
    """
    conf = data.info.find_key('Config', '')

    mats = [
        prop.value
        for prop in
        data.info.find_all('AddIfMat')
    ]

    if conf.has_children():
        # Allow having a child block to define packlists inline
        files = [
            prop.value
            for prop in conf
        ]
    else:
        path = 'pack/' + conf.value + '.cfg'
        try:
            with data.zip_file.open(path) as f:
                # Each line is a file to pack.
                # Skip blank lines, strip whitespace, and
                # allow // comments.
                files = []
                for line in f:
                    line = utils.clean_line(line)
                    if line:
                        files.append(line)
        except KeyError as ex:
            # ZipFile raises KeyError for missing members; convert to a
            # clearer exception, keeping the original as the cause.
            raise FileNotFoundError(
                '"{}:{}" not in zip!'.format(
                    data.id,
                    path,
                )
            ) from ex

    if CHECK_PACKFILE_CORRECTNESS:
        # Use normpath so sep differences are ignored, plus case.
        zip_files = {
            os.path.normpath(file).casefold()
            for file in
            zip_names(data.zip_file)
            if file.startswith('resources')
        }
        for file in files:
            # Check to make sure the files exist...
            file = os.path.join('resources', os.path.normpath(file)).casefold()
            if file not in zip_files:
                LOGGER.warning('Warning: "{file}" not in zip! ({pak_id})',
                    file=file,
                    pak_id=data.pak_id,
                )

    return cls(
        data.id,
        files,
        mats,
    )
def save_backup(): """Save the backup file.""" # We generate it from scratch, since that's the only way to remove # files. new_zip_data = BytesIO() new_zip = ZipFile(new_zip_data, 'w', compression=ZIP_LZMA) maps = [ item.p2c for item in UI['back_details'].items ] if not maps: messagebox.showerror( _('BEE2 Backup'), _('No maps were chosen to backup!'), ) return copy_loader.set_length('COPY', len(maps)) with copy_loader: for p2c in maps: # type: P2C old_zip = p2c.zip_file map_path = p2c.filename + '.p2c' scr_path = p2c.filename + '.jpg' if scr_path in zip_names(old_zip): with zip_open_bin(old_zip, scr_path) as f: new_zip.writestr(scr_path, f.read()) # Copy the map as bytes, so encoded characters are transfered # unaltered. with zip_open_bin(old_zip, map_path) as f: new_zip.writestr(map_path, f.read()) copy_loader.step('COPY') new_zip.close() # Finalize zip with open(BACKUPS['backup_path'], 'wb') as backup: backup.write(new_zip_data.getvalue()) BACKUPS['unsaved_file'] = new_zip_data # Remake the zipfile object, so it's open again. BACKUPS['backup_zip'] = new_zip = ZipFile( new_zip_data, mode='w', compression=ZIP_LZMA, ) # Update the items, so they use this zip now. for p2c in maps: p2c.zip_file = new_zip
def save_backup():
    """Save the backup file.

    Rebuilds the backup zip in memory from the maps listed in the
    backup-details pane, writes it to disk, then reopens the buffer so
    later additions are possible.
    """
    # Zip members can't be deleted, so the archive is rebuilt from
    # scratch every time.
    backup_buf = BytesIO()
    dest_zip = ZipFile(backup_buf, 'w', compression=ZIP_LZMA)

    maps = [item.p2c for item in UI['back_details'].items]

    if not maps:
        # Nothing to back up - tell the user and bail.
        messagebox.showerror(
            gettext('BEE2 Backup'),
            gettext('No maps were chosen to backup!'),
        )
        return

    copy_loader.set_length('COPY', len(maps))

    with copy_loader:
        for p2c in maps:  # type: P2C
            source_zip = p2c.zip_file
            map_name = p2c.filename + '.p2c'
            scr_name = p2c.filename + '.jpg'
            # The screenshot is optional.
            if scr_name in zip_names(source_zip):
                with zip_open_bin(source_zip, scr_name) as f:
                    dest_zip.writestr(scr_name, f.read())

            # Transfer the raw bytes, so text encodings are untouched.
            with zip_open_bin(source_zip, map_name) as f:
                dest_zip.writestr(map_name, f.read())
            copy_loader.step('COPY')

    dest_zip.close()  # Finalize zip

    with open(BACKUPS['backup_path'], 'wb') as backup:
        backup.write(backup_buf.getvalue())
    BACKUPS['unsaved_file'] = backup_buf

    # Reopen the in-memory zip so it's writable again.
    BACKUPS['backup_zip'] = dest_zip = ZipFile(
        backup_buf,
        mode='w',
        compression=ZIP_LZMA,
    )

    # Point every item at the freshly-opened archive.
    for p2c in maps:
        p2c.zip_file = dest_zip
def parse_package(zip_file, info, pak_id, disp_name):
    """Parse through the given package to find all the components.

    Registers each object in all_obj / obj_override, and returns an
    (object_count, image_count) tuple for sizing loader stages - or
    False if a prerequisite package is missing.
    """
    for pre in Property.find_key(info, 'Prerequisites', []).value:
        if pre.value not in packages:
            # A required package is absent - skip this one entirely.
            utils.con_log(
                'Package "' + pre.value +
                '" required for "' + pak_id + '" - ignoring package!'
            )
            return False
    objects = 0
    # First read through all the components we have, so we can match
    # overrides to the originals
    for comp_type in OBJ_TYPES:
        allow_dupes = OBJ_TYPES[comp_type].allow_mult
        # Look for overrides
        for obj in info.find_all("Overrides", comp_type):
            obj_id = obj['id']
            obj_override[comp_type][obj_id].append(
                ParseData(zip_file, obj_id, obj, pak_id)
            )
        for obj in info.find_all(comp_type):
            obj_id = obj['id']
            if obj_id in all_obj[comp_type]:
                if allow_dupes:
                    # Pretend this is an override
                    obj_override[comp_type][obj_id].append(
                        ParseData(zip_file, obj_id, obj, pak_id)
                    )
                else:
                    raise Exception('ERROR! "' + obj_id + '" defined twice!')
            # NOTE(review): in the duplicate-allowed case this still
            # overwrites all_obj and counts the object again - confirm
            # that's intended.
            objects += 1
            all_obj[comp_type][obj_id] = ObjData(
                zip_file,
                obj,
                pak_id,
                disp_name,
            )

    img_count = 0
    img_loc = os.path.join('resources', 'bee2')
    for item in zip_names(zip_file):
        item = os.path.normcase(item).casefold()
        if item.startswith("resources"):
            # Tally resources for the extraction progress bar.
            extract_packages.res_count += 1
            if item.startswith(img_loc):
                img_count += 1
    return objects, img_count
def parse_package(pack: 'Package'):
    """Parse through the given package to find all the components.

    Registers each object in all_obj / obj_override, and returns the
    number of image resources found - or False if a prerequisite
    package is missing.
    """
    for pre in Property.find_key(pack.info, 'Prerequisites', []):
        if pre.value not in packages:
            # A required package is absent - skip this one entirely.
            LOGGER.warning(
                'Package "{pre}" required for "{id}" - '
                'ignoring package!',
                pre=pre.value,
                id=pack.id,
            )
            return False
    # First read through all the components we have, so we can match
    # overrides to the originals
    for comp_type in OBJ_TYPES:
        allow_dupes = OBJ_TYPES[comp_type].allow_mult
        # Look for overrides
        for obj in pack.info.find_all("Overrides", comp_type):
            obj_id = obj['id']
            obj_override[comp_type][obj_id].append(
                ParseData(pack.zip, obj_id, obj, pack.id)
            )
        for obj in pack.info.find_all(comp_type):
            obj_id = obj['id']
            if obj_id in all_obj[comp_type]:
                if allow_dupes:
                    # Pretend this is an override
                    obj_override[comp_type][obj_id].append(
                        ParseData(pack.zip, obj_id, obj, pack.id)
                    )
                else:
                    raise Exception('ERROR! "' + obj_id + '" defined twice!')
            # NOTE(review): in the duplicate-allowed case this still
            # overwrites the all_obj entry - confirm that's intended.
            all_obj[comp_type][obj_id] = ObjData(
                pack.zip,
                obj,
                pack.id,
                pack.disp_name,
            )

    img_count = 0
    img_loc = os.path.join('resources', 'bee2')
    for item in zip_names(pack.zip):
        item = os.path.normcase(item).casefold()
        if item.startswith("resources"):
            # Tally resources for the extraction progress bar.
            extract_packages.res_count += 1
            if item.startswith(img_loc):
                img_count += 1
    return img_count
def restore_maps(maps: List[P2C]):
    """Copy the given maps to the game.

    Writes each map (and its screenshot, if backed up) into the selected
    game's puzzle directory, prompting before overwriting existing
    files, then refreshes the game details pane.
    """
    game_dir = BACKUPS['game_path']
    if game_dir is None:
        LOGGER.warning('No game selected to restore from?')
        return

    copy_loader.set_length('COPY', len(maps))
    with copy_loader:
        for p2c in maps:
            back_zip = p2c.zip_file
            scr_path = p2c.filename + '.jpg'
            map_path = p2c.filename + '.p2c'
            abs_scr = os.path.join(game_dir, scr_path)
            abs_map = os.path.join(game_dir, map_path)
            if (
                os.path.isfile(abs_scr) or
                os.path.isfile(abs_map)
            ):
                if not messagebox.askyesno(
                    title='Overwrite File?',
                    # NOTE(review): the concatenated literal is missing a
                    # space between "directory." and "Do you wish" -
                    # fixing it changes the gettext msgid, so it needs a
                    # coordinated catalog update.
                    message=gettext('This map is already in the game directory.'
                                    'Do you wish to overwrite it? '
                                    '({})').format(p2c.title),
                    parent=window,
                    icon=messagebox.QUESTION,
                ):
                    # Declined - still step the loader so it completes.
                    copy_loader.step('COPY')
                    continue

            # The screenshot may be absent from the backup.
            if scr_path in zip_names(back_zip):
                with zip_open_bin(back_zip, scr_path) as src:
                    with open(abs_scr, 'wb') as dest:
                        shutil.copyfileobj(src, dest)

            with zip_open_bin(back_zip, map_path) as src:
                with open(abs_map, 'wb') as dest:
                    shutil.copyfileobj(src, dest)

            # Record the restored copy as living in the game folder.
            new_item = p2c.copy()
            new_item.zip_file = FakeZip(game_dir)
            BACKUPS['game'].append(new_item)
            copy_loader.step('COPY')

    refresh_game_details()
def restore_maps(maps: List[P2C]):
    """Copy the given maps to the game.

    Writes each map (and its screenshot, if backed up) into the selected
    game's puzzle directory, prompting before overwriting existing
    files, then refreshes the game details pane.
    """
    game_dir = BACKUPS['game_path']
    if game_dir is None:
        LOGGER.warning('No game selected to restore from?')
        return

    copy_loader.set_length('COPY', len(maps))
    with copy_loader:
        for p2c in maps:
            back_zip = p2c.zip_file
            scr_path = p2c.filename + '.jpg'
            map_path = p2c.filename + '.p2c'
            abs_scr = os.path.join(game_dir, scr_path)
            abs_map = os.path.join(game_dir, map_path)
            if (
                os.path.isfile(abs_scr) or
                os.path.isfile(abs_map)
            ):
                if not messagebox.askyesno(
                    title='Overwrite File?',
                    # NOTE(review): the concatenated literal is missing a
                    # space between "directory." and "Do you wish" -
                    # fixing it changes the gettext msgid, so it needs a
                    # coordinated catalog update.
                    message=_('This map is already in the game directory.'
                              'Do you wish to overwrite it? '
                              '({})').format(p2c.title),
                    parent=window,
                    icon=messagebox.QUESTION,
                ):
                    # Declined - still step the loader so it completes.
                    copy_loader.step('COPY')
                    continue

            # The screenshot may be absent from the backup.
            if scr_path in zip_names(back_zip):
                with zip_open_bin(back_zip, scr_path) as src:
                    with open(abs_scr, 'wb') as dest:
                        shutil.copyfileobj(src, dest)

            with zip_open_bin(back_zip, map_path) as src:
                with open(abs_map, 'wb') as dest:
                    shutil.copyfileobj(src, dest)

            # Record the restored copy as living in the game folder.
            new_item = p2c.copy()
            new_item.zip_file = FakeZip(game_dir)
            BACKUPS['game'].append(new_item)
            copy_loader.step('COPY')

    refresh_game_details()
def do_copy(zip_list, done_files):
    """Extract package resources into the cache folder.

    zip_list: package paths - real zips, or directories wrapped in
        FakeZip to expose the same API.
    done_files: shared multiprocessing counter, incremented once per
        resource file so the parent process can display progress.
    """
    shutil.rmtree('../cache/', ignore_errors=True)

    img_loc = os.path.join('resources', 'bee2')
    for zip_path in zip_list:
        if os.path.isfile(zip_path):
            zip_file = ZipFile(zip_path)
        else:
            zip_file = FakeZip(zip_path)
        with zip_file:
            for path in zip_names(zip_file):
                loc = os.path.normcase(path)
                if loc.startswith("resources"):
                    # Don't re-extract images
                    if not loc.startswith(img_loc):
                        zip_file.extract(path, path="../cache/")
                    # Fixed: previously this locked the unrelated global
                    # 'currently_done' while mutating done_files.value,
                    # so concurrent workers could race on the counter.
                    # Lock the value we actually increment.
                    with done_files.get_lock():
                        done_files.value += 1
def load_backup(zip_file):
    """Load in a backup file.

    Returns a list of P2C objects, one per '.p2c' entry in the zip.
    """
    # Find every puzzle entry, trimming off the '.p2c' suffix.
    names = [
        entry[:-4]
        for entry in zip_names(zip_file)
        if entry.endswith('.p2c')
    ]
    # Each P2C reads its properties file on construction, which can be
    # slow - show the loading screen meanwhile.
    reading_loader.set_length('READ', len(names))
    maps = []
    with reading_loader:
        for name in names:
            maps.append(P2C.from_file(name, zip_file))
            reading_loader.step('READ')
    return maps
def parse_package(zip_file, info, pak_id, disp_name):
    """Parse through the given package to find all the components.

    Registers each object in all_obj / obj_override, and returns an
    (object_count, image_count) tuple for sizing loader stages - or
    False if a prerequisite package is missing.
    """
    for pre in Property.find_key(info, 'Prerequisites', []).value:
        if pre.value not in packages:
            # A required package is absent - skip this one entirely.
            utils.con_log('Package "' + pre.value + '" required for "' +
                          pak_id + '" - ignoring package!')
            return False
    objects = 0
    # First read through all the components we have, so we can match
    # overrides to the originals
    for comp_type in OBJ_TYPES:
        allow_dupes = OBJ_TYPES[comp_type].allow_mult
        # Look for overrides
        for obj in info.find_all("Overrides", comp_type):
            obj_id = obj['id']
            obj_override[comp_type][obj_id].append(
                ParseData(zip_file, obj_id, obj, pak_id))
        for obj in info.find_all(comp_type):
            obj_id = obj['id']
            if obj_id in all_obj[comp_type]:
                if allow_dupes:
                    # Pretend this is an override
                    obj_override[comp_type][obj_id].append(
                        ParseData(zip_file, obj_id, obj, pak_id))
                else:
                    raise Exception('ERROR! "' + obj_id + '" defined twice!')
            # NOTE(review): in the duplicate-allowed case this still
            # overwrites all_obj and counts the object again - confirm
            # that's intended.
            objects += 1
            all_obj[comp_type][obj_id] = ObjData(
                zip_file,
                obj,
                pak_id,
                disp_name,
            )
    img_count = 0
    img_loc = os.path.join('resources', 'bee2')
    for item in zip_names(zip_file):
        item = os.path.normcase(item).casefold()
        if item.startswith("resources"):
            # Tally resources for the extraction progress bar.
            extract_packages.res_count += 1
            if item.startswith(img_loc):
                img_count += 1
    return objects, img_count
def save_backup(): """Save the backup file.""" # We generate it from scratch, since that's the only way to remove # files. new_zip_data = BytesIO() new_zip = ZipFile(new_zip_data, 'w', compression=ZIP_LZMA) maps = [ item.p2c for item in UI['back_details'].items ] copy_loader.set_length('COPY', len(maps)) with copy_loader: for p2c in maps: old_zip = p2c.zip_file map_path = p2c.path + '.p2c' scr_path = p2c.path + '.jpg' if scr_path in zip_names(old_zip): with zip_open_bin(old_zip, scr_path) as f: new_zip.writestr(scr_path, f.read()) with old_zip.open(map_path, 'r') as f: new_zip.writestr(map_path, f.read()) copy_loader.step('COPY') new_zip.close() # Finalize zip with open(BACKUPS['backup_path'], 'wb') as backup: backup.write(new_zip_data.getvalue()) BACKUPS['unsaved_file'] = new_zip_data # Remake the zipfile object, so it's open again. BACKUPS['backup_zip'] = new_zip = ZipFile( new_zip_data, mode='w', compression=ZIP_LZMA, ) # Update the items, so they use this zip now. for p2c in maps: p2c.zip_file = new_zip
def do_copy(zip_list, done_files):
    """Extract package resources into the cache folder.

    zip_list: package paths - real zips, or directories wrapped in
        FakeZip to expose the same API.
    done_files: shared multiprocessing counter, incremented once per
        resource file so the parent process can display progress.
    """
    cache_path = os.path.abspath('../cache/')
    shutil.rmtree(cache_path, ignore_errors=True)

    img_loc = os.path.join('resources', 'bee2')
    for zip_path in zip_list:
        if os.path.isfile(zip_path):
            zip_file = ZipFile(zip_path)
        else:
            zip_file = FakeZip(zip_path)
        with zip_file:
            for path in zip_names(zip_file):
                loc = os.path.normcase(path)
                if loc.startswith("resources"):
                    # Don't re-extract images
                    if not loc.startswith(img_loc):
                        zip_file.extract(path, path=cache_path)
                    # Fixed: previously this locked the unrelated global
                    # 'currently_done' while mutating done_files.value,
                    # so concurrent workers could race on the counter.
                    # Lock the value we actually increment.
                    with done_files.get_lock():
                        done_files.value += 1
def parse_package(zip_file, info, pak_id, disp_name):
    """Parse through the given package to find all the components.

    Registers each object in all_obj / obj_override, and returns the
    object count - or False if a prerequisite package is missing.
    Also tallies resource files into the global res_count, unless
    resource loading is disabled (res_count == -1).
    """
    global res_count
    for pre in Property.find_key(info, 'Prerequisites', []).value:
        if pre.value not in packages:
            # A required package is absent - skip this one entirely.
            utils.con_log(
                'Package "' + pre.value +
                '" required for "' + pak_id + '" - ignoring package!'
            )
            return False
    objects = 0
    # First read through all the components we have, so we can match
    # overrides to the originals
    for comp_type in obj_types:
        # Look for overrides
        for obj in info.find_all("Overrides", comp_type):
            obj_id = obj['id']
            obj_override[comp_type][obj_id].append(
                (zip_file, obj)
            )
        for obj in info.find_all(comp_type):
            obj_id = obj['id']
            if obj_id in all_obj[comp_type]:
                raise Exception('ERROR! "' + obj_id + '" defined twice!')
            objects += 1
            all_obj[comp_type][obj_id] = ObjData(
                zip_file,
                obj,
                pak_id,
                disp_name,
            )
    # res_count of -1 means resource extraction is skipped this run.
    if res_count != -1:
        for item in zip_names(zip_file):
            if item.startswith("resources"):
                res_count += 1
        loader.set_length("RES", res_count)
    return objects
def load_packages(
        pak_dir,
        log_item_fallbacks=False,
        log_missing_styles=False,
        log_missing_ent_count=False,
        ):
    """Scan and read in all packages in the specified directory.

    pak_dir: packages directory, relative to the parent of the CWD.
    The log_* flags are stored for later use while parsing objects.
    Exits the process if the directory is missing; otherwise returns
    the global data dict, filled with parsed objects per type.
    """
    global LOG_ENT_COUNT
    pak_dir = os.path.abspath(os.path.join(os.getcwd(), '..', pak_dir))

    if not os.path.isdir(pak_dir):
        from tkinter import messagebox
        import sys
        # We don't have a packages directory!
        messagebox.showerror(
            master=loader,
            title='BEE2 - Invalid Packages Directory!',
            message='The given packages directory is not present!\n'
                    'Get the packages from '
                    '"http://github.com/TeamSpen210/BEE2-items" '
                    'and place them in "' + pak_dir + os.path.sep + '".',
                    # Add slash to the end to indicate it's a folder.
        )
        sys.exit('No Packages Directory!')

    LOG_ENT_COUNT = log_missing_ent_count
    print('ENT_COUNT:', LOG_ENT_COUNT)
    zips = []
    data['zips'] = []
    try:
        find_packages(pak_dir, zips, data['zips'])

        loader.set_length("PAK", len(packages))

        # Reset the global object registries for this load.
        for obj_type in OBJ_TYPES:
            all_obj[obj_type] = {}
            obj_override[obj_type] = defaultdict(list)
            data[obj_type] = []

        objects = 0
        images = 0
        for pak_id, (zip_file, info, name, dispName) in packages.items():
            print(("Reading objects from '" + pak_id + "'...").ljust(50), end='')
            obj_count, img_count = parse_package(
                zip_file,
                info,
                pak_id,
                dispName,
            )
            objects += obj_count
            images += img_count
            loader.step("PAK")
            print("Done!")

        loader.set_length("OBJ", objects)
        loader.set_length("IMG_EX", images)

        # The number of images we need to load is the number of objects,
        # excluding some types like Stylevars or PackLists.
        loader.set_length(
            "IMG",
            sum(
                len(all_obj[key])
                for key, opts in
                OBJ_TYPES.items()
                if opts.has_img
            )
        )

        for obj_type, objs in all_obj.items():
            for obj_id, obj_data in objs.items():
                print("Loading " + obj_type + ' "' + obj_id + '"!')
                # parse through the object and return the resultant class
                try:
                    object_ = OBJ_TYPES[obj_type].cls.parse(
                        ParseData(
                            obj_data.zip_file,
                            obj_id,
                            obj_data.info_block,
                            obj_data.pak_id,
                        )
                    )
                except (NoKeyError, IndexError) as e:
                    # Re-raise with the object ID attached for context.
                    reraise_keyerror(e, obj_id)

                object_.pak_id = obj_data.pak_id
                object_.pak_name = obj_data.disp_name
                # Apply every override registered for this object.
                for override_data in obj_override[obj_type].get(obj_id, []):
                    override = OBJ_TYPES[obj_type].cls.parse(override_data)
                    object_.add_over(override)
                data[obj_type].append(object_)
                loader.step("OBJ")

        # Extract the image resources into the cache, then move them
        # into the images cache folder.
        cache_folder = os.path.abspath('../cache/')
        shutil.rmtree(cache_folder, ignore_errors=True)
        img_loc = os.path.join('resources', 'bee2')
        for zip_file in zips:
            for path in zip_names(zip_file):
                loc = os.path.normcase(path).casefold()
                if loc.startswith(img_loc):
                    loader.step("IMG_EX")
                    zip_file.extract(path, path=cache_folder)

        shutil.rmtree('../images/cache', ignore_errors=True)
        if os.path.isdir("../cache/resources/bee2"):
            shutil.move("../cache/resources/bee2", "../images/cache")
        shutil.rmtree('../cache/', ignore_errors=True)
    finally:
        # close them all, we've already read the contents.
        for z in zips:
            z.close()

    print('Allocating styled items...')
    setup_style_tree(
        data['Item'],
        data['Style'],
        log_item_fallbacks,
        log_missing_styles,
    )
    print(data['zips'])
    print('Done!')
    return data
def load_packages(
        pak_dir,
        load_res,
        log_item_fallbacks=False,
        log_missing_styles=False,
        log_missing_ent_count=False,
        ):
    """Scan and read in all packages in the specified directory.

    pak_dir: packages directory, relative to the parent of the CWD.
    load_res: when False, skip the resource-extraction stage entirely.
    The log_* flags are stored for later use while parsing objects.
    Returns the global data dict, filled with parsed objects per type.
    """
    global res_count, LOG_ENT_COUNT
    pak_dir = os.path.abspath(os.path.join(os.getcwd(), '..', pak_dir))
    if load_res:
        res_count = 0
    else:
        # res_count stays -1, which parse_package treats as "skip".
        loader.skip_stage("RES")

    LOG_ENT_COUNT = log_missing_ent_count
    print('ENT_COUNT:', LOG_ENT_COUNT)
    zips = []
    data['zips'] = []
    try:
        find_packages(pak_dir, zips, data['zips'])

        loader.set_length("PAK", len(packages))

        # Reset the global object registries for this load.
        for obj_type in obj_types:
            all_obj[obj_type] = {}
            obj_override[obj_type] = defaultdict(list)
            data[obj_type] = []

        objects = 0
        for pak_id, (zip_file, info, name, dispName) in packages.items():
            print(("Reading objects from '" + pak_id + "'...").ljust(50), end='')
            obj_count = parse_package(zip_file, info, pak_id, dispName)
            objects += obj_count
            loader.step("PAK")
            print("Done!")

        loader.set_length("OBJ", objects)
        # Except for StyleVars, each object will have at least 1 image -
        # in UI.py we step the progress once per object.
        loader.set_length("IMG", objects - len(all_obj['StyleVar']))

        for obj_type, objs in all_obj.items():
            for obj_id, obj_data in objs.items():
                print("Loading " + obj_type + ' "' + obj_id + '"!')
                # parse through the object and return the resultant class
                try:
                    object_ = obj_types[obj_type].parse(
                        ParseData(
                            obj_data.zip_file,
                            obj_id,
                            obj_data.info_block,
                        )
                    )
                except (NoKeyError, IndexError) as e:
                    # Re-raise with the object ID attached for context.
                    reraise_keyerror(e, obj_id)

                object_.pak_id = obj_data.pak_id
                object_.pak_name = obj_data.disp_name
                # Apply every override registered for this object.
                for zip_file, info_block in \
                        obj_override[obj_type].get(obj_id, []):
                    override = obj_types[obj_type].parse(
                        ParseData(
                            zip_file,
                            obj_id,
                            info_block,
                        )
                    )
                    object_.add_over(override)
                data[obj_type].append(object_)
                loader.step("OBJ")

        if load_res:
            print('Extracting Resources...')
            for zip_file in zips:
                for path in zip_names(zip_file):
                    loc = os.path.normcase(path)
                    if loc.startswith("resources"):
                        loader.step("RES")
                        zip_file.extract(path, path="../cache/")

            # Sort the extracted resources into their destination
            # caches, clearing the old ones first.
            shutil.rmtree('../images/cache', ignore_errors=True)
            shutil.rmtree('../inst_cache/', ignore_errors=True)
            shutil.rmtree('../source_cache/', ignore_errors=True)

            if os.path.isdir("../cache/resources/bee2"):
                shutil.move("../cache/resources/bee2", "../images/cache")
            if os.path.isdir("../cache/resources/instances"):
                shutil.move("../cache/resources/instances", "../inst_cache/")

            for file_type in ("materials", "models", "sound", "scripts"):
                if os.path.isdir("../cache/resources/" + file_type):
                    shutil.move(
                        "../cache/resources/" + file_type,
                        "../source_cache/" + file_type,
                    )
            shutil.rmtree('../cache/', ignore_errors=True)
            print('Done!')
    finally:
        # close them all, we've already read the contents.
        for z in zips:
            z.close()

    print('Allocating styled items...')
    setup_style_tree(
        data['Item'],
        data['Style'],
        log_item_fallbacks,
        log_missing_styles,
    )
    print(data['zips'])
    print('Done!')
    return data
def load_packages(
        pak_dir,
        log_item_fallbacks=False,
        log_missing_styles=False,
        log_missing_ent_count=False,
        ):
    """Scan and read in all packages in the specified directory.

    pak_dir: packages directory, relative to the parent of the CWD.
    The log_* flags are stored for later use while parsing objects.
    Exits the process if the directory is missing; otherwise returns
    the global data dict, filled with parsed objects per type.
    """
    global LOG_ENT_COUNT
    pak_dir = os.path.abspath(os.path.join(os.getcwd(), '..', pak_dir))

    if not os.path.isdir(pak_dir):
        from tkinter import messagebox
        import sys
        # We don't have a packages directory!
        messagebox.showerror(
            master=loader,
            title='BEE2 - Invalid Packages Directory!',
            message='The given packages directory is not present!\n'
                    'Get the packages from '
                    '"http://github.com/TeamSpen210/BEE2-items" '
                    'and place them in "' + pak_dir + os.path.sep + '".',
                    # Add slash to the end to indicate it's a folder.
        )
        sys.exit('No Packages Directory!')

    LOG_ENT_COUNT = log_missing_ent_count
    print('ENT_COUNT:', LOG_ENT_COUNT)
    zips = []
    data['zips'] = []
    try:
        find_packages(pak_dir, zips, data['zips'])

        loader.set_length("PAK", len(packages))

        # Reset the global object registries for this load.
        for obj_type in OBJ_TYPES:
            all_obj[obj_type] = {}
            obj_override[obj_type] = defaultdict(list)
            data[obj_type] = []

        objects = 0
        images = 0
        for pak_id, (zip_file, info, name, dispName) in packages.items():
            print(
                ("Reading objects from '" + pak_id + "'...").ljust(50),
                end=''
            )
            obj_count, img_count = parse_package(
                zip_file,
                info,
                pak_id,
                dispName,
            )
            objects += obj_count
            images += img_count
            loader.step("PAK")
            print("Done!")

        loader.set_length("OBJ", objects)
        loader.set_length("IMG_EX", images)

        # The number of images we need to load is the number of objects,
        # excluding some types like Stylevars or PackLists.
        loader.set_length(
            "IMG",
            sum(
                len(all_obj[key])
                for key, opts in
                OBJ_TYPES.items()
                if opts.has_img
            )
        )

        for obj_type, objs in all_obj.items():
            for obj_id, obj_data in objs.items():
                print("Loading " + obj_type + ' "' + obj_id + '"!')
                # parse through the object and return the resultant class
                try:
                    object_ = OBJ_TYPES[obj_type].cls.parse(
                        ParseData(
                            obj_data.zip_file,
                            obj_id,
                            obj_data.info_block,
                            obj_data.pak_id,
                        )
                    )
                except (NoKeyError, IndexError) as e:
                    # Re-raise with the object ID attached for context.
                    reraise_keyerror(e, obj_id)

                object_.pak_id = obj_data.pak_id
                object_.pak_name = obj_data.disp_name
                # Apply every override registered for this object.
                for override_data in obj_override[obj_type].get(obj_id, []):
                    override = OBJ_TYPES[obj_type].cls.parse(
                        override_data
                    )
                    object_.add_over(override)
                data[obj_type].append(object_)
                loader.step("OBJ")

        # Extract the image resources into the cache, then move them
        # into the images cache folder.
        cache_folder = os.path.abspath('../cache/')
        shutil.rmtree(cache_folder, ignore_errors=True)
        img_loc = os.path.join('resources', 'bee2')
        for zip_file in zips:
            for path in zip_names(zip_file):
                loc = os.path.normcase(path).casefold()
                if loc.startswith(img_loc):
                    loader.step("IMG_EX")
                    zip_file.extract(path, path=cache_folder)

        shutil.rmtree('../images/cache', ignore_errors=True)
        if os.path.isdir("../cache/resources/bee2"):
            shutil.move("../cache/resources/bee2", "../images/cache")
        shutil.rmtree('../cache/', ignore_errors=True)
    finally:
        # close them all, we've already read the contents.
        for z in zips:
            z.close()

    print('Allocating styled items...')
    setup_style_tree(
        data['Item'],
        data['Style'],
        log_item_fallbacks,
        log_missing_styles,
    )
    print(data['zips'])
    print('Done!')
    return data
def load_packages(
        pak_dir,
        log_item_fallbacks=False,
        log_missing_styles=False,
        log_missing_ent_count=False,
        log_incorrect_packfile=False,
        ):
    """Scan and read in all packages in the specified directory.

    pak_dir: packages directory, relative to the parent of the CWD.
    The log_* flags are stored for later use while parsing objects.
    Exits the process if the directory is missing; otherwise returns
    the global data dict, filled with parsed objects per type.
    """
    global LOG_ENT_COUNT, CHECK_PACKFILE_CORRECTNESS
    pak_dir = os.path.abspath(os.path.join(os.getcwd(), '..', pak_dir))

    if not os.path.isdir(pak_dir):
        from tkinter import messagebox
        import sys
        # We don't have a packages directory!
        messagebox.showerror(
            master=loader,
            title='BEE2 - Invalid Packages Directory!',
            message='The given packages directory is not present!\n'
                    'Get the packages from '
                    '"http://github.com/TeamSpen210/BEE2-items" '
                    'and place them in "' + pak_dir + os.path.sep + '".',
                    # Add slash to the end to indicate it's a folder.
        )
        sys.exit('No Packages Directory!')

    LOG_ENT_COUNT = log_missing_ent_count
    CHECK_PACKFILE_CORRECTNESS = log_incorrect_packfile
    zips = []
    data['zips'] = []
    try:
        find_packages(pak_dir, zips, data['zips'])

        pack_count = len(packages)
        loader.set_length("PAK", pack_count)

        # Reset the global object registries for this load.
        for obj_type in OBJ_TYPES:
            all_obj[obj_type] = {}
            obj_override[obj_type] = defaultdict(list)
            data[obj_type] = []

        images = 0
        for pak_id, pack in packages.items():
            if not pack.enabled:
                # Shrink the progress bar to account for skipped packs.
                LOGGER.info('Package {id} disabled!', id=pak_id)
                pack_count -= 1
                loader.set_length("PAK", pack_count)
                continue

            LOGGER.info('Reading objects from "{id}"...', id=pak_id)
            img_count = parse_package(pack)
            images += img_count
            loader.step("PAK")

        # If new packages were added, update the config!
        PACK_CONFIG.save_check()

        loader.set_length("OBJ", sum(
            len(obj_type)
            for obj_type in
            all_obj.values()
        ))
        loader.set_length("IMG_EX", images)

        # The number of images we need to load is the number of objects,
        # excluding some types like Stylevars or PackLists.
        loader.set_length(
            "IMG",
            sum(
                len(all_obj[key])
                for key, opts in OBJ_TYPES.items()
                if opts.has_img
            )
        )

        for obj_type, objs in all_obj.items():
            for obj_id, obj_data in objs.items():
                LOGGER.debug('Loading {type} "{id}"!', type=obj_type, id=obj_id)
                # parse through the object and return the resultant class
                try:
                    object_ = OBJ_TYPES[obj_type].cls.parse(
                        ParseData(
                            obj_data.zip_file,
                            obj_id,
                            obj_data.info_block,
                            obj_data.pak_id,
                        )
                    )
                except (NoKeyError, IndexError) as e:
                    # Re-raise with the object ID attached for context.
                    reraise_keyerror(e, obj_id)

                object_.pak_id = obj_data.pak_id
                object_.pak_name = obj_data.disp_name
                # Apply every override registered for this object.
                for override_data in obj_override[obj_type].get(obj_id, []):
                    override = OBJ_TYPES[obj_type].cls.parse(
                        override_data
                    )
                    object_.add_over(override)
                data[obj_type].append(object_)
                loader.step("OBJ")

        # Extract the image resources into the cache, then move them
        # into the images cache folder.
        cache_folder = os.path.abspath('../cache/')
        shutil.rmtree(cache_folder, ignore_errors=True)
        img_loc = os.path.join('resources', 'bee2')
        for zip_file in zips:
            for path in zip_names(zip_file):
                loc = os.path.normcase(path).casefold()
                if loc.startswith(img_loc):
                    loader.step("IMG_EX")
                    zip_file.extract(path, path=cache_folder)

        shutil.rmtree('../images/cache', ignore_errors=True)
        if os.path.isdir("../cache/resources/bee2"):
            shutil.move("../cache/resources/bee2", "../images/cache")
        shutil.rmtree('../cache/', ignore_errors=True)
    finally:
        # close them all, we've already read the contents.
        for z in zips:
            z.close()

    LOGGER.info('Allocating styled items...')
    setup_style_tree(
        data['Item'],
        data['Style'],
        log_item_fallbacks,
        log_missing_styles,
    )
    return data