def scan_asset_folder(self, folder):
    """Scan a Starbound asset folder and return (asset_key, source) tuples.

    Three layouts are handled:
    - a folder containing a packed.pak database (vanilla assets),
    - a mod described by a .modinfo file (pointing at loose files or a pak),
    - a really old mod layout with a bare "assets" subdirectory.

    Returns an empty list when no usable assets are found.
    """
    pak_path = os.path.join(folder, "packed.pak")
    if os.path.isfile(pak_path):
        # Vanilla-style packed assets: every key in the pak comes from here.
        db = starbound.open_file(pak_path)
        index = [(x, pak_path) for x in db.get_index()]
        return index
    else:
        # old style, probably a mod
        index = []
        mod_assets = None
        files = os.listdir(folder)
        logging.debug(files)
        # TODO: would like to keep this idea but moved to modpak specific function
        found_mod_info = False
        # will need more logic to handle .modpack with modinfo inside.
        for f in files:
            if f.endswith(".modinfo"):
                modinfo = os.path.join(folder, f)
                try:
                    modinfo_data = load_asset_file(modinfo)
                    # BUG FIX: a .modinfo without a "path" key used to raise
                    # an unhandled KeyError (only ValueError is caught below).
                    # Fall back to "./" for a missing or empty path, matching
                    # the old empty-value behavior.
                    path = modinfo_data.get("path") or "./"
                    mod_assets = os.path.join(folder, path)
                    found_mod_info = True
                except ValueError:
                    # really old mods
                    folder = os.path.join(folder, "assets")
                    if os.path.isdir(folder):
                        mod_assets = folder
        logging.debug(mod_assets)
        if mod_assets is None:
            # Nothing that looks like assets was found.
            return index
        elif found_mod_info and self.is_packed_file(mod_assets):
            # TODO: make a .pak scanner function that works for vanilla and mods
            pak_path = os.path.normpath(mod_assets)
            db = starbound.open_file(pak_path)
            for x in db.get_index():
                # removes thumbs.db etc from user pak files
                if re.match(ignore_assets, x) is None:
                    index.append((x, pak_path))
            return index
        elif not os.path.isdir(mod_assets):
            return index
        # now we can scan!
        for root, dirs, files in os.walk(mod_assets):
            for f in files:
                if re.match(ignore_assets, f) is None:
                    asset_folder = os.path.normpath(mod_assets)
                    asset_file = os.path.normpath(
                        os.path.join(root.replace(folder, ""), f))
                    index.append((asset_file, asset_folder))
        return index
def scan_asset_folder(self, folder):
    """Return (asset_key, source) tuples for a Starbound asset folder.

    Handles vanilla packed.pak folders, .modinfo-described mods (loose or
    packed), and legacy mods with a bare "assets" subdirectory. Returns an
    empty list when nothing usable is found.
    """
    pak_path = os.path.join(folder, "packed.pak")
    if os.path.isfile(pak_path):
        db = starbound.open_file(pak_path)
        index = [(x, pak_path) for x in db.get_index()]
        return index
    else:
        # old style, probably a mod
        index = []
        mod_assets = None
        files = os.listdir(folder)
        logging.debug(files)
        # TODO: would like to keep this idea but moved to modpak specific function
        found_mod_info = False
        # will need more logic to handle .modpack with modinfo inside.
        for f in files:
            if f.endswith(".modinfo"):
                modinfo = os.path.join(folder, f)
                try:
                    modinfo_data = load_asset_file(modinfo)
                    # ROBUSTNESS FIX: .modinfo files without a "path" key
                    # used to raise an unhandled KeyError here (only
                    # ValueError is caught); default to the mod folder.
                    path = modinfo_data.get("path", "./")
                    mod_assets = os.path.join(folder, path)
                    found_mod_info = True
                except ValueError:
                    # really old mods
                    folder = os.path.join(folder, "assets")
                    if os.path.isdir(folder):
                        mod_assets = folder
        logging.debug(mod_assets)
        # IDIOM FIX: compare to None with "is"/"is None", not "==" (PEP 8).
        if mod_assets is None:
            return index
        elif found_mod_info and self.is_packed_file(mod_assets):
            # TODO: make a .pak scanner function that works for vanilla and mods
            pak_path = os.path.normpath(mod_assets)
            db = starbound.open_file(pak_path)
            for x in db.get_index():
                # removes thumbs.db etc from user pak files
                if re.match(ignore_assets, x) is None:
                    index.append((x, pak_path))
            return index
        elif not os.path.isdir(mod_assets):
            return index
        # now we can scan!
        for root, dirs, files in os.walk(mod_assets):
            for f in files:
                if re.match(ignore_assets, f) is None:
                    asset_folder = os.path.normpath(mod_assets)
                    asset_file = os.path.normpath(
                        os.path.join(root.replace(folder, ""), f))
                    index.append((asset_file, asset_folder))
        return index
def read(self, key, path, image=False):
    """Read the asset identified by *key* from *path*.

    *path* is either a packed .pak/.db file or the root of a loose asset
    directory. When *image* is true the raw bytes are returned and cached
    in self.image_cache; otherwise the data is parsed as JSON. Images that
    cannot be found fall back to the vanilla assets; other failures are
    logged and None is returned.
    """
    if self.is_packed_file(path):
        # Keys in packed databases are stored lower-case.
        key = key.lower()
        db = starbound.open_file(path)
        # try the cache first
        if image and key in self.image_cache:
            return self.image_cache[key]
        try:
            data = db.get(key)
        except KeyError:
            if image and path != self.vanilla_assets:
                # Missing from this pak: fall back to the vanilla assets.
                img = self.read(key, self.vanilla_assets, image)
                self.image_cache[key] = img
                return img
            else:
                logging.exception(
                    "Unable to read db asset '%s' from '%s'" % (key, path))
                return None
        if image:
            img = data
            self.image_cache[key] = img
            return img
        else:
            try:
                asset = parse_json(data.decode("utf-8"), key)
                return asset
            except ValueError:
                logging.exception(
                    "Unable to read db asset '%s' from '%s'" % (key, path))
                return None
    else:
        # Loose file on disk; keys start with a separator, strip it to join.
        asset_file = os.path.join(path, key[1:])
        try:
            if image:
                # BUG FIX: close the file handle instead of leaking it.
                with open(asset_file, "rb") as fh:
                    img = fh.read()
                self.image_cache[key] = img
                return img
            else:
                asset = load_asset_file(asset_file)
                return asset
        except (FileNotFoundError, ValueError):
            if image and path != self.vanilla_assets:
                if self.is_packed_file(self.vanilla_assets):
                    # The packed vanilla DB uses forward slashes as keys.
                    img = self.read(key.replace("\\", "/"),
                                    self.vanilla_assets, image)
                    self.image_cache[key] = img
                    return img
                else:
                    img = self.read(key, self.vanilla_assets, image)
                    self.image_cache[key] = img
                    return img
            else:
                logging.exception(
                    "Unable to read asset file '%s' from '%s'" % (key, path))
                return None
def main():
    # Command-line entry point (Python 2): extract every file from a
    # Starbound package (.pak) into a destination directory.
    p = optparse.OptionParser()
    p.add_option('-d', '--destination', dest='path',
                 help='Destination directory')
    options, arguments = p.parse_args()
    if len(arguments) != 1:
        raise ValueError('Only one argument is supported (package path)')
    package_path = arguments[0]
    # Default to the current directory when no destination was given.
    base = options.path if options.path else '.'
    with starbound.open_file(package_path) as package:
        if not isinstance(package, starbound.Package):
            raise ValueError('Provided path is not a package')
        print 'Loading index...'
        # Get the paths from the index in the database.
        paths = list(package.get_index())
        print 'Index loaded. Extracting %d files...' % len(paths)
        num_files = 0
        # One progress dot per ~1% of files (at least 1 to avoid mod-by-0).
        percentage_count = max(len(paths) // 100, 1)
        for path in paths:
            # Package paths begin with '/', so plain concatenation roots the
            # destination under *base* — presumably always true; confirm.
            dest_path = base + path
            dir_path = os.path.dirname(dest_path)
            if not os.path.exists(dir_path):
                os.makedirs(dir_path)
            try:
                data = package.get(path)
            except:
                # break the dots in case std{out,err} are the same tty:
                sys.stdout.write('\n')
                sys.stdout.flush()
                print >> sys.stderr, 'W: Failed to read', path
                continue
            # Binary mode: asset data is raw bytes.
            with open(dest_path, 'wb') as file:
                file.write(data)
            num_files += 1
            if not num_files % percentage_count:
                sys.stdout.write('.')
                sys.stdout.flush()
        print
        print 'Extracted %d files.' % num_files
def read(self, key, path, image=False):
    """Load an asset (*key*) from *path* (packed file or loose directory).

    Image reads return raw bytes and are cached in self.image_cache, with a
    fallback to self.vanilla_assets when the key is missing. Non-image reads
    are decoded as UTF-8 and parsed as JSON. Unrecoverable failures are
    logged and None is returned.
    """
    if self.is_packed_file(path):
        # Packed databases index their keys in lower case.
        key = key.lower()
        db = starbound.open_file(path)
        # try the cache first
        if image and key in self.image_cache:
            return self.image_cache[key]
        try:
            data = db.get(key)
        except KeyError:
            if image and path != self.vanilla_assets:
                # Not in this pak: retry against the vanilla assets.
                img = self.read(key, self.vanilla_assets, image)
                self.image_cache[key] = img
                return img
            else:
                logging.exception("Unable to read db asset '%s' from '%s'" % (key, path))
                return None
        if image:
            img = data
            self.image_cache[key] = img
            return img
        else:
            try:
                asset = parse_json(data.decode("utf-8"), key)
                return asset
            except ValueError:
                logging.exception("Unable to read db asset '%s' from '%s'" % (key, path))
                return None
    else:
        # Loose file; drop the leading separator from the key before joining.
        asset_file = os.path.join(path, key[1:])
        try:
            if image:
                # BUG FIX: use a context manager so the handle is closed
                # instead of leaked.
                with open(asset_file, "rb") as fh:
                    img = fh.read()
                self.image_cache[key] = img
                return img
            else:
                asset = load_asset_file(asset_file)
                return asset
        except (FileNotFoundError, ValueError):
            if image and path != self.vanilla_assets:
                if self.is_packed_file(self.vanilla_assets):
                    # Packed vanilla assets use forward-slash keys.
                    img = self.read(key.replace("\\", "/"),
                                    self.vanilla_assets, image)
                    self.image_cache[key] = img
                    return img
                else:
                    img = self.read(key, self.vanilla_assets, image)
                    self.image_cache[key] = img
                    return img
            else:
                logging.exception("Unable to read asset file '%s' from '%s'" % (key, path))
                return None
def main(): p = optparse.OptionParser() p.add_option('-d', '--destination', dest='path', help='Destination directory') options, arguments = p.parse_args() if len(arguments) != 1: raise ValueError('Only one argument is supported (package path)') package_path = arguments[0] base = options.path if options.path else '.' with starbound.open_file(package_path) as package: if not isinstance(package, starbound.Package): raise ValueError('Provided path is not a package') print 'Loading index...' # Get the paths from the index in the database. paths = list(package.get_index()) print 'Index loaded. Extracting %d files...' % len(paths) num_files = 0 percentage_count = max(len(paths) // 100, 1) for path in paths: dest_path = base + path dir_path = os.path.dirname(dest_path) if not os.path.exists(dir_path): os.makedirs(dir_path) try: data = package.get(path) except: # break the dots in case std{out,err} are the same tty: sys.stdout.write('\n') sys.stdout.flush() print >>sys.stderr, 'W: Failed to read', path continue with open(dest_path, 'w') as file: file.write(data) num_files += 1 if not num_files % percentage_count: sys.stdout.write('.') sys.stdout.flush() print print 'Extracted %d files.' % num_files
def main():
    # Command-line entry point (Python 2): inspect one or more Starbound
    # files, dispatching to the helper that matches each requested option.
    p = optparse.OptionParser()
    p.add_option('-f', '--get-file', dest='path',
                 help=get_file.__doc__)
    p.add_option('-i', '--get-file-list', dest='get_file_list',
                 action='store_true', default=False,
                 help=get_file_list.__doc__)
    p.add_option('-d', '--get-leaf', dest='leaf_key',
                 help=get_leaf.__doc__)
    p.add_option('-g', '--get-value', dest='key',
                 help=get_value.__doc__)
    p.add_option('-l', '--print-leaves', dest='print_leaves',
                 action='store_true', default=False,
                 help=print_leaves.__doc__)
    options, arguments = p.parse_args()
    for path in arguments:
        with starbound.open_file(path) as file:
            if options.path:
                # --get-file short-circuits every other option and any
                # remaining paths.
                get_file(file, options.path)
                return
            print file
            print
            if options.get_file_list:
                get_file_list(file)
                print
            if options.leaf_key:
                get_leaf(file, options.leaf_key)
                print
            if options.key:
                get_value(file, options.key)
                print
            if options.print_leaves:
                print_leaves(file)
                print
def main():
    # Command-line entry point (Python 2): print a summary of each given
    # Starbound file and optionally drill into world values or leaves.
    p = optparse.OptionParser()
    p.add_option('-l', '--print-leaves', dest='print_leaves',
                 action='store_true', default=False,
                 help=print_leaves.__doc__)
    p.add_option('-w', '--get-world-value', dest='world_key_path',
                 help=get_world_value.__doc__)
    options, arguments = p.parse_args()
    for path in arguments:
        with starbound.open_file(path) as file:
            print file
            print
            if options.world_key_path:
                get_world_value(file, options.world_key_path)
                print
            if options.print_leaves:
                print_leaves(file)
                print
def main():
    # Command-line entry point (Python 2): print data for one region (or
    # the region containing a tile) of a Starbound world file.
    p = optparse.OptionParser('Usage: %prog [<x> <y>] <path>')
    p.add_option('-c', '--tile-coords', dest='tile_coords',
                 action='store_true', default=False,
                 help='X, Y is for a tile instead of a region')
    p.add_option('-e', '--entities', dest='entities',
                 action='store_true', default=False,
                 help='Output entity data instead of tile data')
    p.add_option('-r', '--raw', dest='raw',
                 action='store_true', default=False,
                 help='Output data in a raw format')
    options, arguments = p.parse_args()
    # Get the path and coordinates from arguments.
    if len(arguments) == 1:
        path = arguments[0]
        x, y = None, None
    elif len(arguments) == 3:
        x, y, path = arguments
        x, y = int(x), int(y)
        if options.tile_coords:
            # Regions are 32x32 tiles; convert tile to region coordinates.
            x //= 32
            y //= 32
    else:
        p.error('Incorrect number of arguments')
    with starbound.open_file(path) as world:
        # Get information about the world.
        metadata, version = world.get_metadata()
        if version == 1:
            size = metadata['planet']['size']
            spawn = metadata.get('playerStart')
        elif version == 2 or version == 3:
            size = metadata['worldTemplate']['size']
            spawn = metadata.get('playerStart')
        else:
            p.error('Unsupported metadata version %d' % version)
        # Default coordinates to spawn point.
        if x is None or y is None:
            x, y = int(spawn[0] / 32), int(spawn[1] / 32)
        # Only print the pure data if --raw is specified.
        if options.raw:
            if options.entities:
                print json.dumps(world.get_entities(x, y), indent=2,
                                 separators=(',', ': '), sort_keys=True)
            else:
                print world.get_region_data(x, y)
            return
        print 'World size: %d by %d regions' % (size[0] / 32, size[1] / 32)
        if spawn:
            print 'Spawn point region: %d, %d' % (spawn[0] // 32, spawn[1] // 32)
        print 'Outputting region: %d, %d' % (x, y)
        print
        if options.entities:
            data = world.get_entities(x, y)
            print json.dumps(data, indent=2, separators=(',', ': '),
                             sort_keys=True)
        else:
            pretty_print_tiles(world, x, y)
def main(): p = optparse.OptionParser('Usage: %prog [options] <input file>') p.add_option('-f', '--force', dest='force', action='store_true', default=False, help='ignore some errors') p.add_option('-o', '--output', dest='output', help='where to output repaired world (defaults to input file ' 'path with .repaired added to the end)') # TODO #p.add_option('-r', '--replace', action='append', # dest='replace', metavar='FROM,TO', # help='replace one tile material with another') p.add_option('-w', '--blank-world', dest='world', help='the blank .world file that was created in place of the ' '.fail one (for metadata recovery)') options, arguments = p.parse_args() if len(arguments) != 1: p.error('Incorrect number of arguments') try: world = starbound.open_file(arguments[0]) if not isinstance(world, starbound.World): raise Exception('Not a world') except Exception as e: p.error('Could not open fail file (%s)' % e) if options.output: out_name = options.output else: out_name = arguments[0] + '.repaired' if os.path.isfile(out_name): if options.force: print 'warning: overwriting existing file' else: p.error('"%s" already exists' % out_name) if options.world: fail_name = os.path.basename(arguments[0]) world_name = os.path.basename(options.world) if fail_name[:len(world_name)] != world_name: if options.force: print 'warning: .fail and .world filenames do not match' else: p.error('.fail and .world filenames do not match') try: blank_world = starbound.open_file(options.world) except Exception as e: p.error('Could not open blank world (%s)' % e) # This dict will contain all the keys and their data. 
data = dict() try: metadata, version = world.get_metadata() except Exception as e: if options.world: try: print 'warning: restoring metadata using blank world' metadata, version = blank_world.get_metadata() except Exception as e: p.error('Failed to restore metadata (%s)' % e) else: p.error('Metadata section is corrupt (%s)' % e) if version == 1: size = metadata['planet']['size'] elif version in (2, 3): size = metadata['worldTemplate']['size'] else: p.error('Unsupported metadata version %d' % version) regions_x = int(math.ceil(size[0] / 32)) regions_y = int(math.ceil(size[1] / 32)) print 'Attempting to recover %d×%d regions...' % (regions_x, regions_y) blocks_per_percent = world.num_blocks // 100 + 1 entries_recovered = 0 percent = 0 # Find all leaves and try to read them individually. for index in range(world.num_blocks): if index % blocks_per_percent == 0: print '%d%% (%d entries recovered)' % (percent, entries_recovered) percent += 1 block = world.get_block(index) if not isinstance(block, starbound.btreedb4.BTreeLeaf): continue stream = starbound.btreedb4.LeafReader(world, block) try: num_keys, = struct.unpack('>i', stream.read(4)) except Exception: continue # Ensure that the number of keys makes sense, otherwise skip the leaf. if num_keys > 100: continue for i in range(num_keys): try: cur_key = stream.read(world.key_size) cur_data = starbound.sbon.read_bytes(stream) except Exception: break layer, x, y = struct.unpack('>BHH', cur_key) # Skip this leaf if we encounter impossible indexes. if layer == 0 and (x != 0 or y != 0): break if layer not in (0, 1, 2) or x >= regions_x or y >= regions_y: break result = None if cur_key in data: # Duplicates should be checked up against the index, which # always wins. try: result = world.get_raw((layer, x, y)) except Exception: world.commit() try: result = world.get_raw((layer, x, y)) except Exception: pass world.commit() # Use the data from this leaf if not using the index. 
if not result: result = cur_data # Validate the data before storing it. # TODO: This is where we would do the tile replace. try: if layer == 0: temp_stream = io.BytesIO(result) temp_stream.seek(8) name, _, _ = starbound.sbon.read_document(temp_stream) assert name == 'WorldMetadata', 'Invalid world data' else: temp_stream = io.BytesIO(zlib.decompress(result)) if layer == 1: if len(temp_stream.getvalue()) != 3 + 32 * 32 * 23: continue elif layer == 2: starbound.sbon.read_document_list(temp_stream) except Exception: continue # Count the entry the first time it's stored. if cur_key not in data: entries_recovered += 1 data[cur_key] = result METADATA_KEY = '\x00\x00\x00\x00\x00' # Ensure that the metadata key is in the data. if METADATA_KEY not in data: if options.world: try: data[METADATA_KEY] = blank_world.get_raw((0, 0, 0)) except Exception: p.error('Failed to recover metadata from alternate world') else: if options.force: try: data[METADATA_KEY] = world.get_raw((0, 0, 0)) print 'warning: using partially recovered metadata' except Exception: p.error('Failed to recover partial metadata') else: p.error('Failed to recover metadata, use -f to recover partial') print 'Done! %d entries recovered. Creating BTree database...' % entries_recovered # Try not to exceed this number of keys per leaf. LEAF_KEYS_TRESHOLD = 10 # Try not to exceed this size for a leaf. LEAF_SIZE_TRESHOLD = world.block_size * .8 # Fill indexes up to this ratio. INDEX_FILL = .9 # 6 is the number of bytes used for signature + next block pointer. LEAF_BYTES = world.block_size - 6 # 11 is the number of bytes in the index header. INDEX_BYTES = world.block_size - 11 # Maximum number of keys that can go into an index. INDEX_MAX_KEYS = int(INDEX_BYTES // (world.key_size + 4) * INDEX_FILL) # The data of individual blocks will be stored in this list. blocks = [] buffer = io.BytesIO() # This will create an initial leaf and connect it to following leaves which # will all contain the data currently in the buffer. 
def dump_buffer(): buffer_size = buffer.tell() buffer.seek(0) block_data = b'LL' + struct.pack('>i', num_keys) + buffer.read(LEAF_BYTES - 4) while buffer.tell() < buffer_size: blocks.append(block_data + struct.pack('>i', len(blocks) + 1)) block_data = b'LL' + buffer.read(LEAF_BYTES) blocks.append(block_data.ljust(world.block_size - 4, b'\x00') + struct.pack('>i', -1)) # Empty the buffer. buffer.seek(0) buffer.truncate() # The number of keys that will be stored in the next created leaf. num_keys = 0 # Map of key range to leaf block pointer. range_to_leaf = dict() # All the keys, sorted (important). keys = sorted(data.keys()) # Build all the leaf blocks. min_key = None for key in keys: if not num_keys: # Remember the first key of the leaf. min_key = key buffer.write(key) starbound.sbon.write_bytes(buffer, data[key]) num_keys += 1 # Empty buffer once one of the tresholds is reached. if num_keys >= LEAF_KEYS_TRESHOLD or buffer.tell() >= LEAF_SIZE_TRESHOLD: range_to_leaf[(min_key, key)] = len(blocks) dump_buffer() num_keys = 0 # Empty any remaining data in the buffer. if buffer.tell(): range_to_leaf[(min_key, key)] = len(blocks) dump_buffer() print 'Created %d blocks containing world data' % len(blocks) def build_index_level(range_to_block, level=0): # Get a list of ranges that this index level needs to point to. index_ranges = sorted(range_to_block.keys()) # The new list of ranges that the next level of indexes can use. 
new_ranges = dict() for i in range(0, len(index_ranges), INDEX_MAX_KEYS): ranges = index_ranges[i:i + INDEX_MAX_KEYS] min_key, _ = ranges[0] _, max_key = ranges[-1] left_block = range_to_block[ranges.pop(0)] index_data = io.BytesIO() index_data.write(b'II' + struct.pack('>Bii', level, len(ranges), left_block)) for key_range in ranges: index_data.write(key_range[0] + struct.pack('>i', range_to_block[key_range])) new_ranges[(min_key, max_key)] = len(blocks) blocks.append(index_data.getvalue().ljust(world.block_size, b'\x00')) print '- Created %d index(es) for level %d' % (len(new_ranges), level) return new_ranges # Build the indexes in multiple levels up to a single root node. print 'Creating root node...' root_is_leaf = True level = 0 current_index = range_to_leaf while len(current_index) > 1: current_index = build_index_level(current_index, level) root_is_leaf = False level += 1 root_node = current_index.itervalues().next() # Also build an alternative root node. print 'Creating alternate root node...' alternate_root_is_leaf = True level = 0 current_index = range_to_leaf while len(current_index) > 1: current_index = build_index_level(current_index, level) alternate_root_is_leaf = False level += 1 alternate_root_node = current_index.itervalues().next() # The last block will be a free block. blocks.append(b'FF\xFF\xFF\xFF\xFF' + b'\x00' * (world.block_size - 6)) print 'Writing all the data to disk...' with open(out_name, 'w') as f: f.write(b'SBBF02') f.write(struct.pack('>ii?i', world.header_size, world.block_size, True, len(blocks) - 1)) f.write('\x00' * (32 - f.tell())) f.write(b'BTreeDB4\x00\x00\x00\x00') f.write(world.identifier + '\x00' * (12 - len(world.identifier))) f.write(struct.pack('>i?xi?xxxi?', world.key_size, False, root_node, root_is_leaf, alternate_root_node, alternate_root_is_leaf)) f.write('\x00' * (world.header_size - f.tell())) for block in blocks: f.write(block) print 'Done!'
def main():
    # Command-line entry point (Python 2): print data for one region (or
    # the region containing a tile) of a Starbound world file. This variant
    # treats any metadata version other than 1 as the worldTemplate layout.
    p = optparse.OptionParser('Usage: %prog [<x> <y>] <path>')
    p.add_option('-c', '--tile-coords', dest='tile_coords',
                 action='store_true', default=False,
                 help='X, Y is for a tile instead of a region')
    p.add_option('-e', '--entities', dest='entities',
                 action='store_true', default=False,
                 help='Output entity data instead of tile data')
    p.add_option('-r', '--raw', dest='raw',
                 action='store_true', default=False,
                 help='Output data in a raw format')
    options, arguments = p.parse_args()
    # Get the path and coordinates from arguments.
    if len(arguments) == 1:
        path = arguments[0]
        x, y = None, None
    elif len(arguments) == 3:
        x, y, path = arguments
        x, y = int(x), int(y)
        if options.tile_coords:
            # Regions are 32x32 tiles; convert tile to region coordinates.
            x //= 32
            y //= 32
    else:
        p.error('Incorrect number of arguments')
    with starbound.open_file(path) as world:
        # Get information about the world.
        metadata, version = world.get_metadata()
        if version == 1:
            size = metadata['planet']['size']
            spawn = metadata.get('playerStart')
        else:
            size = metadata['worldTemplate']['size']
            spawn = metadata.get('playerStart')
        # Default coordinates to spawn point.
        if x is None or y is None:
            x, y = int(spawn[0] / 32), int(spawn[1] / 32)
        # Only print the pure data if --raw is specified.
        if options.raw:
            if options.entities:
                print json.dumps(world.get_entities(x, y), indent=2,
                                 separators=(',', ': '), sort_keys=True)
            else:
                print world.get_region_data(x, y)
            return
        print 'World size: %d by %d regions' % (size[0] / 32, size[1] / 32)
        if spawn:
            print 'Spawn point region: %d, %d' % (spawn[0] // 32, spawn[1] // 32)
        print 'Outputting region: %d, %d' % (x, y)
        print
        if options.entities:
            data = world.get_entities(x, y)
            print json.dumps(data, indent=2, separators=(',', ': '),
                             sort_keys=True)
        else:
            pretty_print_tiles(world, x, y)
def scan_modpak(self, modpak):
    """Return (asset_key, pak_path) tuples for every entry in a .modpak.

    TODO: may need support for reading the mod folder from the pakinfo file
    """
    pak_db = starbound.open_file(modpak)
    entries = []
    for asset_key in pak_db.get_index():
        entries.append((asset_key, modpak))
    return entries
def main():
    # Command-line entry point: repair a corrupt (.fail) Starbound world.
    # Recovers as many leaf entries as possible from the broken BTreeDB4
    # file, optionally restoring metadata from a blank replacement world,
    # then rebuilds a fresh BTree database at the output path.
    p = optparse.OptionParser('Usage: %prog [options] <input file>')
    p.add_option('-f', '--force', dest='force',
                 action='store_true', default=False,
                 help='ignore some errors')
    p.add_option('-o', '--output', dest='output',
                 help='where to output repaired world (defaults to input file '
                      'path with .repaired added to the end)')
    # TODO
    #p.add_option('-r', '--replace', action='append',
    #             dest='replace', metavar='FROM,TO',
    #             help='replace one tile material with another')
    p.add_option('-w', '--blank-world', dest='world',
                 help='the blank .world file that was created in place of the '
                      '.fail one (for metadata recovery)')
    options, arguments = p.parse_args()
    if len(arguments) != 1:
        p.error('Incorrect number of arguments')
    try:
        world = starbound.open_file(arguments[0])
        if not isinstance(world, starbound.World):
            raise Exception('Not a world')
    except Exception as e:
        p.error('Could not open fail file (%s)' % e)
    if options.output:
        out_name = options.output
    else:
        out_name = arguments[0] + '.repaired'
    if os.path.isfile(out_name):
        if options.force:
            print('warning: overwriting existing file')
        else:
            p.error('"%s" already exists' % out_name)
    if options.world:
        # Sanity check: the blank world's name should prefix the .fail name.
        fail_name = os.path.basename(arguments[0])
        world_name = os.path.basename(options.world)
        if fail_name[:len(world_name)] != world_name:
            if options.force:
                print('warning: .fail and .world filenames do not match')
            else:
                p.error('.fail and .world filenames do not match')
        try:
            blank_world = starbound.open_file(options.world)
        except Exception as e:
            p.error('Could not open blank world (%s)' % e)
    # This dict will contain all the keys and their data.
    data = dict()
    try:
        metadata, version = world.get_metadata()
    except Exception as e:
        if options.world:
            try:
                print('warning: restoring metadata using blank world')
                metadata, version = blank_world.get_metadata()
            except Exception as e:
                p.error('Failed to restore metadata (%s)' % e)
        else:
            p.error('Metadata section is corrupt (%s)' % e)
    try:
        if version == 1:
            size = metadata['planet']['size']
        elif version in (2, 3):
            size = metadata['worldTemplate']['size']
        else:
            size = [-1, -1]
            print('warning: unsupported metadata version %d' % version)
    except Exception as e:
        size = [-1, -1]
        print('warning: failed to read world size (%s)' % e)
    # NOTE: a [-1, -1] size yields zero regions below, so only the metadata
    # entry can be recovered in that case.
    regions_x = int(math.ceil(size[0] / 32))
    regions_y = int(math.ceil(size[1] / 32))
    print('Attempting to recover %d×%d regions...' % (regions_x, regions_y))
    blocks_per_percent = world.num_blocks // 100 + 1
    entries_recovered = 0
    percent = 0
    # Find all leaves and try to read them individually.
    for index in range(world.num_blocks):
        if index % blocks_per_percent == 0:
            print('%d%% (%d entries recovered)' % (percent, entries_recovered))
            percent += 1
        block = world.get_block(index)
        if not isinstance(block, starbound.btreedb4.BTreeLeaf):
            continue
        stream = starbound.btreedb4.LeafReader(world, block)
        try:
            num_keys, = struct.unpack('>i', stream.read(4))
        except Exception:
            continue
        # Ensure that the number of keys makes sense, otherwise skip the leaf.
        if num_keys > 100:
            continue
        for i in range(num_keys):
            try:
                cur_key = stream.read(world.key_size)
                cur_data = starbound.sbon.read_bytes(stream)
            except Exception:
                break
            # Keys are (layer, region_x, region_y), packed big-endian.
            layer, x, y = struct.unpack('>BHH', cur_key)
            # Skip this leaf if we encounter impossible indexes.
            if layer == 0 and (x != 0 or y != 0):
                break
            if layer not in (0, 1, 2) or x >= regions_x or y >= regions_y:
                break
            result = None
            if cur_key in data:
                # Duplicates should be checked up against the index, which
                # always wins.
                try:
                    result = world.get_raw((layer, x, y))
                except Exception:
                    world.commit()
                    try:
                        result = world.get_raw((layer, x, y))
                    except Exception:
                        pass
                    world.commit()
            # Use the data from this leaf if not using the index.
            if not result:
                result = cur_data
            # Validate the data before storing it.
            # TODO: This is where we would do the tile replace.
            try:
                if layer == 0:
                    temp_stream = io.BytesIO(result)
                    temp_stream.seek(8)
                    name, _, _ = starbound.sbon.read_document(temp_stream)
                    assert name == 'WorldMetadata', 'Invalid world data'
                else:
                    temp_stream = io.BytesIO(zlib.decompress(result))
                if layer == 1:
                    if len(temp_stream.getvalue()) != 3 + 32 * 32 * 23:
                        continue
                elif layer == 2:
                    starbound.sbon.read_document_list(temp_stream)
            except Exception:
                continue
            # Count the entry the first time it's stored.
            if cur_key not in data:
                entries_recovered += 1
            data[cur_key] = result
    METADATA_KEY = b'\x00\x00\x00\x00\x00'
    # Ensure that the metadata key is in the data.
    if METADATA_KEY not in data:
        if options.world:
            try:
                data[METADATA_KEY] = blank_world.get_raw((0, 0, 0))
            except Exception:
                p.error('Failed to recover metadata from alternate world')
        else:
            if options.force:
                try:
                    data[METADATA_KEY] = world.get_raw((0, 0, 0))
                    print('warning: using partially recovered metadata')
                except Exception:
                    p.error('Failed to recover partial metadata')
            else:
                p.error(
                    'Failed to recover metadata, use -w to load metadata '
                    'from another world, or -f to attempt partial recovery')
    print('Done! %d entries recovered. Creating BTree database...' %
          entries_recovered)
    # Try not to exceed this number of keys per leaf.
    LEAF_KEYS_TRESHOLD = 10
    # Try not to exceed this size for a leaf.
    LEAF_SIZE_TRESHOLD = world.block_size * .8
    # Fill indexes up to this ratio.
    INDEX_FILL = .9
    # 6 is the number of bytes used for signature + next block pointer.
    LEAF_BYTES = world.block_size - 6
    # 11 is the number of bytes in the index header.
    INDEX_BYTES = world.block_size - 11
    # Maximum number of keys that can go into an index.
    INDEX_MAX_KEYS = int(INDEX_BYTES // (world.key_size + 4) * INDEX_FILL)
    # The data of individual blocks will be stored in this list.
    blocks = []
    buffer = io.BytesIO()

    # This will create an initial leaf and connect it to following leaves which
    # will all contain the data currently in the buffer.
    def dump_buffer():
        buffer_size = buffer.tell()
        buffer.seek(0)
        block_data = b'LL' + struct.pack(
            '>i', num_keys) + buffer.read(LEAF_BYTES - 4)
        while buffer.tell() < buffer_size:
            # Chain overflowing leaves via a next-block pointer.
            blocks.append(block_data + struct.pack('>i', len(blocks) + 1))
            block_data = b'LL' + buffer.read(LEAF_BYTES)
        blocks.append(
            block_data.ljust(world.block_size - 4, b'\x00') +
            struct.pack('>i', -1))
        # Empty the buffer.
        buffer.seek(0)
        buffer.truncate()

    # The number of keys that will be stored in the next created leaf.
    num_keys = 0
    # Map of key range to leaf block pointer.
    range_to_leaf = dict()
    # All the keys, sorted (important).
    keys = sorted(data)
    # Build all the leaf blocks.
    min_key = None
    for key in keys:
        if not num_keys:
            # Remember the first key of the leaf.
            min_key = key
        buffer.write(key)
        starbound.sbon.write_bytes(buffer, data[key])
        num_keys += 1
        # Empty buffer once one of the tresholds is reached.
        if num_keys >= LEAF_KEYS_TRESHOLD or buffer.tell() >= LEAF_SIZE_TRESHOLD:
            range_to_leaf[(min_key, key)] = len(blocks)
            dump_buffer()
            num_keys = 0
    # Empty any remaining data in the buffer.
    if buffer.tell():
        range_to_leaf[(min_key, key)] = len(blocks)
        dump_buffer()
    print('Created %d blocks containing world data' % len(blocks))

    def build_index_level(range_to_block, level=0):
        # Get a list of ranges that this index level needs to point to.
        index_ranges = sorted(range_to_block)
        # The new list of ranges that the next level of indexes can use.
        new_ranges = dict()
        for i in range(0, len(index_ranges), INDEX_MAX_KEYS):
            ranges = index_ranges[i:i + INDEX_MAX_KEYS]
            min_key, _ = ranges[0]
            _, max_key = ranges[-1]
            # The first child becomes the index's left pointer.
            left_block = range_to_block[ranges.pop(0)]
            index_data = io.BytesIO()
            index_data.write(
                b'II' + struct.pack('>Bii', level, len(ranges), left_block))
            for key_range in ranges:
                index_data.write(key_range[0] +
                                 struct.pack('>i', range_to_block[key_range]))
            new_ranges[(min_key, max_key)] = len(blocks)
            blocks.append(index_data.getvalue().ljust(world.block_size, b'\x00'))
        print('- Created %d index(es) for level %d' % (len(new_ranges), level))
        return new_ranges

    # Build the indexes in multiple levels up to a single root node.
    print('Creating root node...')
    root_is_leaf = True
    level = 0
    current_index = range_to_leaf
    while len(current_index) > 1:
        current_index = build_index_level(current_index, level)
        root_is_leaf = False
        level += 1
    root_node = list(current_index.values())[0]
    # Also build an alternative root node.
    print('Creating alternate root node...')
    alternate_root_is_leaf = True
    level = 0
    current_index = range_to_leaf
    while len(current_index) > 1:
        current_index = build_index_level(current_index, level)
        alternate_root_is_leaf = False
        level += 1
    alternate_root_node = list(current_index.values())[0]
    # The last block will be a free block.
    blocks.append(b'FF\xFF\xFF\xFF\xFF' + b'\x00' * (world.block_size - 6))
    print('Writing all the data to disk...')
    with open(out_name, 'wb') as f:
        f.write(b'SBBF02')
        f.write(
            struct.pack('>ii?i', world.header_size, world.block_size, True,
                        len(blocks) - 1))
        f.write(b'\x00' * (32 - f.tell()))
        f.write(b'BTreeDB4\x00\x00\x00\x00')
        f.write(
            world.identifier.encode('utf-8') +
            b'\x00' * (12 - len(world.identifier)))
        f.write(
            struct.pack('>i?xi?xxxi?', world.key_size, False, root_node,
                        root_is_leaf, alternate_root_node,
                        alternate_root_is_leaf))
        f.write(b'\x00' * (world.header_size - f.tell()))
        for block in blocks:
            f.write(block)
    print('Done!')