Example #1
def _mp_check_region_file(region_file):
    counter.value += 1
    if getsize(region_file) != 0:
        r = check_region_file(region.RegionFile(region_file), delete_entities,
                              entity_limit)
        _mp_check_region_file.q.put(r)
        return r
    else:
        return None
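This worker expects the queue, the shared progress counter and the two option values to be injected before it runs. A minimal sketch of that wiring (assumptions: check_region_file and the region module come from the surrounding module, and the process count, entity limit and delete flag are placeholders), mirroring the pool initializer shown in Example #9:

from glob import glob
import multiprocessing

def _mp_pool_init(prog_counter, del_ents, ent_limit, queue):
    # attach the queue and the globals that _mp_check_region_file reads
    _mp_check_region_file.q = queue
    global counter, delete_entities, entity_limit
    counter = prog_counter
    delete_entities = del_ents
    entity_limit = ent_limit

if __name__ == '__main__':
    region_files = glob("world/region/r.*.*.mcr")
    q = multiprocessing.Queue()
    progress = multiprocessing.Value('d', 0)
    pool = multiprocessing.Pool(processes=4,
                                initializer=_mp_pool_init,
                                initargs=(progress, False, 500, q))
    results = pool.map(_mp_check_region_file, region_files)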
Example #2
    def remove_chunk_entities(self, x, z):
        """ Takes a chunk coordinates, opens the chunk and removes all
            the entities in it. Return an integer with the number of
            entities removed"""
        region_file = region.RegionFile(self.path)
        chunk = region_file.get_chunk(x, z)
        counter = len(chunk['Level']['Entities'])
        empty_tag_list = nbt.TAG_List(nbt.TAG_Byte, '', 'Entities')
        chunk['Level']['Entities'] = empty_tag_list
        region_file.write_chunk(x, z, chunk)

        return counter
def scan_regionset(world_path, regionset, options):
    # for now only scan the overworld
    good_list = []
    bad_list = []
    threshold = options.threshold
    counter = 0
    total = len(regionset)
    for name in regionset:
        reg = region.RegionFile(name)
        if reg.chunk_count > threshold:
            good_list.append(name)
        else:
            # check for good neighbours in a 3x3 grid centered on this
            # region file; if one is found, this region is a good one
            x, z = get_region_coords(name)
            has_neighbour = False
            for i in range(-1, 2, 1):
                for j in range(-1, 2, 1):
                    if i == 0 and j == 0:
                        continue
                    n = get_region_name(world_path, x + i, z + j)
                    if n in good_list:
                        has_neighbour = True
                        break
                    elif exists(n):
                        nreg = region.RegionFile(n)
                        if nreg.chunk_count > threshold:
                            has_neighbour = True
                            break
                if has_neighbour:
                    break
            if not has_neighbour:
                bad_list.append(name)

        counter += 1
        if not options.only_list:
            # some feedback
            if counter % 20 == 0 or counter == total:
                print "Scanned {0} of {1} region files".format(counter, total)
    return good_list, bad_list, total
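scan_regionset relies on get_region_coords and get_region_name, which are not shown on this page. Region file names follow the r.<x>.<z>.mcr pattern used by the globs elsewhere in these examples, so minimal versions might look like this (a sketch, not the project's exact helpers):

from os.path import join, split

def get_region_coords(region_path):
    # "r.-1.3.mcr" -> (-1, 3)
    name = split(region_path)[1]
    _, x, z, _ = name.split('.')
    return int(x), int(z)

def get_region_name(world_path, x, z):
    # path of the region file with region coords (x, z)
    return join(world_path, "region", "r.{0}.{1}.mcr".format(x, z))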
Example #4
    def remove_problematic_chunks(self, problem):
        """ Removes all the chunks with the given problem, returns a
            counter with the number of deleted chunks. """

        counter = 0
        bad_chunks = self.list_chunks(problem)
        for c in bad_chunks:
            global_coords = c[0]
            status_tuple = c[1]
            local_coords = _get_local_chunk_coords(*global_coords)
            region_file = region.RegionFile(self.path)
            region_file.unlink_chunk(*local_coords)
            counter += 1
            # create the new status tuple
            #                    (num_entities, chunk status)
            self[local_coords] = (0, CHUNK_NOT_CREATED)

        return counter
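_get_local_chunk_coords is used above but not shown. A region file holds 32x32 chunks, so converting global chunk coordinates to coordinates local to the region file is a modulo by 32; a minimal version (an assumption, not necessarily the project's helper) could be:

def _get_local_chunk_coords(chunk_x, chunk_z):
    """ Global chunk coords -> coords inside the region file (0..31). """
    return chunk_x % 32, chunk_z % 32

def _get_region_coords(chunk_x, chunk_z):
    """ Global chunk coords -> coords of the region file holding the chunk. """
    return chunk_x // 32, chunk_z // 32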
Example #5
def delete_chunk_list(list):
    """ Takes a list generated by check_region_file and deletes
    all the chunks in it and returns the number of deleted chunks"""

    region_file = region_path = None  # variable initializations

    counter = 0
    for chunk in list:
        x = chunk[1]
        z = chunk[2]

        region_path = chunk[0]
        region_file = region.RegionFile(region_path)
        region_file.unlink_chunk(x, z)
        counter += 1
        region_file.__del__()  # close the region file

    return counter
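For context, a hypothetical call matching the (full_mcr_path, chunk_x, chunk_z) tuples described in the docstrings on this page (the paths and coordinates below are made up):

to_delete = [("world/region/r.0.0.mcr", 12, 30),
             ("world/region/r.-1.0.mcr", 0, 5)]
deleted = delete_chunk_list(to_delete)
print "Deleted {0} chunks".format(deleted)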
    def remove_problematic_chunks(self, problem):
        """ Removes all the chunks with the given problem, it also
        removes the entry in the dictionary mcr_problems """

        deleted = []
        for reg in self.mcr_problems:
            for chunk in self.mcr_problems[reg]:
                for p in self.mcr_problems[reg][chunk]:
                    if p == problem:
                        region_file = region.RegionFile(reg)
                        region_file.unlink_chunk(chunk[0], chunk[1])
                        deleted.append((reg, chunk, "all"))
                        del region_file

        for d in deleted:
            reg, chunk, prob = d
            self.remove_problem(reg, chunk, prob)

        return len(deleted)
    def delete_chunk_list(self, l):
        """ Deletes the given chunk list from the world. 
            Takes a list of tuples storing:
            (full_mcr_path, chunk_x, chunk_z)
            
            And returns the amount of deleted chunks.
            
            It pritns info in the process."""

        counter = 0
        for region_path, x, z in l:

            region_file = region.RegionFile(region_path)

            if region_file.header[(x, z)][3] == region_file.STATUS_CHUNK_OK:
                region_file.unlink_chunk(x, z)
                counter += 1
            else:
                print "The chunk ({0},{1}) in the region file {2} doesn't exist.".format(
                    x, z, region_path)
            del region_file

        return counter
Example #8
    def replace_problematic_chunks(self, backup_worlds, problem, options):
        """ Takes a list of world objects and a problem value and try
            to replace every chunk with that problem using a working
            chunk from the list of world objects. It uses the world
            objects in left to riht order. """

        counter = 0
        for regionset in self.regionsets:
            for backup in backup_worlds:
                # choose the correct regionset based on the dimension
                # folder name
                for temp_regionset in backup.regionsets:
                    if temp_regionset._get_dimension_directory(
                    ) == regionset._get_dimension_directory():
                        b_regionset = temp_regionset
                        break

                # this doesn't need to be aware of region status, it just
                # iterates the list returned by list_chunks()
                bad_chunks = regionset.list_chunks(problem)

                if bad_chunks and b_regionset._get_dimension_directory(
                ) != regionset._get_dimension_directory():
                    print(
                        "The regionset \'{0}\' doesn't exist in the backup directory. Skipping this backup directory."
                        .format(regionset._get_dimension_directory()))
                else:
                    for c in bad_chunks:
                        global_coords = c[0]
                        status_tuple = c[1]
                        local_coords = _get_local_chunk_coords(*global_coords)
                        print("\n{0:-^60}".format(
                            ' New chunk to replace. Coords: x = {0}; z = {1} '.
                            format(*global_coords)))

                        # search for the region file
                        backup_region_path, local_coords = b_regionset.locate_chunk(
                            global_coords)
                        tofix_region_path, _ = regionset.locate_chunk(
                            global_coords)
                        if exists(backup_region_path):
                            print("Backup region file found in:\n  {0}".format(
                                backup_region_path))

                            # scan the whole region file; pretty slow, but needed to detect chunks sharing offsets
                            from scan import scan_region_file
                            r = scan_region_file(
                                ScannedRegionFile(backup_region_path), options)
                            try:
                                status_tuple = r[local_coords]
                            except KeyError:
                                status_tuple = None

                            # retrieve the status from status_tuple
                            if status_tuple is None:
                                status = CHUNK_NOT_CREATED
                            else:
                                status = status_tuple[TUPLE_STATUS]

                            if status == CHUNK_OK:
                                backup_region_file = region.RegionFile(
                                    backup_region_path)
                                working_chunk = backup_region_file.get_chunk(
                                    local_coords[0], local_coords[1])

                                print("Replacing...")
                                # the chunk exists and is healthy, fix it!
                                tofix_region_file = region.RegionFile(
                                    tofix_region_path)
                                # first unlink the chunk, second write the chunk.
                                # unlinking the chunk is more secure and the only way to replace chunks with
                                # a shared offset without overwriting the good chunk
                                tofix_region_file.unlink_chunk(*local_coords)
                                tofix_region_file.write_chunk(
                                    local_coords[0], local_coords[1],
                                    working_chunk)
                                counter += 1
                                print("Chunk replaced using backup dir: {0}".
                                      format(backup.path))

                            else:
                                print(
                                    "Can't use this backup directory, the chunk has the status: {0}"
                                    .format(CHUNK_STATUS_TEXT[status]))
                                continue

                        else:
                            print(
                                "The region file doesn't exist in the backup directory: {0}"
                                .format(backup_region_path))

        return counter
Example #9
def main():

    usage = 'usage: %prog [options] <world-path>'
    epilog = 'Copyright (C) 2011  Alejandro Aguilera (Fenixin) \
    https://github.com/Fenixin/Minecraft-Region-Fixer                                        \
    This program comes with ABSOLUTELY NO WARRANTY; for details see COPYING.txt. This is free software, and you are welcome to redistribute it under certain conditions; see COPYING.txt for details.'

    parser = OptionParser(description='Script to check the integrity of a region file, \
                                            and to fix it, when possible, using a backup of the map. \
                                            It uses the NBT library by twoolie (the fork by MidnightLightning). \
                                            Written by Alejandro Aguilera (Fenixin). Sponsored by \
                                            NITRADO Servers (http://nitrado.net)',
                          prog='region-fixer', version='0.0.1', usage=usage, epilog=epilog)
    parser.add_option('--backups',
                      '-b',
                      metavar='<backups>',
                      type=str,
                      dest='backups',
                      help='List of backup directories of the Minecraft \
                                        world to use to fix corrupted chunks and/or wrong located chunks. Warning! This script is not going \
                                        to check if it\'s the same world, so be careful! \
                                        This argument can be a comma separated list (but never with spaces between elements!).',
                      default=None)
    parser.add_option('--fix-corrupted','--fc', dest = 'fix_corrupted', action='store_true', \
                                            help = 'Tries to fix the corrupted chunks using the backups directories', default = False)
    parser.add_option('--fix-wrong-located','--fw', dest = 'fix_wrong_located', action='store_true', \
                                            help = 'Tries to fix the wrong located chunks using the backups directories', default = False)
    parser.add_option(
        '--delete-corrupted',
        '--dc',
        action='store_true',
        help=
        '[WARNING!] This option deletes! And deleting can make you lose data, so be careful! :P \
                                            This option will delete all the corrupted chunks. Used with --fix-corrupted or --fix-wrong-located it will delete all the non-fixed chunks. \
                                            Minecraft will regenerate the chunk.',
        default=False)
    parser.add_option(
        '--delete-wrong-located',
        '--dw',
        action='store_true',
        help=
        '[WARNING!] This option deletes! The same as --delete-corrupted but for \
                                            wrong located chunks',
        default=False)

    parser.add_option(
        '--delete-entities',
        '--de',
        action='store_true',
        help=
        '[WARNING!] This option deletes! This deletes ALL the entities of chunks with more entities than --entity-limit (500 by default). In a Minecraft world, entities are mobs and items dropped on the ground; items in chests and other stuff won\'t be touched. Read the README for more info. Region-Fixer will delete the entities while scanning, so you can stop and resume the process',
        default=False,
        dest='delete_entities')
    parser.add_option(
        '--entity-limit',
        '--el',
        action='store',
        type=int,
        help=
        'Specify the limit for the --delete-entities option (default = 500).',
        dest='entity_limit',
        default=500,
    )
    parser.add_option(
        '--processes',
        '-p',
        action='store',
        type=int,
        help=
        'Set the number of workers to use for scanning region files. Default is to not use multiprocessing at all',
        default=1)
    parser.add_option(
        '--verbose',
        '-v',
        action='store_true',
        help='Don\'t use progress bar, print a line per scanned region file.')

    # Other options
    other_group = OptionGroup(
        parser, "Others",
        "This option is a different part of the program and is incompatible with the options above."
    )

    other_group.add_option(
        '--delete-list',
        metavar='<delete_list>',
        type=str,
        help=
        'This is a list of chunks to delete, the list must be a python list: \
                                            [(1,1), (-10,32)]. [INFO] If you use this option the world won\'t be scanned. Protect the parentheses from bash!',
        default=None)
    parser.add_option_group(other_group)
    (options, args) = parser.parse_args()

    # Only the world directory goes to args

    if not args:
        parser.error("No world path specified!")
        sys.exit()
    elif len(args) > 1:
        parser.error("Only one world dirctory needed!")
        sys.exit()

    world_path = args[0]
    if not exists(world_path):
        parser.error("The world path doesn't exists!")
        sys.exit()

    # Check basic options incompatibilities
    if options.backups and not (options.fix_corrupted
                                or options.fix_wrong_located):
        parser.error("The option --backups needs one of the --fix-* options")

    if not options.backups and (options.fix_corrupted
                                or options.fix_wrong_located):
        parser.error("The options --fix-* need the --backups option")

    print "Welcome to Region Fixer!"

    # do things with the option args
    level_dat_filename = join(world_path, "level.dat")
    player_files = glob(join(join(world_path, "players"), "*.dat"))

    backups = options.backups
    use_backups = False
    if backups:  # create a list of directories containing the backup of the region files
        backup_dirs = parse_backup_list(backups)
        if not backup_dirs:
            print "[WARNING] No valid backup directories found, won't fix any chunk."
            fix_corrupted = fix_wrong_located = False

        else:
            use_backups = True

    world_region_dir = join(world_path, "region")
    if not exists(world_region_dir):
        print "Error: Doesn't look like a minecraft world directory!"
        sys.exit()

    world_nether_region_dir = join(world_path, "DIM-1/region")
    if not exists(world_nether_region_dir):
        print "Info: No nether dimension in the world directory."

    print "Scanning directory..."
    normal_region_files = glob(world_region_dir + "/r.*.*.mcr")
    nether_region_files = glob(world_nether_region_dir + "/r.*.*.mcr")

    if not normal_region_files and not nether_region_files:
        print "Error: No region files found!"
        sys.exit()

    if len(player_files) != 0:
        print "There are {0} region files and {1} player files in the world directory.".format(
            len(normal_region_files) + len(nether_region_files),
            len(player_files))
    else:
        print "There are {0} region files in the world directory.".format(
            len(normal_region_files) + len(nether_region_files))

    region_files = normal_region_files + nether_region_files

    # The program starts
    if options.delete_list:  # Delete the given list of chunks
        try:
            delete_list = eval(options.delete_list)
        except:
            print 'Error: Wrong chunklist!'
            sys.exit()

        delete_list = parse_chunk_list(delete_list, world_region_dir)

        print "{0:#^60}".format(' Deleting the chunks on the list ')

        print "... ",

        counter = delete_chunk_list(delete_list)
        print "Done!"

        print "Deleted {0} chunks".format(counter)

    else:
        # check the level.dat file and the *.dat files in players directory

        print "\n{0:#^60}".format(' Scanning level.dat ')

        if not exists(level_dat_filename):
            print "[WARNING!] \'level.dat\' doesn't exist!"
        else:
            check_level_file(level_dat_filename)

        print "\n{0:#^60}".format(' Scanning player files ')

        problems = 0
        for player in player_files:
            problems += check_player_file(player)

        if not problems:
            print "All player files are readable."

        # check for corrupted chunks

        print "\n{0:#^60}".format(' Scanning region files ')

        total_chunks = 0
        corrupted_chunks = []
        wrong_located_chunks = []
        total_regions = len(region_files)
        counter_region = 0
        pbar = progressbar.ProgressBar(widgets=[
            'Scanning: ',
            FractionWidget(), ' ',
            progressbar.Percentage(), ' ',
            progressbar.Bar(left='[', right=']'), ' ',
            progressbar.ETA()
        ],
                                       maxval=total_regions)
        if abs(options.processes) > 1:
            #there is probably a better way to pass these values but this works for now
            q = multiprocessing.Queue()
            counter_region = multiprocessing.Value('d', 0)

            def _mp_pool_init(prog_counter, del_ents, ent_limit, q):
                _mp_check_region_file.q = q
                global delete_entities
                delete_entities = del_ents
                global entity_limit
                entity_limit = ent_limit
                global counter
                counter = prog_counter

            pool = multiprocessing.Pool(processes=options.processes,
                                        initializer=_mp_pool_init,
                                        initargs=(counter_region,
                                                  options.delete_entities,
                                                  options.entity_limit, q))
            if not options.verbose:
                pbar.start()
            # the chunksize (arg #3) is pretty arbitrary, could probably be tweaked for better performance
            result = pool.map_async(
                _mp_check_region_file, region_files,
                max(1, (total_regions // options.processes) // 8))

            # printing status
            counter = 0
            while not result.ready() or (q.qsize() > 0 and options.verbose):
                # this loop should probably use result.wait(1) but I didn't want to take the time to figure out what wait() returns if it hits the timeout
                time.sleep(0.5)
                if options.verbose:
                    if q.qsize() > 0:
                        filename, corrupted, wrong, total = q.get()
                        counter += 1
                        stats = "(corrupted: {0}, wrong located: {1}, chunks: {2})".format(
                            len(corrupted), len(wrong), total)
                        print "Scanned {0: <15} {1:.<60} {2}/{3}".format(
                            filename, stats, counter, total_regions)
                else:
                    pbar.update(counter_region.value)

            if not options.verbose: pbar.finish()

            # making the results readable for region-fixer
            for r in result.get():
                if r is not None:
                    corrupted_chunks.extend(r[1])
                    wrong_located_chunks.extend(r[2])
                    total_chunks += r[3]
        else:
            if not options.verbose:
                pbar.start()
            for region_path in region_files:
                counter_region += 1
                if options.verbose:
                    print "Scanning {0}   ...  {1}/{2}".format(
                        region_path, counter_region, total_regions)
                else:
                    pbar.update(counter_region)

                # some region files are 0 bytes in size, and Minecraft seems
                # to handle them without problems
                if getsize(region_path) != 0:
                    region_file = region.RegionFile(region_path)
                else:
                    continue
                filename, bad_list, wrong_list, chunks = check_region_file(
                    region_file, options.delete_entities, options.entity_limit)

                corrupted_chunks.extend(bad_list)
                wrong_located_chunks.extend(wrong_list)

                total_chunks += chunks

            if not options.verbose:
                pbar.finish()
        print "\nFound {0} corrupted and {1} wrong located chunks of a total of {2}\n".format(
            len(corrupted_chunks), len(wrong_located_chunks), total_chunks)

        # Try to fix corrupted chunks with the backup copy
        if use_backups and (corrupted_chunks or wrong_located_chunks):
            if options.fix_corrupted:
                print "{0:#^60}".format(' Trying to fix corrupted chunks ')
                unfixed_corrupted, counter = replace_chunk_list(
                    corrupted_chunks, backup_dirs)
                print "\n{0} fixed chunks of a total of {1} corrupted chunks".format(
                    counter, len(corrupted_chunks))
                corrupted_chunks = unfixed_corrupted  # prepare the list for the deleting part

            if options.fix_wrong_located:
                print "{0:#^60}".format(' Trying to fix wrong located chunks ')
                unfixed_wrong_located, counter = replace_chunk_list(
                    wrong_located_chunks, backup_dirs)
                print "\n{0} fixed chunks of a total of {1} wrong located chunks".format(
                    counter, len(wrong_located_chunks))
                wrong_located_chunks = unfixed_wrong_located  # prepare the list for the deleting part

        # delete bad chunks! (if asked for)
        if options.delete_corrupted and corrupted_chunks:

            print "{0:#^60}".format(' Deleting  corrupted chunks ')

            print "... ",
            counter = delete_chunk_list(corrupted_chunks)
            print "Done!"

            print "Deleted {0} corrupted chunks".format(counter)

        if options.delete_wrong_located and wrong_located_chunks:

            print "{0:#^60}".format(' Deleting wrong located chunks ')

            print "... ",
            counter = delete_chunk_list(wrong_located_chunks)
            print "Done!"

            print "Deleted {0} wrong located chunks".format(counter)
Example #10
def replace_chunk_list(list, backup_list):

    unfixed_list = []
    unfixed_list.extend(list)

    counter = 0

    for corrupted_chunk in list:
        x = corrupted_chunk[1]
        z = corrupted_chunk[2]

        print "\n{0:-^60}".format(' New chunk to fix! ')

        for backup in backup_list:
            Fixed = False
            region_file = split(corrupted_chunk[0])[1]

            # search for the region file
            matches = glob(join(backup, region_file))
            backup_region_path = matches[0] if matches else None
            region_file = corrupted_chunk[0]
            if backup_region_path:
                print "Backup region file found in: {0} \nfixing...".format(
                    backup_region_path)

                # get the chunk
                backup_region_file = region.RegionFile(backup_region_path)
                working_chunk = check_chunk(backup_region_file, x, z)
                backup_region_file.__del__()

                if isinstance(working_chunk, nbt.TAG_Compound):
                    # the chunk exists and is non-corrupted, fix it!
                    tofix_region_file = region.RegionFile(region_file)
                    #~ print corrupted_chunk[1],corrupted_chunk[2],working_chunk
                    #~ print type(corrupted_chunk[1]),type(corrupted_chunk[2]),type(working_chunk)
                    tofix_region_file.write_chunk(corrupted_chunk[1],
                                                  corrupted_chunk[2],
                                                  working_chunk)
                    tofix_region_file.__del__()
                    Fixed = True
                    counter += 1
                    unfixed_list.remove(corrupted_chunk)
                    print "Chunk fixed using backup dir: {0}".format(backup)
                    break

                elif working_chunk is None:
                    print "The chunk doesn't exist in this backup directory: {0}".format(
                        backup)
                    # The chunk doesn't exist in the region file
                    continue

                elif working_chunk == -1:
                    # The chunk is corrupted
                    print "The chunk is corrupted in this backup directory: {0}".format(
                        backup)
                    continue

                elif working_chunk == -2:
                    # The chunk is wrong located
                    print "The chunk is wrong located in this backup directory: {0}".format(
                        backup)
                    continue

    return unfixed_list, counter
Example #11
def scan_region_file(scanned_regionfile_obj, entity_limit, delete_entities):
    """ Scan a region file filling the ScannedRegionFile

        If delete_entities is True it will delete entities while
        scanning

        entity_limit is the threshold of entities above which a chunk
        is considered to have a too-many-entities problem.
    """
    try:
        r = scanned_regionfile_obj
        # counters of problems
        chunk_count = 0
        corrupted = 0
        wrong = 0
        entities_prob = 0
        shared = 0
        # used to detect chunks sharing headers
        offsets = {}
        filename = r.filename
        # try to open the file and see if we can parse the header
        try:
            region_file = region.RegionFile(r.path)
        except region.NoRegionHeader:  # The region has no header
            r.status = world.REGION_TOO_SMALL
            r.scan_time = time()
            r.scanned = True
            return r
        except IOError, e:
            r.status = world.REGION_UNREADABLE
            r.scan_time = time()
            r.scanned = True
            return r

        for x in range(32):
            for z in range(32):
                # start the actual chunk scanning
                g_coords = r.get_global_chunk_coords(x, z)
                chunk, c = scan_chunk(region_file,
                                      (x, z),
                                      g_coords,
                                      entity_limit)
                if c:
                    r.chunks[(x, z)] = c
                    chunk_count += 1
                else:
                    # chunk not created
                    continue

                if c[TUPLE_STATUS] == world.CHUNK_OK:
                    continue
                elif c[TUPLE_STATUS] == world.CHUNK_TOO_MANY_ENTITIES:
                    # Deleting entities is in here because parsing a chunk
                    # with thousands of wrong entities takes a long time,
                    # and once detected is better to fix it at once.
                    if delete_entities:
                        world.delete_entities(region_file, x, z)
                        print ("Deleted {0} entities in chunk"
                               " ({1},{2}) of the region file: {3}").format(
                                    c[TUPLE_NUM_ENTITIES], x, z, r.filename)
                        # entities removed, change chunk status to OK
                        r.chunks[(x, z)] = (0, world.CHUNK_OK)

                    else:
                        entities_prob += 1
                        # This stores all the entities in a file,
                        # comes handy sometimes.
                        #~ pretty_tree = chunk['Level']['Entities'].pretty_tree()
                        #~ name = "{2}.chunk.{0}.{1}.txt".format(x,z,split(region_file.filename)[1])
                        #~ archivo = open(name,'w')
                        #~ archivo.write(pretty_tree)

                elif c[TUPLE_STATUS] == world.CHUNK_CORRUPTED:
                    corrupted += 1
                elif c[TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED:
                    wrong += 1

        # Now check for chunks sharing offsets:
        # Please note! region.py will mark both overlapping chunks
        # as bad (the one stepping outside its territory and the
        # good one). Only wrong located chunks with an overlapping
        # flag are really BAD chunks! Use this criterion to
        # discriminate
        metadata = region_file.metadata
        sharing = [k for k in metadata if (
            metadata[k].status == region.STATUS_CHUNK_OVERLAPPING and
            r[k][TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED)]
        shared_counter = 0
        for k in sharing:
            r[k] = (r[k][TUPLE_NUM_ENTITIES], world.CHUNK_SHARED_OFFSET)
            shared_counter += 1

        r.chunk_count = chunk_count
        r.corrupted_chunks = corrupted
        r.wrong_located_chunks = wrong
        r.entities_prob = entities_prob
        r.shared_offset = shared_counter
        r.scan_time = time()
        r.status = world.REGION_OK
        r.scanned = True
        return r
Example #12
    def replace_problematic_chunks(self, backup_worlds, problem):
        """ Takes a list of world objects and a problem value and try
            to replace every chunk with that problem using a working
            chunk from the list of world objects. It uses the world
            objects in left to riht order. """

        counter = 0
        # this list is used to remove chunks from the problems
        # dict once the iteration over it has finished, doing it at the
        # same time is not a good idea
        fixed_chunks = []

        for mcr_path in self.mcr_problems:
            for chunk in self.mcr_problems[mcr_path]:

                if problem in self.mcr_problems[mcr_path][chunk]:
                    print "\n{0:-^60}".format(' New chunk to fix! ')
                    for backup in backup_worlds:
                        Fixed = False

                        # search for the region file
                        region_name = split(mcr_path)[1]
                        dimension = split(split(mcr_path)[0])[1]
                        if dimension == "region":
                            backup_mcr_path = join(backup.world_path, "region",
                                                   region_name)
                        else:
                            backup_mcr_path = join(backup.world_path,
                                                   dimension, region_name)

                        if exists(backup_mcr_path):
                            print "Backup region file found in: {0} \nfixing...".format(
                                backup_mcr_path)

                            # get the chunk
                            backup_region_file = region.RegionFile(
                                backup_mcr_path)
                            working_chunk = scan_chunk(backup_region_file,
                                                       chunk[0], chunk[1])
                            del backup_region_file

                            ####### TODO TODO TODO
                            # would be cool to check here for entities problem?

                            if isinstance(working_chunk, nbt.TAG_Compound):
                                # the chunk exists and is non-corrupted, fix it!
                                tofix_region_file = region.RegionFile(mcr_path)
                                tofix_region_file.write_chunk(
                                    chunk[0], chunk[1], working_chunk)
                                del tofix_region_file
                                Fixed = True
                                counter += 1
                                fixed_chunks.append((mcr_path, chunk, problem))
                                print "Chunk fixed using backup dir: {0}".format(
                                    backup.world_path)
                                break

                            elif working_chunk is None:
                                print "The chunk doesn't exist in this backup directory: {0}".format(
                                    backup.world_path)
                                # The chunk doesn't exist in the region file
                                continue

                            elif working_chunk == -1:
                                # The chunk is corrupted
                                print "The chunk is corrupted in this backup directory: {0}".format(
                                    backup.world_path)
                                continue

                            elif working_chunk == -2:
                                # The chunk is wrong located
                                print "The chunk is wrong located in this backup directory: {0}".format(
                                    backup.world_path)
                                continue

        for mcr, chunk, problem in fixed_chunks:
            self.remove_problem(mcr, chunk, problem)

        return counter
Example #13
def scan_region_file(scanned_regionfile_obj, entity_limit, remove_entities):
    """ Scan a region file filling the ScannedRegionFile object

    Inputs:
     - scanned_regionfile_obj -- ScannedRegionfile object from world.py that will be scanned
     - entity_limit -- An integer, the threshold of entities above which a chunk
                     is considered to have too many entities
     - remove_entities -- A boolean, defaults to False, to remove the entities while 
                         scanning. This is really handy because opening chunks with
                         too many entities for scanning can take minutes.

    """

    try:
        r = scanned_regionfile_obj

        # try to open the file and see if we can parse the header
        try:
            region_file = region.RegionFile(r.path)
        except region.NoRegionHeader:  # The region has no header
            r.status = c.REGION_TOO_SMALL
            r.scan_time = time()
            r.scanned = True
            return r

        except PermissionError:
            r.status = c.REGION_UNREADABLE_PERMISSION_ERROR
            r.scan_time = time()
            r.scanned = True
            return r

        except IOError:
            r.status = c.REGION_UNREADABLE
            r.scan_time = time()
            r.scanned = True
            return r

        for x in range(32):
            for z in range(32):
                # start the actual chunk scanning
                g_coords = r.get_global_chunk_coords(x, z)
                chunk, tup = scan_chunk(region_file, (x, z), g_coords,
                                        entity_limit)
                if tup:
                    r[(x, z)] = tup
                else:
                    # chunk not created
                    continue

                if tup[c.TUPLE_STATUS] == c.CHUNK_OK:
                    continue
                elif tup[c.TUPLE_STATUS] == c.CHUNK_TOO_MANY_ENTITIES:
                    # Deleting entities is in here because parsing a chunk
                    # with thousands of wrong entities takes a long time,
                    # and sometimes GiB of RAM, and once detected is better
                    # to fix it at once.
                    if remove_entities:
                        world.delete_entities(region_file, x, z)
                        print(("Deleted {0} entities in chunk"
                               " ({1},{2}) of the region file: {3}").format(
                                   tup[c.TUPLE_NUM_ENTITIES], x, z,
                                   r.filename))
                        # entities removed, change chunk status to OK
                        r[(x, z)] = (0, c.CHUNK_OK)

                    else:
                        # This stores all the entities in a file,
                        # comes handy sometimes.
                        # ~ pretty_tree = chunk['Level']['Entities'].pretty_tree()
                        # ~ name = "{2}.chunk.{0}.{1}.txt".format(x,z,split(region_file.filename)[1])
                        # ~ archivo = open(name,'w')
                        # ~ archivo.write(pretty_tree)
                        pass
                elif tup[c.TUPLE_STATUS] == c.CHUNK_CORRUPTED:
                    pass
                elif tup[c.TUPLE_STATUS] == c.CHUNK_WRONG_LOCATED:
                    pass

        # Now check for chunks sharing offsets:
        # Please note! region.py will mark both overlapping chunks
        # as bad (the one stepping outside its territory and the
        # good one). Only wrong located chunks with an overlapping
        # flag are really BAD chunks! Use this criterion to
        # discriminate
        #
        # TODO: Why? I don't remember why
        # TODO: Leave this to nbt, which code is much better than this

        metadata = region_file.metadata
        sharing = [
            k for k in metadata
            if (metadata[k].status == region.STATUS_CHUNK_OVERLAPPING
                and r[k][c.TUPLE_STATUS] == c.CHUNK_WRONG_LOCATED)
        ]
        shared_counter = 0
        for k in sharing:
            r[k] = (r[k][c.TUPLE_NUM_ENTITIES], c.CHUNK_SHARED_OFFSET)
            shared_counter += 1

        r.scan_time = time()
        r.status = c.REGION_OK
        r.scanned = True
        return r

    except KeyboardInterrupt:
        print("\nInterrupted by user\n")
        # TODO this shouldn't exit. It should return to interactive
        # mode if we are in it.
        sys.exit(1)

    # Fatal exceptions:
    except:
        # Anything else is a ChildProcessException
        # NOTE TO SELF: do not try to return the traceback object directly!
        # A multiprocess pythonic hell comes to earth if you do so.
        except_type, except_class, tb = sys.exc_info()
        r = (scanned_regionfile_obj, (except_type, except_class,
                                      extract_tb(tb)))

        return r
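Callers get back either the filled ScannedRegionFile or the error tuple built in the final except clause above. A hedged sketch of how the parent process might tell the two apart (the helper name is an assumption, not part of the project):

def handle_scan_result(result):
    # result is either a ScannedRegionFile or the tuple
    # (scanned_regionfile_obj, (except_type, except_class, extracted_tb))
    if isinstance(result, tuple):
        region_obj, (except_type, except_class, extracted_tb) = result
        print("Scan failed for {0}: {1}".format(region_obj.filename, except_type))
        return None
    return result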
Example #14
def scan_mcr_file(region_file_path):
    """ Scans a region file reporting problems.
    
    Takes a region file path and returns a list of corrupted
    chunks where each element represents a corrupted chunk and
    is a tuple containing:

    (region file, (coord x, coord z), problem)

    This function is used from scan_all_mcr_files and uses a
    multiprocessing queue to return in real time info about the process.
    """

    delete_entities = scan_mcr_file.options.delete_entities
    entity_limit = scan_mcr_file.options.entity_limit
    region_file = region.RegionFile(region_file_path)
    w = scan_mcr_file.w
    chunks = 0
    problems = []
    corrupted = 0
    wrong = 0
    entities_prob = 0
    filename = split(region_file.filename)[1]
    try:
        for x in range(32):
            for z in range(32):
                chunk, status, error_msg = scan_chunk(region_file, x, z)
                if status == 0:
                    chunks += 1
                    total_entities = len(chunk['Level']['Entities'].tags)
                    # deleting entities is in here because to parse a chunk with thousands of wrong entities
                    # takes a long time, and once detected is better to fix it at once.
                    if total_entities >= entity_limit:
                        if delete_entities:
                            empty_tag_list = nbt.TAG_List(nbt.TAG_Byte,'','Entities')
                            chunk['Level']['Entities'] = empty_tag_list
                            print "Deleted {0} entities in chunk ({1},{2}).".format(total_entities, x, z)
                            region_file.write_chunk(x, z, chunk)

                        else:
                            problems.append((region_file.filename,(x,z),w.TOO_MUCH_ENTITIES))
                            entities_prob += 1
                            print "[WARNING!]: The chunk ({0},{1}) in region file {2} has {3} entities, and this may be too much. This may be a problem!".format(x,z,split(region_file.filename)[1],total_entities)

                            # This stores all the entities in a file,
                            # comes handy sometimes.
                            #~ pretty_tree = chunk['Level']['Entities'].pretty_tree()
                            #~ name = "{2}.chunk.{0}.{1}.txt".format(x,z,split(region_file.filename)[1])
                            #~ archivo = open(name,'w')
                            #~ archivo.write(pretty_tree)

                elif status == -1:
                    chunks += 1
                    problems.append((region_file.filename,(x,z),w.CORRUPTED))
                    corrupted += 1
                elif status == -2:
                    chunks += 1
                    problems.append((region_file.filename,(x,z),w.WRONG_LOCATED))
                    wrong += 1
                # if None do nothing

                del chunk # unload chunk from memory

        del region_file

    except KeyboardInterrupt:
        print "\nInterrupted by user\n"
        sys.exit(1)

    scan_mcr_file.q.put((filename, corrupted, wrong, entities_prob, chunks))

    return problems
Example #15
    nbtfile = nbt.NBTFile(path)

    # search ender chest items and inventory
    for item_tag in nbtfile["EnderItems"].tags + nbtfile["Inventory"].tags:
        if reset_repair_cost(item_tag):
            reset_count += 1

    if reset_count > 0:
        nbtfile.write_file(path)
        tqdm.write(f"{reset_count} items reset")
        reset_count = 0


print("Searching map")
for path in tqdm(region_files):
    regionfile = region.RegionFile(path)

    for chunk in tqdm(regionfile.iter_chunks(), total=regionfile.chunk_count()):
        reset_count = 0
        # search dropped items and chest minecarts
        for entity in chunk["Level"]["Entities"]:
            try:
                items = entity["Items"]
                for item in items:
                    if reset_repair_cost(item):
                        reset_count += 1
            except KeyError:
                try:
                    if reset_repair_cost(entity):
                        reset_count += 1
                except KeyError:
                    pass  # entity has no item with a repair cost
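reset_repair_cost is called throughout this snippet but is not shown. A minimal sketch, assuming the anvil penalty lives in an integer RepairCost entry inside an item's tag compound and that dropped item entities wrap their stack in an Item compound (both assumptions about the data, not taken from this page):

def reset_repair_cost(tag):
    # dropped item entities wrap their item stack in an "Item" compound
    try:
        item = tag["Item"]
    except KeyError:
        item = tag
    # items without a "tag" compound or a "RepairCost" entry are left alone
    try:
        repair_cost = item["tag"]["RepairCost"]
    except KeyError:
        return False
    if repair_cost.value == 0:
        return False
    repair_cost.value = 0
    return True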
Example #16
    def replace_problematic_regions(self, backup_worlds, problem, options):
        """ Replaces region files with the given problem using a backup
            directory. """
        counter = 0
        for regionset in self.regionsets:
            for backup in backup_worlds:
                # choose the correct regionset based on the dimension
                # folder name
                for temp_regionset in backup.regionsets:
                    if temp_regionset._get_dimension_directory(
                    ) == regionset._get_dimension_directory():
                        b_regionset = temp_regionset
                        break

                bad_regions = regionset.list_regions(problem)
                if bad_regions and b_regionset._get_dimension_directory(
                ) != regionset._get_dimension_directory():
                    print(
                        "The regionset \'{0}\' doesn't exist in the backup directory. Skipping this backup directory."
                        .format(regionset._get_dimension_directory()))
                else:
                    for r in bad_regions:
                        print("\n{0:-^60}".format(
                            ' New region file to replace! Coords {0} '.format(
                                r.get_coords())))

                        # search for the region file

                        try:
                            backup_region_path = b_regionset[
                                r.get_coords()].get_path()
                        except:
                            backup_region_path = None
                        tofix_region_path = r.get_path()

                        if backup_region_path != None and exists(
                                backup_region_path):
                            print("Backup region file found in:\n  {0}".format(
                                backup_region_path))
                            # check the region file, just open it.
                            try:
                                backup_region_file = region.RegionFile(
                                    backup_region_path)
                            except region.NoRegionHeader as e:
                                print(
                                    "Can't use this backup directory, the error while opening the region file: {0}"
                                    .format(e))
                                continue
                            except Exception as e:
                                print(
                                    "Can't use this backup directory, unknown error: {0}"
                                    .format(e))
                                continue
                            copy(backup_region_path, tofix_region_path)
                            print("Region file replaced!")
                            counter += 1
                        else:
                            print(
                                "The region file doesn't exist in the backup directory: {0}"
                                .format(backup_region_path))

        return counter
Example #17
def scan_region_file(scanned_regionfile_obj, options):
    """ Given a scanned region file object with the information of a 
        region files scans it and returns the same obj filled with the
        results.
        
        If delete_entities is True it will delete entities while
        scanning
        
        entiti_limit is the threshold tof entities to conisder a chunk
        with too much entities problems.
    """
    o = options
    delete_entities = o.delete_entities
    entity_limit = o.entity_limit
    try:
        r = scanned_regionfile_obj
        # counters of problems
        chunk_count = 0
        corrupted = 0
        wrong = 0
        entities_prob = 0
        shared = 0
        # used to detect chunks sharing headers
        offsets = {}
        filename = r.filename
        # try to open the file and see if we can parse the header
        try:
            region_file = region.RegionFile(r.path)
        except region.NoRegionHeader:  # the region has no header
            r.status = world.REGION_TOO_SMALL
            return r
        except IOError as e:
            print(
                "\nWARNING: I can't open the file {0} !\nThe error is \"{1}\".\nTypical causes are file blocked or problems in the file system.\n"
                .format(filename, e))
            r.status = world.REGION_UNREADABLE
            r.scan_time = time.time()
            print(
                "Note: this region file won't be scanned and won't be taken into acount in the summaries"
            )
            # TODO count also this region files
            return r
        except:  # whatever else print an error and ignore for the scan
            # not really sure if this is a good solution...
            print(
                "\nWARNING: The region file \'{0}\' had an error and couldn't be parsed as region file!\nError:{1}\n"
                .format(join(split(split(r.path)[0])[1],
                             split(r.path)[1]),
                        sys.exc_info()[0]))
            print(
                "Note: this region file won't be scanned and won't be taken into acount."
            )
            print(
                "Also, this may be a bug. Please, report it if you have the time.\n"
            )
            return None

        try:  # start the scanning of chunks

            for x in range(32):
                for z in range(32):

                    # start the actual chunk scanning
                    g_coords = r.get_global_chunk_coords(x, z)
                    chunk, c = scan_chunk(region_file, (x, z), g_coords, o)
                    if c is not None:
                        r.chunks[(x, z)] = c
                        chunk_count += 1
                    else:
                        # chunk not created
                        continue
                    if c[TUPLE_STATUS] == world.CHUNK_OK:
                        if options.name_tags == True:
                            if len(chunk["Level"]["Entities"]) > 0:
                                for idx, val in enumerate(
                                        chunk["Level"]["Entities"]):
                                    try:
                                        if str(val["CustomName"]) != "":
                                            # TODO Don't simply print this. Store it to display as part of a summary that doesn't interrupt the progress bar.
                                            print(
                                                "\n\"{0}\" is currently at X:{1} Z:{2}."
                                                .format(
                                                    val["CustomName"],
                                                    int(
                                                        float(val["Pos"]
                                                              [0].value)),
                                                    int(
                                                        float(val["Pos"]
                                                              [2].value))))
                                        elif str(val["Owner"]) != "":
                                            # TODO Don't simply print this. Store it to display as part of a summary that doesn't interrupt the progress bar.
                                            print(
                                                "\n{0}'s {1} is currently at X:{2} Z:{3}."
                                                .format(
                                                    val["Owner"], val["id"],
                                                    int(
                                                        float(val["Pos"]
                                                              [0].value)),
                                                    int(
                                                        float(val["Pos"]
                                                              [2].value))))
                                        elif str(val["OwnerName"]) != "":
                                            # TODO Don't simply print this. Store it to display as part of a summary that doesn't interrupt the progress bar.
                                            print(
                                                "\n{0}'s horse is currently at X:{1} Z:{2}."
                                                .format(
                                                    val["OwnerName"],
                                                    int(
                                                        float(val["Pos"]
                                                              [0].value)),
                                                    int(
                                                        float(val["Pos"]
                                                              [2].value))))
                                    except:
                                        pass
                        continue
                    elif c[TUPLE_STATUS] == world.CHUNK_TOO_MANY_ENTITIES:
                        # deleting entities is in here because parsing a chunk with thousands of wrong entities
                        # takes a long time, and once detected is better to fix it at once.
                        if delete_entities:
                            world.delete_entities(region_file, x, z)
                            print(
                                "Deleted {0} entities in chunk ({1},{2}) of the region file: {3}"
                                .format(c[TUPLE_NUM_ENTITIES], x, z,
                                        r.filename))
                            # entities removed, change chunk status to OK
                            r.chunks[(x, z)] = (0, world.CHUNK_OK)

                        else:
                            entities_prob += 1
                            # This stores all the entities in a file,
                            # comes handy sometimes.
                            #~ pretty_tree = chunk['Level']['Entities'].pretty_tree()
                            #~ name = "{2}.chunk.{0}.{1}.txt".format(x,z,split(region_file.filename)[1])
                            #~ archivo = open(name,'w')
                            #~ archivo.write(pretty_tree)

                    elif c[TUPLE_STATUS] == world.CHUNK_CORRUPTED:
                        corrupted += 1
                    elif c[TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED:
                        wrong += 1

            # Now check for chunks sharing offsets:
            # Please note! region.py will mark both overlapping chunks
            # as bad (the one stepping outside its territory and the
            # good one). Only wrong located chunks with an overlapping
            # flag are really BAD chunks! Use this criterion to
            # discriminate
            metadata = region_file.metadata
            sharing = [
                k for k in metadata
                if (metadata[k].status == region.STATUS_CHUNK_OVERLAPPING
                    and r[k][TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED)
            ]
            shared_counter = 0
            for k in sharing:
                r[k] = (r[k][TUPLE_NUM_ENTITIES], world.CHUNK_SHARED_OFFSET)
                shared_counter += 1

        except KeyboardInterrupt:
            print("\nInterrupted by user\n")
            # TODO this shouldn't exit
            sys.exit(1)

        r.chunk_count = chunk_count
        r.corrupted_chunks = corrupted
        r.wrong_located_chunks = wrong
        r.entities_prob = entities_prob
        r.shared_offset = shared_counter
        r.scan_time = time.time()
        r.status = world.REGION_OK
        return r

    # Fatal exceptions:
    except:
        # anything else is a ChildProcessException
        except_type, except_class, tb = sys.exc_info()
        r = (r.path, r.coords, (except_type, except_class,
                                traceback.extract_tb(tb)))
        return r
def scan_region_file(scanned_regionfile_obj, options):
    """ Given a scanned region file object with the information of a 
        region files scans it and returns the same obj filled with the
        results.
        
        If delete_entities is True it will delete entities while
        scanning
        
        entiti_limit is the threshold tof entities to conisder a chunk
        with too much entities problems.
    """

    o = options
    delete_entities = o.delete_entities
    entity_limit = o.entity_limit
    try:
        global cnx
        # cnx = mysql.connect(user='******', password='', host='localhost', port='3306', database='block_stats')
        block_aggregation = [0] * 4096
        containers = []
        r = scanned_regionfile_obj

        # counters of problems
        chunk_count = 0
        corrupted = 0
        wrong = 0
        entities_prob = 0
        shared = 0
        # used to detect chunks sharing headers
        offsets = {}
        filename = r.filename
        # try to open the file and see if we can parse the header
        try:
            region_file = region.RegionFile(r.path)
        except region.NoRegionHeader:  # the region has no header
            r.status = world.REGION_TOO_SMALL
            return r
        except IOError, e:
            print "\nWARNING: I can't open the file {0} !\nThe error is \"{1}\".\nTypical causes are file blocked or problems in the file system.\n".format(
                filename, e)
            r.status = world.REGION_UNREADABLE
            r.scan_time = time.time()
            print "Note: this region file won't be scanned and won't be taken into acount in the summaries"
            # TODO count also this region files
            return r
        except:  # whatever else print an error and ignore for the scan
            # not really sure if this is a good solution...
            print "\nWARNING: The region file \'{0}\' had an error and couldn't be parsed as region file!\nError:{1}\n".format(
                join(split(split(r.path)[0])[1],
                     split(r.path)[1]),
                sys.exc_info()[0])
            print "Note: this region file won't be scanned and won't be taken into acount."
            print "Also, this may be a bug. Please, report it if you have the time.\n"
            return None

        try:  # start the scanning of chunks

            for x in range(32):
                for z in range(32):

                    # start the actual chunk scanning
                    g_coords = r.get_global_chunk_coords(x, z)
                    chunk, c = scan_chunk(region_file, (x, z), g_coords, o,
                                          block_aggregation, containers)

                    if c is not None:
                        r.chunks[(x, z)] = c
                        chunk_count += 1
                    else:
                        # chunk not created
                        continue
                    if c[TUPLE_STATUS] == world.CHUNK_OK:
                        continue
                    elif c[TUPLE_STATUS] == world.CHUNK_TOO_MANY_ENTITIES:
                        # deleting entities is in here because parsing a chunk with thousands of wrong entities
                        # takes a long time, and once detected is better to fix it at once.
                        if delete_entities:
                            world.delete_entities(region_file, x, z)
                            print "Deleted {0} entities in chunk ({1},{2}) of the region file: {3}".format(
                                c[TUPLE_NUM_ENTITIES], x, z, r.filename)
                            # entities removed, change chunk status to OK
                            r.chunks[(x, z)] = (0, world.CHUNK_OK)

                        else:
                            entities_prob += 1
                            # This stores all the entities in a file,
                            # comes handy sometimes.
                            #~ pretty_tree = chunk['Level']['Entities'].pretty_tree()
                            #~ name = "{2}.chunk.{0}.{1}.txt".format(x,z,split(region_file.filename)[1])
                            #~ archivo = open(name,'w')
                            #~ archivo.write(pretty_tree)

                    elif c[TUPLE_STATUS] == world.CHUNK_CORRUPTED:
                        corrupted += 1
                    elif c[TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED:
                        wrong += 1

            # Now check for chunks sharing offsets:
            # Please note! region.py will mark both overlapping chunks
            # as bad (the one stepping outside its territory and the
            # good one). Only wrong located chunks with an overlapping
            # flag are really BAD chunks! Use this criterion to
            # discriminate
            metadata = region_file.metadata
            sharing = [
                k for k in metadata
                if (metadata[k].status == region.STATUS_CHUNK_OVERLAPPING
                    and r[k][TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED)
            ]
            shared_counter = 0
            for k in sharing:
                r[k] = (r[k][TUPLE_NUM_ENTITIES], world.CHUNK_SHARED_OFFSET)
                shared_counter += 1
            saveBlockStats('region', r.get_coords(), block_aggregation)

        except KeyboardInterrupt:
            print "\nInterrupted by user\n"
            # TODO this shouldn't exit
            sys.exit(1)

        r.chunk_count = chunk_count
        r.corrupted_chunks = corrupted
        r.wrong_located_chunks = wrong
        r.entities_prob = entities_prob
        r.shared_offset = shared_counter
        r.scan_time = time.time()
        r.status = world.REGION_OK
        r.block_aggregation = block_aggregation
        r.containers = containers
        return r