def scan_region_file(scanned_regionfile_obj, options):
    """Scan a region file and return the ScannedRegionFile filled with results.

    Args:
        scanned_regionfile_obj: ScannedRegionFile to scan; mutated in place.
        options: parsed program options. ``delete_entities`` removes the
            entities of chunks that exceed ``entity_limit`` while scanning;
            ``name_tags`` prints the position of named/owned entities.

    Returns:
        The same object with chunk counters, ``status`` and ``scan_time``
        set; ``None`` when the file could not be parsed as a region file at
        all; or a ``(path, coords, exc_info)`` tuple on any other fatal
        error (consumed elsewhere as a ChildProcessException).
    """
    o = options
    delete_entities = o.delete_entities
    entity_limit = o.entity_limit
    try:
        r = scanned_regionfile_obj

        # Counters of problems found while scanning.
        chunk_count = 0
        corrupted = 0
        wrong = 0
        entities_prob = 0
        filename = r.filename

        # Try to open the file and see if we can parse the header.
        try:
            region_file = region.RegionFile(r.path)
        except region.NoRegionHeader:
            # The region file is too small to hold a header.
            r.status = world.REGION_TOO_SMALL
            return r
        except IOError as e:
            print(
                "\nWARNING: I can't open the file {0} !\nThe error is \"{1}\".\nTypical causes are file blocked or problems in the file system.\n"
                .format(filename, e))
            r.status = world.REGION_UNREADABLE
            r.scan_time = time.time()
            print(
                "Note: this region file won't be scanned and won't be taken into acount in the summaries"
            )
            # TODO count also these region files
            return r
        except Exception:
            # Whatever else went wrong: report it and skip this file.
            # (Narrowed from a bare except so Ctrl-C is not swallowed here.)
            print(
                "\nWARNING: The region file \'{0}\' had an error and couldn't be parsed as region file!\nError:{1}\n"
                .format(join(split(split(r.path)[0])[1], split(r.path)[1]),
                        sys.exc_info()[0]))
            print(
                "Note: this region file won't be scanned and won't be taken into acount."
            )
            print("Also, this may be a bug. Please, report it if you have the time.\n")
            return None

        try:  # start the scanning of chunks
            for x in range(32):
                for z in range(32):
                    # Scan one chunk of the 32x32 grid inside the region.
                    g_coords = r.get_global_chunk_coords(x, z)
                    chunk, c = scan_chunk(region_file, (x, z), g_coords, o)
                    if c is None:  # chunk not created
                        continue
                    r.chunks[(x, z)] = c
                    chunk_count += 1

                    if c[TUPLE_STATUS] == world.CHUNK_OK:
                        if options.name_tags:
                            # Report named/owned entities. Best effort: most
                            # entities lack these NBT keys, so lookups raise
                            # and are deliberately ignored.
                            for val in chunk["Level"]["Entities"]:
                                try:
                                    if str(val["CustomName"]) != "":
                                        # TODO Don't simply print this. Store it to display as part of a summary that doesn't interrupt the progress bar.
                                        print(
                                            "\n\"{0}\" is currently at X:{1} Z:{2}."
                                            .format(val["CustomName"],
                                                    int(float(val["Pos"][0].value)),
                                                    int(float(val["Pos"][2].value))))
                                    elif str(val["Owner"]) != "":
                                        print(
                                            "\n{0}'s {1} is currently at X:{2} Z:{3}."
                                            .format(val["Owner"], val["id"],
                                                    int(float(val["Pos"][0].value)),
                                                    int(float(val["Pos"][2].value))))
                                    elif str(val["OwnerName"]) != "":
                                        print(
                                            "\n{0}'s horse is currently at X:{1} Z:{2}."
                                            .format(val["OwnerName"],
                                                    int(float(val["Pos"][0].value)),
                                                    int(float(val["Pos"][2].value))))
                                except Exception:
                                    pass
                        continue
                    elif c[TUPLE_STATUS] == world.CHUNK_TOO_MANY_ENTITIES:
                        # Deleting entities happens here because parsing a
                        # chunk with thousands of wrong entities takes a long
                        # time; once detected it is better to fix it at once.
                        if delete_entities:
                            world.delete_entities(region_file, x, z)
                            print(
                                "Deleted {0} entities in chunk ({1},{2}) of the region file: {3}"
                                .format(c[TUPLE_NUM_ENTITIES], x, z, r.filename))
                            # Entities removed, change chunk status to OK.
                            r.chunks[(x, z)] = (0, world.CHUNK_OK)
                        else:
                            entities_prob += 1
                    elif c[TUPLE_STATUS] == world.CHUNK_CORRUPTED:
                        corrupted += 1
                    elif c[TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED:
                        wrong += 1

            # Now check for chunks sharing offsets.
            # Please note! region.py will mark both overlapping chunks as bad
            # (the one stepping outside its territory and the good one). Only
            # a wrong-located chunk with an overlapping flag is a really BAD
            # chunk. Use this criterion to discriminate.
            metadata = region_file.metadata
            sharing = [
                k for k in metadata
                if (metadata[k].status == region.STATUS_CHUNK_OVERLAPPING
                    and r[k][TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED)
            ]
            shared_counter = 0
            for k in sharing:
                r[k] = (r[k][TUPLE_NUM_ENTITIES], world.CHUNK_SHARED_OFFSET)
                shared_counter += 1

        except KeyboardInterrupt:
            print("\nInterrupted by user\n")
            # TODO this shouldn't exit
            sys.exit(1)

        r.chunk_count = chunk_count
        r.corrupted_chunks = corrupted
        r.wrong_located_chunks = wrong
        r.entities_prob = entities_prob
        r.shared_offset = shared_counter
        r.scan_time = time.time()
        r.status = world.REGION_OK
        return r

    # Fatal exceptions: anything else is reported as a ChildProcessException.
    # NOTE(review): kept as a bare except on purpose — it also converts the
    # SystemExit raised by sys.exit(1) above into a result tuple, preserving
    # the original control flow.
    except:
        except_type, except_class, tb = sys.exc_info()
        r = (r.path, r.coords,
             (except_type, except_class, traceback.extract_tb(tb)))
        return r
def scan_region_file(scanned_regionfile_obj, options):
    """Scan a region file and return the same ScannedRegionFile with results.

    If ``options.delete_entities`` is True, entities are deleted while
    scanning from chunks holding more than ``options.entity_limit``
    entities.  If ``options.name_tags`` is True, the position of named or
    owned entities is printed during the scan.

    Returns the mutated ScannedRegionFile; ``None`` if the file could not
    be parsed as a region file; or a ``(path, coords, exc_info)`` tuple on
    any other fatal error (consumed elsewhere as a ChildProcessException).
    """
    o = options
    delete_entities = o.delete_entities
    entity_limit = o.entity_limit
    try:
        r = scanned_regionfile_obj
        # counters of problems
        chunk_count = 0
        corrupted = 0
        wrong = 0
        entities_prob = 0
        shared = 0
        # used to detect chunks sharing headers
        offsets = {}
        filename = r.filename
        # try to open the file and see if we can parse the header
        try:
            region_file = region.RegionFile(r.path)
        except region.NoRegionHeader: # the region has no header
            r.status = world.REGION_TOO_SMALL
            return r
        except IOError as e:
            print("\nWARNING: I can't open the file {0} !\nThe error is \"{1}\".\nTypical causes are file blocked or problems in the file system.\n".format(filename,e))
            r.status = world.REGION_UNREADABLE
            r.scan_time = time.time()
            print("Note: this region file won't be scanned and won't be taken into acount in the summaries")
            # TODO count also this region files
            return r
        except: # whatever else print an error and ignore for the scan
            # not really sure if this is a good solution...
            print("\nWARNING: The region file \'{0}\' had an error and couldn't be parsed as region file!\nError:{1}\n".format(join(split(split(r.path)[0])[1], split(r.path)[1]),sys.exc_info()[0]))
            print("Note: this region file won't be scanned and won't be taken into acount.")
            print("Also, this may be a bug. Please, report it if you have the time.\n")
            return None

        try:# start the scanning of chunks
            for x in range(32):
                for z in range(32):
                    # start the actual chunk scanning
                    g_coords = r.get_global_chunk_coords(x, z)
                    chunk, c = scan_chunk(region_file, (x,z), g_coords, o)
                    if c != None: # chunk not created
                        r.chunks[(x,z)] = c
                        chunk_count += 1
                    else:
                        continue
                    if c[TUPLE_STATUS] == world.CHUNK_OK:
                        if options.name_tags == True:
                            if len(chunk["Level"]["Entities"]) > 0:
                                # Report named/owned entities; the bare except
                                # below ignores entities missing these NBT keys.
                                for idx, val in enumerate(chunk["Level"]["Entities"]):
                                    try:
                                        if str(val["CustomName"]) != "":
                                            # TODO Don't simply print this. Store it to display as part of a summary that doesn't interrupt the progress bar.
                                            print("\n\"{0}\" is currently at X:{1} Z:{2}.".format(val["CustomName"], int(float(val["Pos"][0].value)), int(float(val["Pos"][2].value))))
                                        elif str(val["Owner"]) != "":
                                            # TODO Don't simply print this. Store it to display as part of a summary that doesn't interrupt the progress bar.
                                            print("\n{0}'s {1} is currently at X:{2} Z:{3}.".format(val["Owner"], val["id"], int(float(val["Pos"][0].value)), int(float(val["Pos"][2].value))))
                                        elif str(val["OwnerName"]) != "":
                                            # TODO Don't simply print this. Store it to display as part of a summary that doesn't interrupt the progress bar.
                                            print("\n{0}'s horse is currently at X:{1} Z:{2}.".format(val["OwnerName"], int(float(val["Pos"][0].value)), int(float(val["Pos"][2].value))))
                                    except:
                                        pass
                        continue
                    elif c[TUPLE_STATUS] == world.CHUNK_TOO_MANY_ENTITIES:
                        # deleting entities is in here because parsing a chunk with thousands of wrong entities
                        # takes a long time, and once detected is better to fix it at once.
                        if delete_entities:
                            world.delete_entities(region_file, x, z)
                            print("Deleted {0} entities in chunk ({1},{2}) of the region file: {3}".format(c[TUPLE_NUM_ENTITIES], x, z, r.filename))
                            # entities removed, change chunk status to OK
                            r.chunks[(x,z)] = (0, world.CHUNK_OK)
                        else:
                            entities_prob += 1
                            # This stores all the entities in a file,
                            # comes handy sometimes.
                            #~ pretty_tree = chunk['Level']['Entities'].pretty_tree()
                            #~ name = "{2}.chunk.{0}.{1}.txt".format(x,z,split(region_file.filename)[1])
                            #~ archivo = open(name,'w')
                            #~ archivo.write(pretty_tree)
                    elif c[TUPLE_STATUS] == world.CHUNK_CORRUPTED:
                        corrupted += 1
                    elif c[TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED:
                        wrong += 1

            # Now check for chunks sharing offsets:
            # Please note! region.py will mark both overlapping chunks
            # as bad (the one stepping outside his territory and the
            # good one). Only wrong located chunk with a overlapping
            # flag are really BAD chunks! Use this criterion to
            # discriminate
            metadata = region_file.metadata
            sharing = [k for k in metadata if (
                metadata[k].status == region.STATUS_CHUNK_OVERLAPPING and
                r[k][TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED)]
            shared_counter = 0
            for k in sharing:
                r[k] = (r[k][TUPLE_NUM_ENTITIES], world.CHUNK_SHARED_OFFSET)
                shared_counter += 1

        except KeyboardInterrupt:
            print("\nInterrupted by user\n")
            # TODO this should't exit
            sys.exit(1)

        # Publish the per-region counters on the scanned object.
        r.chunk_count = chunk_count
        r.corrupted_chunks = corrupted
        r.wrong_located_chunks = wrong
        r.entities_prob = entities_prob
        r.shared_offset = shared_counter
        r.scan_time = time.time()
        r.status = world.REGION_OK
        return r

    # Fatal exceptions:
    except: # anything else is a ChildProcessException
        # NOTE(review): bare except — also catches the SystemExit raised by
        # sys.exit(1) above and turns it into a result tuple.
        except_type, except_class, tb = sys.exc_info()
        r = (r.path, r.coords, (except_type, except_class, traceback.extract_tb(tb)))
        return r
def scan_region_file(scanned_regionfile_obj, entity_limit, delete_entities):
    """Scan a region file, filling the given ScannedRegionFile.

    Args:
        scanned_regionfile_obj: ScannedRegionFile to scan; mutated in place.
        entity_limit: threshold of entities above which a chunk is flagged
            as having too many entities.
        delete_entities: if True, delete the entities of over-limit chunks
            while scanning.

    Returns:
        The same ScannedRegionFile with counters, ``status``, ``scan_time``
        and ``scanned`` set.
    """
    # NOTE: the original wrapped the whole body in a ``try:`` that had no
    # matching handler (a syntax error); the pointless try was removed.
    r = scanned_regionfile_obj

    # Counters of problems found while scanning.
    chunk_count = 0
    corrupted = 0
    wrong = 0
    entities_prob = 0
    filename = r.filename

    # Try to open the file and see if we can parse the header.
    try:
        region_file = region.RegionFile(r.path)
    except region.NoRegionHeader:
        # The region file is too small to hold a header.
        r.status = world.REGION_TOO_SMALL
        r.scan_time = time()
        r.scanned = True
        return r
    except IOError:
        r.status = world.REGION_UNREADABLE
        r.scan_time = time()
        r.scanned = True
        return r

    for x in range(32):
        for z in range(32):
            # Scan one chunk of the 32x32 grid inside the region.
            g_coords = r.get_global_chunk_coords(x, z)
            chunk, c = scan_chunk(region_file, (x, z), g_coords, entity_limit)
            if not c:  # chunk not created
                continue
            r.chunks[(x, z)] = c
            chunk_count += 1

            if c[TUPLE_STATUS] == world.CHUNK_OK:
                continue
            elif c[TUPLE_STATUS] == world.CHUNK_TOO_MANY_ENTITIES:
                # Deleting entities is in here because parsing a chunk
                # with thousands of wrong entities takes a long time,
                # and once detected it is better to fix it at once.
                if delete_entities:
                    world.delete_entities(region_file, x, z)
                    # Wrapped in print() — the original py2 form
                    # ``print (...).format(...)`` breaks under Python 3.
                    print(("Deleted {0} entities in chunk"
                           " ({1},{2}) of the region file: {3}")
                          .format(c[TUPLE_NUM_ENTITIES], x, z, r.filename))
                    # Entities removed, change chunk status to OK.
                    r.chunks[(x, z)] = (0, world.CHUNK_OK)
                else:
                    entities_prob += 1
            elif c[TUPLE_STATUS] == world.CHUNK_CORRUPTED:
                corrupted += 1
            elif c[TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED:
                wrong += 1

    # Now check for chunks sharing offsets:
    # Please note! region.py will mark both overlapping chunks as bad (the
    # one stepping outside its territory and the good one). Only a
    # wrong-located chunk with an overlapping flag is a really BAD chunk.
    # Use this criterion to discriminate.
    metadata = region_file.metadata
    sharing = [k for k in metadata
               if (metadata[k].status == region.STATUS_CHUNK_OVERLAPPING
                   and r[k][TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED)]
    shared_counter = 0
    for k in sharing:
        r[k] = (r[k][TUPLE_NUM_ENTITIES], world.CHUNK_SHARED_OFFSET)
        shared_counter += 1

    r.chunk_count = chunk_count
    r.corrupted_chunks = corrupted
    r.wrong_located_chunks = wrong
    r.entities_prob = entities_prob
    r.shared_offset = shared_counter
    r.scan_time = time()
    r.status = world.REGION_OK
    r.scanned = True
    return r
def scan_region_file(scanned_regionfile_obj, options):
    """Scan a region file, collecting block/biome statistics along the way.

    Args:
        scanned_regionfile_obj: ScannedRegionFile to scan; mutated in place.
        options: parsed options; ``delete_entities`` removes entities of
            chunks exceeding ``entity_limit`` while scanning.

    Returns:
        The same object with counters, block/biome aggregation arrays,
        ``status`` and ``scan_time`` set; ``None`` if the file could not
        be parsed as a region file.
    """
    o = options
    delete_entities = o.delete_entities
    entity_limit = o.entity_limit
    # NOTE: the original wrapped the body in a ``try:`` with no matching
    # handler (a syntax error); the dangling try was removed.
    global cnx
    # cnx = mysql.connect(user='******', password='', host='localhost', port='3306', database='block_stats')

    # Per-region statistics: one counter per block id/data combination,
    # container dumps and per-biome counters, filled by scan_chunk.
    block_aggregation = [0] * 4096
    containers = []
    biomes = [0] * 256
    r = scanned_regionfile_obj

    # Counters of problems found while scanning.
    chunk_count = 0
    corrupted = 0
    wrong = 0
    entities_prob = 0
    filename = r.filename

    # Try to open the file and see if we can parse the header.
    try:
        region_file = region.RegionFile(r.path)
    except region.NoRegionHeader:
        # The region file is too small to hold a header.
        r.status = world.REGION_TOO_SMALL
        return r
    except IOError as e:
        print("\nWARNING: I can't open the file {0} !\nThe error is \"{1}\".\nTypical causes are file blocked or problems in the file system.\n".format(filename, e))
        r.status = world.REGION_UNREADABLE
        r.scan_time = time.time()
        print("Note: this region file won't be scanned and won't be taken into acount in the summaries")
        # TODO count also these region files
        return r
    except Exception:
        # Whatever else went wrong: report it and skip this file.
        print("\nWARNING: The region file \'{0}\' had an error and couldn't be parsed as region file!\nError:{1}\n".format(join(split(split(r.path)[0])[1], split(r.path)[1]), sys.exc_info()[0]))
        print("Note: this region file won't be scanned and won't be taken into acount.")
        print("Also, this may be a bug. Please, report it if you have the time.\n")
        return None

    try:  # start the scanning of chunks
        for x in range(32):
            for z in range(32):
                # Scan one chunk, accumulating into the stats arrays.
                g_coords = r.get_global_chunk_coords(x, z)
                chunk, c = scan_chunk(region_file, (x, z), g_coords, o,
                                      block_aggregation, containers, biomes)
                if c is None:  # chunk not created
                    continue
                r.chunks[(x, z)] = c
                chunk_count += 1

                if c[TUPLE_STATUS] == world.CHUNK_OK:
                    continue
                elif c[TUPLE_STATUS] == world.CHUNK_TOO_MANY_ENTITIES:
                    # Deleting entities happens here because parsing a chunk
                    # with thousands of wrong entities takes a long time; once
                    # detected it is better to fix it at once.
                    if delete_entities:
                        world.delete_entities(region_file, x, z)
                        print("Deleted {0} entities in chunk ({1},{2}) of the region file: {3}".format(c[TUPLE_NUM_ENTITIES], x, z, r.filename))
                        # Entities removed, change chunk status to OK.
                        r.chunks[(x, z)] = (0, world.CHUNK_OK)
                    else:
                        entities_prob += 1
                elif c[TUPLE_STATUS] == world.CHUNK_CORRUPTED:
                    corrupted += 1
                elif c[TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED:
                    wrong += 1

        # Now check for chunks sharing offsets.
        # Please note! region.py will mark both overlapping chunks as bad
        # (the one stepping outside its territory and the good one). Only a
        # wrong-located chunk with an overlapping flag is a really BAD chunk.
        # Use this criterion to discriminate.
        metadata = region_file.metadata
        sharing = [k for k in metadata
                   if (metadata[k].status == region.STATUS_CHUNK_OVERLAPPING
                       and r[k][TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED)]
        shared_counter = 0
        for k in sharing:
            r[k] = (r[k][TUPLE_NUM_ENTITIES], world.CHUNK_SHARED_OFFSET)
            shared_counter += 1

        # Persist the per-region block statistics (project helper).
        saveBlockStats('region', r.get_coords(), block_aggregation)

    except KeyboardInterrupt:
        print("\nInterrupted by user\n")
        # TODO this shouldn't exit
        sys.exit(1)

    r.chunk_count = chunk_count
    r.corrupted_chunks = corrupted
    r.wrong_located_chunks = wrong
    r.entities_prob = entities_prob
    r.shared_offset = shared_counter
    r.scan_time = time.time()
    r.status = world.REGION_OK
    r.block_aggregation = block_aggregation
    r.containers = containers
    r.biomes = biomes
    return r
def scan_region_file(scanned_regionfile_obj, entity_limit, delete_entities):
    """Scan a region file, filling the given ScannedRegionFile.

    Args:
        scanned_regionfile_obj: ScannedRegionFile to scan; mutated in place.
        entity_limit: threshold above which a chunk is flagged as having
            too many entities.
        delete_entities: if True, delete entities of over-limit chunks.

    Returns:
        The same ScannedRegionFile with counters, ``status``, ``scan_time``
        and ``scanned`` set.
    """
    # The original enclosed everything in a ``try:`` that had no handler
    # (invalid Python); that dangling try was dropped.
    r = scanned_regionfile_obj

    # Counters of problems found while scanning.
    chunk_count = 0
    corrupted = 0
    wrong = 0
    entities_prob = 0
    filename = r.filename

    # Try to open the file and see if we can parse the header.
    try:
        region_file = region.RegionFile(r.path)
    except region.NoRegionHeader:
        # The region file is too small to hold a header.
        r.status = world.REGION_TOO_SMALL
        r.scan_time = time()
        r.scanned = True
        return r
    except IOError:
        r.status = world.REGION_UNREADABLE
        r.scan_time = time()
        r.scanned = True
        return r

    for x in range(32):
        for z in range(32):
            # Scan one chunk of the 32x32 grid inside the region.
            g_coords = r.get_global_chunk_coords(x, z)
            chunk, c = scan_chunk(region_file, (x, z), g_coords, entity_limit)
            if not c:  # chunk not created
                continue
            r.chunks[(x, z)] = c
            chunk_count += 1

            if c[TUPLE_STATUS] == world.CHUNK_OK:
                continue
            elif c[TUPLE_STATUS] == world.CHUNK_TOO_MANY_ENTITIES:
                # Deleting entities is done here because parsing a chunk
                # with thousands of wrong entities takes a long time, and
                # once detected it is better to fix it at once.
                if delete_entities:
                    world.delete_entities(region_file, x, z)
                    # Parenthesized for Python 3: the py2 statement form
                    # ``print (...).format(...)`` would call .format on
                    # print()'s None return value.
                    print(("Deleted {0} entities in chunk"
                           " ({1},{2}) of the region file: {3}").format(
                               c[TUPLE_NUM_ENTITIES], x, z, r.filename))
                    # Entities removed, change chunk status to OK.
                    r.chunks[(x, z)] = (0, world.CHUNK_OK)
                else:
                    entities_prob += 1
            elif c[TUPLE_STATUS] == world.CHUNK_CORRUPTED:
                corrupted += 1
            elif c[TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED:
                wrong += 1

    # Now check for chunks sharing offsets:
    # region.py marks both overlapping chunks as bad (the one stepping
    # outside its territory and the good one). Only a wrong-located chunk
    # with an overlapping flag is a really BAD chunk — use this criterion
    # to discriminate.
    metadata = region_file.metadata
    sharing = [k for k in metadata
               if (metadata[k].status == region.STATUS_CHUNK_OVERLAPPING
                   and r[k][TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED)]
    shared_counter = 0
    for k in sharing:
        r[k] = (r[k][TUPLE_NUM_ENTITIES], world.CHUNK_SHARED_OFFSET)
        shared_counter += 1

    r.chunk_count = chunk_count
    r.corrupted_chunks = corrupted
    r.wrong_located_chunks = wrong
    r.entities_prob = entities_prob
    r.shared_offset = shared_counter
    r.scan_time = time()
    r.status = world.REGION_OK
    r.scanned = True
    return r
def scan_region_file(scanned_regionfile_obj, options):
    """Scan a region file, collecting block/container statistics as it goes.

    Args:
        scanned_regionfile_obj: ScannedRegionFile to scan; mutated in place.
        options: parsed options; ``delete_entities`` removes entities of
            chunks exceeding ``entity_limit`` while scanning.

    Returns:
        The same object with counters, ``block_aggregation``,
        ``containers``, ``status`` and ``scan_time`` set; ``None`` when the
        file could not be parsed as a region file at all.
    """
    o = options
    delete_entities = o.delete_entities
    entity_limit = o.entity_limit
    # NOTE: the original wrapped the body in a handler-less ``try:``
    # (a syntax error); the dangling try was removed.
    global cnx
    # cnx = mysql.connect(user='******', password='', host='localhost', port='3306', database='block_stats')

    # Per-region statistics filled in by scan_chunk: one counter per
    # block id/data combination, plus container contents.
    block_aggregation = [0] * 4096
    containers = []
    r = scanned_regionfile_obj

    # Counters of problems found while scanning.
    chunk_count = 0
    corrupted = 0
    wrong = 0
    entities_prob = 0
    filename = r.filename

    # Try to open the file and see if we can parse the header.
    try:
        region_file = region.RegionFile(r.path)
    except region.NoRegionHeader:
        # The region file is too small to hold a header.
        r.status = world.REGION_TOO_SMALL
        return r
    except IOError as e:
        print("\nWARNING: I can't open the file {0} !\nThe error is \"{1}\".\nTypical causes are file blocked or problems in the file system.\n".format(filename, e))
        r.status = world.REGION_UNREADABLE
        r.scan_time = time.time()
        print("Note: this region file won't be scanned and won't be taken into acount in the summaries")
        # TODO count also these region files
        return r
    except Exception:
        # Whatever else went wrong: report it and skip this file.
        print("\nWARNING: The region file \'{0}\' had an error and couldn't be parsed as region file!\nError:{1}\n".format(join(split(split(r.path)[0])[1], split(r.path)[1]), sys.exc_info()[0]))
        print("Note: this region file won't be scanned and won't be taken into acount.")
        print("Also, this may be a bug. Please, report it if you have the time.\n")
        return None

    try:  # start the scanning of chunks
        for x in range(32):
            for z in range(32):
                # Scan one chunk, accumulating into the stats arrays.
                g_coords = r.get_global_chunk_coords(x, z)
                chunk, c = scan_chunk(region_file, (x, z), g_coords, o,
                                      block_aggregation, containers)
                if c is None:  # chunk not created
                    continue
                r.chunks[(x, z)] = c
                chunk_count += 1

                if c[TUPLE_STATUS] == world.CHUNK_OK:
                    continue
                elif c[TUPLE_STATUS] == world.CHUNK_TOO_MANY_ENTITIES:
                    # Deleting entities happens here because parsing a chunk
                    # with thousands of wrong entities takes a long time;
                    # once detected it is better to fix it at once.
                    if delete_entities:
                        world.delete_entities(region_file, x, z)
                        print("Deleted {0} entities in chunk ({1},{2}) of the region file: {3}".format(c[TUPLE_NUM_ENTITIES], x, z, r.filename))
                        # Entities removed, change chunk status to OK.
                        r.chunks[(x, z)] = (0, world.CHUNK_OK)
                    else:
                        entities_prob += 1
                elif c[TUPLE_STATUS] == world.CHUNK_CORRUPTED:
                    corrupted += 1
                elif c[TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED:
                    wrong += 1

        # Now check for chunks sharing offsets.
        # region.py marks both overlapping chunks as bad (the one stepping
        # outside its territory and the good one). Only a wrong-located
        # chunk with an overlapping flag is a really BAD chunk — use this
        # criterion to discriminate.
        metadata = region_file.metadata
        sharing = [
            k for k in metadata
            if (metadata[k].status == region.STATUS_CHUNK_OVERLAPPING
                and r[k][TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED)
        ]
        shared_counter = 0
        for k in sharing:
            r[k] = (r[k][TUPLE_NUM_ENTITIES], world.CHUNK_SHARED_OFFSET)
            shared_counter += 1

        # Persist the per-region block statistics (project helper).
        saveBlockStats('region', r.get_coords(), block_aggregation)

    except KeyboardInterrupt:
        print("\nInterrupted by user\n")
        # TODO this shouldn't exit
        sys.exit(1)

    r.chunk_count = chunk_count
    r.corrupted_chunks = corrupted
    r.wrong_located_chunks = wrong
    r.entities_prob = entities_prob
    r.shared_offset = shared_counter
    r.scan_time = time.time()
    r.status = world.REGION_OK
    r.block_aggregation = block_aggregation
    r.containers = containers
    return r
def scan_region_file(to_scan_region_file):
    """Scan a region file and fill a ScannedRegionFile obj.

    Runs inside a worker: configuration (``options``, ``regionset``) and the
    result queue ``q`` are attached as attributes on this function by the
    pool initializer. Results are published via ``scan_region_file.q.put``:
    ``(r, filename, corrupted, wrong, entities_prob, chunk_count)`` on
    success, ``(r, filename, None)`` when the file cannot be opened.
    """
    try:
        r = to_scan_region_file
        o = scan_region_file.options
        delete_entities = o.delete_entities
        entity_limit = o.entity_limit
        regionset = scan_region_file.regionset  # kept: set by the initializer

        # Counters of problems found while scanning.
        chunk_count = 0
        corrupted = 0
        wrong = 0
        entities_prob = 0

        # Bind the name BEFORE opening the file: region.RegionFile(r.path)
        # is precisely what raises IOError, and the handler below reports
        # `filename` (previously this was a NameError on open failure).
        filename = r.filename
        region_file = region.RegionFile(r.path)

        try:
            for x in range(32):
                for z in range(32):
                    # Scan one chunk of the 32x32 grid inside the region.
                    chunk, c = scan_chunk(region_file, (x, z), o)
                    if c is None:  # chunk not created
                        continue
                    r.chunks[(x, z)] = c
                    chunk_count += 1

                    if c[TUPLE_STATUS] == world.CHUNK_OK:
                        continue
                    elif c[TUPLE_STATUS] == world.CHUNK_TOO_MANY_ENTITIES:
                        # Deleting entities is done here because parsing a
                        # chunk with thousands of wrong entities takes a long
                        # time; once detected it is better to fix it at once.
                        if delete_entities:
                            world.delete_entities(region_file, x, z)
                            print("Deleted {0} entities in chunk ({1},{2}) of the region file: {3}".format(c[TUPLE_NUM_ENTITIES], x, z, r.filename))
                            # Entities removed, change chunk status to OK.
                            r.chunks[(x, z)] = (0, world.CHUNK_OK)
                        else:
                            entities_prob += 1
                    elif c[TUPLE_STATUS] == world.CHUNK_CORRUPTED:
                        corrupted += 1
                    elif c[TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED:
                        wrong += 1
        except KeyboardInterrupt:
            print("\nInterrupted by user\n")
            # TODO this shouldn't exit directly in the next version...
            sys.exit(1)

        r.chunk_count = chunk_count
        r.corrupted_chunks = corrupted
        r.wrong_located_chunks = wrong
        r.entities_prob = entities_prob
        r.scan_time = time.time()
        scan_region_file.q.put(
            (r, filename, corrupted, wrong, entities_prob, chunk_count))
        return

    # Fatal exceptions:
    except IOError as e:
        print("\nWARNING: I can't open the file {0} !\nThe error is \"{1}\".\nTypical causes are file blocked or problems in the file system.\n".format(filename, e))
        # TODO: This doesn't need to be fatal.
        scan_region_file.q.put((r, filename, None))
        return
def scan_region_file(scanned_regionfile_obj, options):
    """Scan a region file and return the ScannedRegionFile with results.

    Args:
        scanned_regionfile_obj: ScannedRegionFile to scan; mutated in place.
        options: parsed options; ``delete_entities`` removes entities of
            chunks exceeding ``entity_limit`` while scanning.

    Returns:
        The same object with counters, ``status`` and ``scan_time`` set, or
        ``None`` when the file could not be parsed as a region file.
    """
    o = options
    delete_entities = o.delete_entities
    entity_limit = o.entity_limit
    # NOTE: the original enclosed the body in a ``try:`` that had no
    # matching handler (a syntax error); that dangling try was removed.
    r = scanned_regionfile_obj

    # Counters of problems found while scanning.
    chunk_count = 0
    corrupted = 0
    wrong = 0
    entities_prob = 0
    filename = r.filename

    # Try to open the file and see if we can parse the header.
    try:
        region_file = region.RegionFile(r.path)
    except region.NoRegionHeader:
        # The region file is too small to hold a header.
        r.status = world.REGION_TOO_SMALL
        return r
    except IOError as e:
        print("\nWARNING: I can't open the file {0} !\nThe error is \"{1}\".\nTypical causes are file blocked or problems in the file system.\n".format(filename, e))
        r.status = world.REGION_UNREADABLE
        r.scan_time = time.time()
        print("Note: this region file won't be scanned and won't be taken into acount in the summaries")
        # TODO count also these region files
        return r
    except Exception:
        # Whatever else went wrong: report it and skip this file.
        print("\nWARNING: The region file \'{0}\' had an error and couldn't be parsed as region file!\nError:{1}\n".format(join(split(split(r.path)[0])[1], split(r.path)[1]), sys.exc_info()[0]))
        print("Note: this region file won't be scanned and won't be taken into acount.")
        print("Also, this may be a bug. Please, report it if you have the time.\n")
        return None

    try:  # start the scanning of chunks
        for x in range(32):
            for z in range(32):
                # Scan one chunk of the 32x32 grid inside the region.
                g_coords = r.get_global_chunk_coords(x, z)
                chunk, c = scan_chunk(region_file, (x, z), g_coords, o)
                if c is None:  # chunk not created
                    continue
                r.chunks[(x, z)] = c
                chunk_count += 1

                if c[TUPLE_STATUS] == world.CHUNK_OK:
                    continue
                elif c[TUPLE_STATUS] == world.CHUNK_TOO_MANY_ENTITIES:
                    # Deleting entities is done here because parsing a chunk
                    # with thousands of wrong entities takes a long time;
                    # once detected it is better to fix it at once.
                    if delete_entities:
                        world.delete_entities(region_file, x, z)
                        print("Deleted {0} entities in chunk ({1},{2}) of the region file: {3}".format(c[TUPLE_NUM_ENTITIES], x, z, r.filename))
                        # Entities removed, change chunk status to OK.
                        r.chunks[(x, z)] = (0, world.CHUNK_OK)
                    else:
                        entities_prob += 1
                elif c[TUPLE_STATUS] == world.CHUNK_CORRUPTED:
                    corrupted += 1
                elif c[TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED:
                    wrong += 1

        # Now check for chunks sharing offsets.
        # TODO: check how much does this slow down the scan process.
        # A header in region.py always contains all the possible entries.
        l = sorted(list(region_file.header.keys()))
        bad_chunks_list = []  # (coords, offset) pairs; kept for debugging
        shared_counter = 0
        for i in range(len(l)):
            # This is anything but efficient (O(n^2)), but works.
            for j in range(i + 1, len(l)):
                # An old header pointing to a non-created chunk: skip it.
                if (l[i] not in r.chunks) or (l[j] not in r.chunks):
                    continue
                if region_file.header[l[i]][0] == region_file.header[l[j]][0]:
                    # If both chunks share an offset, the wrong-located one
                    # is the bad one. Both chunks could be wrong located and
                    # therefore both sharing offsets with another chunk —
                    # that's why there are two independent if statements.
                    if r[l[i]][TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED:
                        r[l[i]] = (r[l[i]][TUPLE_NUM_ENTITIES], world.CHUNK_SHARED_OFFSET)
                        shared_counter += 1
                        wrong -= 1
                        bad_chunks_list.append([l[i], region_file.header[l[i]][0]])
                    if r[l[j]][TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED:
                        r[l[j]] = (r[l[j]][TUPLE_NUM_ENTITIES], world.CHUNK_SHARED_OFFSET)
                        bad_chunks_list.append([l[j], region_file.header[l[j]][0]])
                        wrong -= 1
                        shared_counter += 1

    except KeyboardInterrupt:
        print("\nInterrupted by user\n")
        # TODO this shouldn't exit
        sys.exit(1)

    r.chunk_count = chunk_count
    r.corrupted_chunks = corrupted
    r.wrong_located_chunks = wrong
    r.entities_prob = entities_prob
    r.shared_offset = shared_counter
    r.scan_time = time.time()
    r.status = world.REGION_OK
    return r
# Horse "Type" NBT tag -> display name.
# Type: 0 = Horse, 1 = Donkey, 2 = Mule, 3 = Zombie, 4 = Skeleton.
HORSE_TYPE_NAMES = {"0": "Horse",
                    "1": "Donkey",
                    "2": "Mule",
                    "3": "ZombieHorse",
                    "4": "Skeleton"}


def get_entity_position(val):
    """ Internal helper: return an entity's block position as (x, y, z) ints.

    `val` is an entity NBT compound; each `Pos` element is an NBT tag whose
    numeric payload is in its `.value` attribute.
    """
    return (int(float(val["Pos"][0].value)),
            int(float(val["Pos"][1].value)),
            int(float(val["Pos"][2].value)))


def get_owner_prefix(val):
    """ Internal helper: return an "<owner>'s " prefix for a tamed entity.

    Tries to translate an `OwnerUUID` tag into a player name using the
    module-level PLAYER_DATA list, falling back to the raw UUID. Older
    entities carry the name directly in `Owner`. Returns "" when the
    entity has no (non-empty) owner tag.

    NOTE(review): the original code left the owner variable unassigned when
    neither tag was present, raising NameError later; this now defaults to "".
    """
    if 'OwnerUUID' in val:
        if str(val["OwnerUUID"]) != "":
            try:
                matches = [u["name"] for u in PLAYER_DATA
                           if u["uuid"] == str(val["OwnerUUID"])]
                owner = matches[0]
            except Exception:  # unknown UUID or malformed PLAYER_DATA entry
                owner = str(val["OwnerUUID"])
            return owner + "'s "
        return ""
    if 'Owner' in val:
        if str(val["Owner"]) != "":
            return str(val["Owner"]) + "'s "
        return ""
    return ""


def print_name_tags(entities):
    """ Internal helper: print a locator line for interesting entities.

    Reports tamed horses (with jump/speed/health attributes), tamed wolves,
    tamed ocelots, and any other entity carrying a non-empty CustomName.
    Output goes straight to stdout.
    TODO: store these lines and show them as a summary that does not
    interrupt the progress bar, instead of printing immediately.
    """
    for val in entities:
        try:
            this_owner = get_owner_prefix(val)
            this_customname = ""
            if 'CustomName' in val and str(val["CustomName"]) != "":
                this_customname = "\"" + str(val["CustomName"]) + "\""

            ent_id = str(val["id"])
            if ent_id == "EntityHorse":
                if str(val["Tame"]) == "1":
                    jump = speed = health = ""
                    for at in val["Attributes"]:
                        at_name = str(at["Name"])
                        if at_name == "horse.jumpStrength":
                            jump = "%.3f" % float(at["Base"].value)
                        elif at_name == "generic.movementSpeed":
                            speed = "%.3f" % float(at["Base"].value)
                        elif at_name == "generic.maxHealth":
                            health = int(float(at["Base"].value))
                    if this_customname == "":
                        this_customname = HORSE_TYPE_NAMES.get(
                            str(val["Type"]), "")
                    px, py, pz = get_entity_position(val)
                    print("{5}{0} is at {1} {2} {3} ({4}: J {6} / S {7} / H {8}).".format(
                        this_customname, px, py, pz, val["id"], this_owner,
                        jump, speed, health))
                    continue
            elif ent_id == "Wolf":
                if this_owner != "":
                    if this_customname == "":
                        this_customname = "wolf"
                    px, py, pz = get_entity_position(val)
                    print("{5}{0} is at {1} {2} {3} ({4}).".format(
                        this_customname, px, py, pz, val["id"], this_owner))
                    continue
            elif ent_id == "Ozelot":
                if 'Tame' in val and str(val["Tame"]) == "1":
                    if this_customname == "":
                        this_customname = "cat"
                    px, py, pz = get_entity_position(val)
                    print("{5}{0} is at {1} {2} {3} ({4}).".format(
                        this_customname, px, py, pz, val["id"], this_owner))
                    continue

            # catch any other creature that's named
            try:
                if str(val["CustomName"]) != "":
                    px, py, pz = get_entity_position(val)
                    print("The {4}, {0} is at {1} {2} {3}.".format(
                        this_customname, px, py, pz, val["id"]))
                    continue
            except Exception:  # no CustomName tag at all: nothing to report
                pass
        except Exception:
            # Unexpected entity shape: report and abort the scan loudly
            # rather than silently producing a wrong summary.
            print("Unexpected error:", sys.exc_info()[0])
            raise


def scan_region_file(scanned_regionfile_obj, options):
    """ Scan one region file and fill the given object with the results.

    Given a scanned region file object with the information of a region
    file, scans it and returns the same object filled with the results.

    options.delete_entities: if True, chunks flagged CHUNK_TOO_MANY_ENTITIES
        (more than options.entity_limit entities, the threshold to consider
        a chunk as having a too-many-entities problem) have their entities
        deleted during the scan and are re-marked CHUNK_OK.
    options.name_tags: if True, named/tamed entities found in healthy
        chunks are printed as the scan goes (see print_name_tags).

    Returns the same object with status/counters filled in, or None if the
    file could not be parsed as a region file at all.

    NOTE(review): the original wrapped this whole body in one more try:
    whose handler is not visible in this chunk; the visible KeyboardInterrupt
    handler around the scan loop is kept, the unclosed wrapper is flattened.
    """
    o = options
    delete_entities = o.delete_entities
    # Kept for interface compatibility: assigned to r.name_tag_log below but
    # never appended to — name-tag lines are printed, not stored (see TODO).
    name_tag_log = ""

    r = scanned_regionfile_obj
    # counters of problems
    chunk_count = 0
    corrupted = 0
    wrong = 0
    entities_prob = 0
    shared_counter = 0
    filename = r.filename

    # try to open the file and see if we can parse the header
    try:
        region_file = region.RegionFile(r.path)
    except region.NoRegionHeader:
        # the region has no header
        r.status = world.REGION_TOO_SMALL
        return r
    except IOError as e:
        print("\nWARNING: I can't open the file {0} !\nThe error is \"{1}\".\nTypical causes are file blocked or problems in the file system.\n".format(filename, e))
        r.status = world.REGION_UNREADABLE
        r.scan_time = time.time()
        print("Note: this region file won't be scanned and won't be taken into acount in the summaries")
        # TODO count also this region files
        return r
    except:  # noqa: E722 — deliberate catch-all: whatever else, print an
             # error and skip this file rather than abort the whole scan.
             # not really sure if this is a good solution...
        print("\nWARNING: The region file \'{0}\' had an error and couldn't be parsed as region file!\nError:{1}\n".format(join(split(split(r.path)[0])[1], split(r.path)[1]), sys.exc_info()[0]))
        print("Note: this region file won't be scanned and won't be taken into acount.")
        print("Also, this may be a bug. Please, report it if you have the time.\n")
        return None

    try:  # start the scanning of chunks
        for x in range(32):
            for z in range(32):
                # start the actual chunk scanning
                g_coords = r.get_global_chunk_coords(x, z)
                chunk, c = scan_chunk(region_file, (x, z), g_coords, o)
                if c is None:  # chunk not created
                    continue
                r.chunks[(x, z)] = c
                chunk_count += 1

                if c[TUPLE_STATUS] == world.CHUNK_OK:
                    if options.name_tags:
                        print_name_tags(chunk["Level"]["Entities"])
                elif c[TUPLE_STATUS] == world.CHUNK_TOO_MANY_ENTITIES:
                    # deleting entities is in here because parsing a chunk
                    # with thousands of wrong entities takes a long time,
                    # and once detected it is better to fix it at once.
                    if delete_entities:
                        world.delete_entities(region_file, x, z)
                        print("Deleted {0} entities in chunk ({1},{2}) of the region file: {3}".format(c[TUPLE_NUM_ENTITIES], x, z, r.filename))
                        # entities removed, change chunk status to OK
                        r.chunks[(x, z)] = (0, world.CHUNK_OK)
                    else:
                        entities_prob += 1
                elif c[TUPLE_STATUS] == world.CHUNK_CORRUPTED:
                    corrupted += 1
                elif c[TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED:
                    wrong += 1

        # Now check for chunks sharing offsets:
        # Please note! region.py will mark both overlapping chunks as bad
        # (the one stepping outside his territory and the good one). Only
        # wrong located chunks with an overlapping flag are really BAD
        # chunks! Use this criterion to discriminate.
        metadata = region_file.metadata
        sharing = [k for k in metadata
                   if (metadata[k].status == region.STATUS_CHUNK_OVERLAPPING
                       and r[k][TUPLE_STATUS] == world.CHUNK_WRONG_LOCATED)]
        for k in sharing:
            r[k] = (r[k][TUPLE_NUM_ENTITIES], world.CHUNK_SHARED_OFFSET)
            shared_counter += 1
    except KeyboardInterrupt:
        print("\nInterrupted by user\n")
        # TODO this shouldn't exit
        sys.exit(1)

    r.chunk_count = chunk_count
    r.corrupted_chunks = corrupted
    r.wrong_located_chunks = wrong
    r.entities_prob = entities_prob
    r.shared_offset = shared_counter
    r.scan_time = time.time()
    r.status = world.REGION_OK
    r.name_tag_log = name_tag_log
    return r