def createOptimalSchedule(taskList, taskOrdering):
    lastDay = len(taskList[0].timeWindows)
    # Create a list with the integer of the latest time anything could be
    # scheduled in any particular day. Used as a proxy for the end of the day.
    dayEndings = [0 for i in range(lastDay + 1)]
    for task in taskList:
        for index, day in enumerate(task.timeWindows):
            for timeWindow in day:
                if timeWindow[1] > dayEndings[index]:
                    dayEndings[index] = timeWindow[1]
    schedule = Objects.Schedule()
    for day in range(lastDay):
        schedule.append(Objects.Route())
    # The current index into the task ordering.
    orderingIndex = 0
    while orderingIndex < len(taskOrdering):
        currentTask = taskList[taskOrdering[orderingIndex]]
        isInsertable, endingTime, endingDay, insertPosition = isTaskInsertable(
            schedule, currentTask, dayEndings, taskList)
        if isInsertable:
            schedule.routeList[endingDay].taskList.insert(insertPosition, currentTask)
            schedule.routeList[endingDay].endingTimes.insert(insertPosition, endingTime)
        orderingIndex += 1
    return schedule

def recall_power(engine, hero):
    if random.randint(0, 1) == 0:
        engine.hero = Objects.BrokenMind(hero)
        engine.notify("Your mind was broken")
    else:
        engine.hero = Objects.DragonStrength(hero)
        engine.notify("Even a dragon is not so strong")

def main():
    width = 500
    rows = 20
    win = pygame.display.set_mode((width, width))
    s = Objects.snake((255, 0, 0), (10, 10))
    snack = Objects.cube(randomSnack(rows, s), (0, 255, 0))
    flag = True
    clock = pygame.time.Clock()
    while flag:
        pygame.time.delay(60)
        clock.tick(10)
        keys = GetKeys()
        if keys[pygame.K_SPACE]:
            input('unpause with enter')
            keys = GetKeys()
        s.move(keys)
        if s.body[0].pos == snack.pos:
            s.addCube()
            snack = Objects.cube(randomSnack(rows, s), colour=(0, 255, 0))
        for x in range(len(s.body)):
            if s.body[x].pos in list(map(lambda z: z.pos, s.body[x + 1:])):
                print('Red score: ', len(s.body))
                message_box('you lost', 'play again')
                s.reset((10, 10))
                break
        redrawWindow(win, width, rows, s, snack)

def get_objects(self, _map):
    # Object creation
    for obj_name in object_list_prob['objects']:
        prop = object_list_prob['objects'][obj_name]
        for i in range(random.randint(prop['min-count'], prop['max-count'])):
            coord = (random.randint(1, 39), random.randint(1, 39))
            coord = self.intersect_check(coord, _map, 40, 40)
            self.objects.append(Objects.Ally(
                prop['sprite'], prop['action'], coord))
    # Ally creation
    for obj_name in object_list_prob['ally']:
        prop = object_list_prob['ally'][obj_name]
        for i in range(random.randint(prop['min-count'], prop['max-count'])):
            coord = (random.randint(1, 39), random.randint(1, 39))
            coord = self.intersect_check(coord, _map, 40, 40)
            self.objects.append(Objects.Ally(
                prop['sprite'], prop['action'], coord))
    # Enemy creation
    for obj_name in object_list_prob['enemies']:
        prop = object_list_prob['enemies'][obj_name]
        for i in range(random.randint(0, 5)):
            coord = (random.randint(1, 39), random.randint(1, 39))
            coord = self.intersect_check(coord, _map, 40, 40)
            self.objects.append(Objects.Enemy(
                prop['sprite'], prop['action'], prop['stats'],
                prop['experience'], coord))
    return self.objects

def create_game(sprite_size: int, is_new: bool):
    global hero, engine, drawer, iteration
    if is_new:
        hero = Objects.Hero(base_stats, Objects.create_sprite(
            os.path.join("texture", "Hero.png"), sprite_size))
        engine = Logic.GameEngine()
        Service.service_init(sprite_size)
        Service.reload_game(engine, hero)
        drawer = ScreenEngine.GameSurface(
            (640, 480), pygame.SRCALPHA, (0, 480),
            ScreenEngine.ProgressBar(
                (640, 120), (640, 0),
                ScreenEngine.InfoWindow(
                    (160, 600), (50, 50),
                    ScreenEngine.HelpWindow(
                        (700, 500), pygame.SRCALPHA, (0, 0),
                        ScreenEngine.ScreenHandle((0, 0))))))
    else:
        engine.sprite_size = sprite_size
        hero.sprite = Objects.create_sprite(
            os.path.join("texture", "Hero.png"), sprite_size)
        Service.service_init(sprite_size, False)
        Logic.GameEngine.sprite_size = sprite_size
    drawer.connect_engine(engine)
    iteration = 0

def main():
    d = Objects.DFXMLObject()
    d.program = sys.argv[0]
    d.program_version = __version__
    d.command_line = " ".join(sys.argv)
    current_appender = d
    tally = 0
    for (event, obj) in Objects.iterparse(args.infile):
        if event == "start":
            #Inherit namespaces
            if isinstance(obj, Objects.DFXMLObject):
                for (prefix, url) in obj.iter_namespaces():
                    d.add_namespace(prefix, url)
            #Group files by volume
            elif isinstance(obj, Objects.VolumeObject):
                d.append(obj)
                current_appender = obj
        elif event == "end":
            if isinstance(obj, Objects.VolumeObject):
                current_appender = d
            elif isinstance(obj, Objects.FileObject):
                if "_changed" not in obj.diffs:
                    if "_modified" in obj.diffs or "_renamed" in obj.diffs:
                        current_appender.append(obj)
                        tally += 1
    print(d.to_dfxml())
    _logger.info("Found %d suspiciously-changed files." % tally)

def main():
    dobj = Objects.DFXMLObject(version="1.1.1")
    fobj = Objects.FileObject()
    dobj.append(fobj)
    fobj.filename = "single_file.dat"
    contents = b"AAAA\n"
    fobj.filesize = len(contents)
    _md5er = hashlib.md5()
    _md5er.update(contents)
    fobj.md5 = _md5er.hexdigest()
    _sha1er = hashlib.sha1()
    _sha1er.update(contents)
    fobj.sha1 = _sha1er.hexdigest()
    _sha256er = hashlib.sha256()
    _sha256er.update(contents)
    fobj.sha256 = _sha256er.hexdigest()
    fobj.crtime = "2000-02-03T04:05:06Z"
    fobj.mtime = "2001-02-03T04:05:06Z"
    fobj.atime = "2003-02-03T04:05:06Z"
    fobj.ctime = "2004-02-03T04:05:06Z"
    with open(args.out_dfxml, "w") as out_fh:
        dobj.print_dfxml(output_fh=out_fh)

def run_game():
    pygame.init()
    ai_settings = Game_Settings()
    screen = pygame.display.set_mode(
        (ai_settings.screen_width, ai_settings.screen_height))
    pygame.display.set_caption("Alien Invasion")
    player_ship = Objects.Ship(screen, ai_settings)
    bullets = Group()
    aliens = Group()
    play_button = Objects.Button(ai_settings, screen, "Play")
    function.create_fleet(ai_settings, screen, player_ship, aliens)
    stats = GameStats(ai_settings)
    function.update_highscore(stats)
    score_board = Objects.Scoreboard(ai_settings, screen, stats)
    while True:
        function.check_events(ai_settings, screen, player_ship, bullets,
                              stats, play_button, aliens, score_board)
        if stats.game_active:
            player_ship.update()
            function.update_bullets(aliens, bullets, stats, ai_settings,
                                    score_board)
            function.update_aliens(ai_settings, stats, screen, player_ship,
                                   aliens, bullets, score_board)
        function.update_screen(ai_settings, screen, aliens, player_ship,
                               bullets, stats, play_button, score_board)

def main(): d = Objects.DFXMLObject("1.2.0") d.program = sys.argv[0] d.program_version = __version__ d.command_line = " ".join(sys.argv) d.dc["type"] = "File system silent-change report" d.add_creator_library("Python", ".".join(map(str, sys.version_info[0:3]))) #A bit of a bend, but gets the major version information out. d.add_creator_library("Objects.py", Objects.__version__) d.add_creator_library("dfxml.py", Objects.dfxml.__version__) current_appender = d tally = 0 for (event, obj) in Objects.iterparse(args.infile): if event == "start": #Inherit namespaces if isinstance(obj, Objects.DFXMLObject): for (prefix, url) in obj.iter_namespaces(): d.add_namespace(prefix, url) #Group files by volume elif isinstance(obj, Objects.VolumeObject): d.append(obj) current_appender = obj elif event == "end": if isinstance(obj, Objects.VolumeObject): current_appender = d elif isinstance(obj, Objects.FileObject): if "_changed" not in obj.diffs: if "_modified" in obj.diffs or "_renamed" in obj.diffs: current_appender.append(obj) tally += 1 print(d.to_dfxml()) _logger.info("Found %d suspiciously-changed files." % tally)
def __init__(self, manager):
    super(MenuScene, self).__init__()
    self.manager = manager
    self.font = pygame.font.SysFont('Consolas', 56)
    self.sfont = pygame.font.SysFont('Consolas', 32)
    loadRoomText = self.font.render('Load Room from file', True, WHITE)
    loadRoomImage = pygame.Surface(loadRoomText.get_size())
    loadRoomImage.fill(BLUE, loadRoomText.get_rect())
    loadRoomImage.blit(loadRoomText, (0, 0))
    self.load_room_button = Objects.SimpleRectSprite(
        loadRoomImage.get_rect(), loadRoomImage)
    newRoomText = self.font.render('Create new room', True, WHITE)
    newRoomImage = pygame.Surface(newRoomText.get_size())
    newRoomImage.fill(BLUE, newRoomText.get_rect())
    newRoomImage.blit(newRoomText, (0, 0))
    self.new_room_button = Objects.SimpleRectSprite(
        newRoomImage.get_rect(), newRoomImage)
    screenrect = self.manager.screen.get_rect()
    self.load_room_button.rect.center = screenrect.center
    self.new_room_button.rect.center = screenrect.center
    self.load_room_button.rect.centery -= window_height / 20
    self.new_room_button.rect.centery += window_height / 20
    self.buttons = pygame.sprite.RenderUpdates(self.load_room_button,
                                               self.new_room_button)
    if pygame.mixer.get_init():
        pygame.mixer.stop()

def createMapImage(self):
    """Create a QImage containing all the different layers of the map."""
    # create background, landscape and object layers
    self.background_layer = Layers.BackgroundLayer()
    self.landscape_layer = Layers.LandscapeLayer()
    self.object_layer = Layers.ObjectLayer()
    # create first tank
    tank_one = Objects.Tank(0, 200, (self.height // 2), 100, 100,
                            core.Qt.yellow, core.Qt.cyan)
    self.tanks.append(tank_one)
    self.getNewCoordinates(self.tanks[0].tank_id, 0)
    # create second tank
    tank_two = Objects.Tank(1, (self.width - 200), (self.height // 2), 100, 100,
                            core.Qt.black, core.Qt.red)
    self.tanks.append(tank_two)
    self.getNewCoordinates(self.tanks[1].tank_id, 0)
    # paint tanks
    tmp_painter = gui.QPainter(self.object_layer)
    tmp_painter.drawImage(self.tanks[0].x_position, self.tanks[0].y_position,
                          self.tanks[0])
    tmp_painter.drawImage(self.tanks[1].x_position, self.tanks[1].y_position,
                          self.tanks[1])
    tmp_painter.end()
    # draw all layers
    self.drawMap()
    self.drawObjects()
    self.drawGame()
    self.setPixmap(gui.QPixmap.fromImage(self.game_image))

def get_objects(self, _map):
    for obj_name in object_list_prob['objects']:
        prop = object_list_prob['objects'][obj_name]
        for i in range(random.randint(prop['min-count'], prop['max-count'])):
            position = random_position(_map, self.objects)
            self.objects.append(
                Objects.Ally(prop['sprite'], prop['action'], position))
    for obj_name in object_list_prob['ally']:
        prop = object_list_prob['ally'][obj_name]
        for i in range(random.randint(prop['min-count'], prop['max-count'])):
            position = random_position(_map, self.objects)
            self.objects.append(
                Objects.Ally(prop['sprite'], prop['action'], position))
    for obj_name in object_list_prob['enemies']:
        prop = object_list_prob['enemies'][obj_name]
        for i in range(random.randint(0, 5)):
            position = random_position(_map, self.objects)
            self.objects.append(
                Objects.Enemy(prop['sprite'], prop, prop['experience'], position))
    return self.objects

def populate(width, height):
    people = []
    for i in range(100):
        people.append(Objects.person(width, height, 'safe'))
    for i in range(15):
        people.append(Objects.person(width, height, 'ill'))
    return people

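# Usage sketch (hedged): the 800x600 window size is an assumed example value;
# the 100 'safe' + 15 'ill' split is fixed by populate() above.
# people = populate(800, 600)
# len(people)  # -> 115
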
def get_objects(self, _map):
    for obj_name in object_list_prob['objects']:
        prop = object_list_prob['objects'][obj_name]
        for i in range(random.randint(prop['min-count'], prop['max-count'])):
            coord = (random.randint(1, 39), random.randint(1, 39))
            coord = find_free_position(self.objects, _map, coord)
            self.objects.append(
                Objects.Ally(prop['sprite'], prop['action'], coord))
    for obj_name in object_list_prob['ally']:
        prop = object_list_prob['ally'][obj_name]
        for i in range(random.randint(prop['min-count'], prop['max-count'])):
            coord = (random.randint(1, 39), random.randint(1, len(_map) - 2))
            coord = find_free_position(self.objects, _map, coord)
            self.objects.append(
                Objects.Ally(prop['sprite'], prop['action'], coord))
    for obj_name in object_list_prob['enemies']:
        prop = object_list_prob['enemies'][obj_name]
        for i in range(random.randint(0, 5)):
            coord = (random.randint(1, 30), random.randint(1, 22))
            coord = find_free_position(self.objects, _map, coord)
            self.objects.append(
                Objects.Enemy(prop['sprite'], prop, prop['experience'], coord))
    return self.objects

def __init__(self, profilesList):
    self.profileList = list()
    self.order = list()

    # DFXML and RegXML Objects to store FileObjects
    # and CellObjects found in the APXML documents
    self.dfxml_obj = Objects.DFXMLObject()
    self.regxml_obj = Objects.RegXMLObject()

    # Keep record of profile name for output
    self.out_fn = os.path.basename(profilesList[0])
    self.out_fn = os.path.splitext(self.out_fn)[0]

    # Parse each APXML file to an OrderedDict
    for i, profile in enumerate(profilesList):
        print(" > %s" % profile)
        apxml_obj = apxml.iterparse(profile)
        apxml.generate_stats(apxml_obj)

        # Split the file system path for the application profile
        name = profile.split('/')
        name = name[0]
        #name = name[len(name) - 1]
        #name = name.split("-")[0]
        apxml_obj.name = name
        self.profileList.append(apxml_obj)

def get_lucky(engine, hero):
    if random.randint(0, 1) == 1:
        engine.hero = Objects.GetLuck(hero)
        engine.notify("You're lucky!!!")
    else:
        engine.hero = Objects.BadLuck(hero)
        engine.notify("Bad luck, man :(")

def get_objects(self, _map):
    for obj_name in object_list_prob['objects']:
        prop = object_list_prob['objects'][obj_name]
        for i in range(random.randint(prop['min-count'], prop['max-count'])):
            coord = get_random_coord()
            intersect = True
            while intersect:
                intersect = False
                if _map[coord[1]][coord[0]] == wall:
                    intersect = True
                    coord = get_random_coord()
                    continue
                for obj in self.objects:
                    if coord == obj.position or coord == (1, 1):
                        intersect = True
                        coord = get_random_coord()
            self.objects.append(Objects.Ally(
                prop['sprite'], prop['action'], coord))
    for obj_name in object_list_prob['ally']:
        prop = object_list_prob['ally'][obj_name]
        for i in range(random.randint(prop['min-count'], prop['max-count'])):
            coord = get_random_coord()
            intersect = True
            while intersect:
                intersect = False
                if _map[coord[1]][coord[0]] == wall:
                    intersect = True
                    coord = get_random_coord()
                    continue
                for obj in self.objects:
                    if coord == obj.position or coord == (1, 1):
                        intersect = True
                        coord = get_random_coord()
            self.objects.append(Objects.Ally(
                prop['sprite'], prop['action'], coord))
    for obj_name in object_list_prob['enemies']:
        prop = object_list_prob['enemies'][obj_name]
        for i in range(random.randint(0, 5)):
            coord = get_random_coord()
            intersect = True
            while intersect:
                intersect = False
                if _map[coord[1]][coord[0]] == wall:
                    intersect = True
                    coord = get_random_coord()
                    continue
                for obj in self.objects:
                    if coord == obj.position or coord == (1, 1):
                        intersect = True
                        coord = get_random_coord()
            self.objects.append(Objects.Enemy(
                prop['sprite'], prop, prop['experience'], coord))
    return self.objects

def runRaids(self, numberOfRaidWeeks, raidSchedule, numberOfSimulationRuns=1):
    results_ML = SimLootResult(numberOfRaidWeeks + 1, numberOfSimulationRuns)
    results_PL = SimLootResult(numberOfRaidWeeks + 1, numberOfSimulationRuns)
    for i in range(numberOfSimulationRuns):
        # Copy the constant base roster every time a new group of raids is run
        roster_ML = deepcopy(self.baseRoster)
        roster_PL = deepcopy(self.baseRoster)
        results_ML.itemLevelAverages[i, 0] = self._averageItemLevel(roster_ML)
        results_PL.itemLevelAverages[i, 0] = self._averageItemLevel(roster_PL)
        nightholdNormalRaid = Objects.Raid(
            [870, 870, 870, 875, 875, 875, 875, 875, 875, 880])
        nightholdHeroicRaid = Objects.Raid(
            [885, 885, 885, 890, 890, 890, 890, 890, 890, 895])
        nightholdNormalLootTables = nightholdNormalRaid.getLootTables()
        nightholdHeroicLootTables = nightholdHeroicRaid.getLootTables()
        # A list (not a map iterator), so the schedule can be tested more than once.
        raidSchedule = [x.lower() for x in raidSchedule]
        # Run the raids
        for j in range(numberOfRaidWeeks):
            raidGroup_ML, raidGroup_PL = self._pickRaidGroup(roster_ML, roster_PL)
            if 'nighthold normal' in raidSchedule:
                looted_ML, wastedNoNeed_ML = self._distributeMasterLoot(
                    raidGroup_ML, nightholdNormalLootTables)
                looted_PL, wastedNoNeed_PL, wastedNotTradeable_PL = \
                    self._distributePersonalLoot(raidGroup_PL, nightholdNormalLootTables)
                results_ML.looted[i, j + 1] = looted_ML
                results_ML.wastedNoNeed[i, j + 1] = wastedNoNeed_ML
                results_PL.looted[i, j + 1] = looted_PL
                results_PL.wastedNoNeed[i, j + 1] = wastedNoNeed_PL
                results_PL.wastedNotTradeable[i, j + 1] = wastedNotTradeable_PL
            if 'nighthold heroic' in raidSchedule:
                looted_ML, wastedNoNeed_ML = self._distributeMasterLoot(
                    raidGroup_ML, nightholdHeroicLootTables)
                looted_PL, wastedNoNeed_PL, wastedNotTradeable_PL = \
                    self._distributePersonalLoot(raidGroup_PL, nightholdHeroicLootTables)
                results_ML.looted[i, j + 1] = looted_ML
                results_ML.wastedNoNeed[i, j + 1] = wastedNoNeed_ML
                results_PL.looted[i, j + 1] = looted_PL
                results_PL.wastedNoNeed[i, j + 1] = wastedNoNeed_PL
                results_PL.wastedNotTradeable[i, j + 1] = wastedNotTradeable_PL
            results_PL.itemLevelAverages[i, j + 1] = self._averageItemLevel(roster_PL)
            results_ML.itemLevelAverages[i, j + 1] = self._averageItemLevel(roster_ML)
        # Aggregate per-player loot data
        for player_ML, player_PL in zip(roster_ML, roster_PL):
            results_ML.addTimeBetweenItem(player_ML.timeBetweenLoots)
            results_PL.addTimeBetweenItem(player_PL.timeBetweenLoots)
    results_ML.doStatistics()
    results_PL.doStatistics()
    return results_ML, results_PL

def createObject(type, attrs, objs):
    if type == 'Object':
        return objects.Object(attrs[0], attrs[1], True, attrs[2], attrs[3])
    elif type == 'Goal':
        return objects.Goal(attrs[0], attrs[1], True, attrs[2], attrs[3])
    elif type == 'Player':
        return objects.Character(attrs[0], attrs[1], True, attrs[2], attrs[3])
    elif type == 'Mob':
        return objects.Mobs(attrs[0], attrs[1], True, attrs[2], attrs[3])
    elif type == 'Level':
        return console.Level(attrs[0], objs)
    # Len = 1 Behaviour is 6

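# Usage sketch (hedged): attrs is a 4-item list forwarded positionally to the
# objects.* constructors above; what each slot means (position, size, ...) is
# an assumption here, so these calls are illustrative only.
# mob = createObject('Mob', [x, y, width, height], None)
# lvl = createObject('Level', [level_name], [mob])
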
async def ThirdFunction(time):
    print('Thread 3 started...')
    await asyncio.sleep(time)
    O = Obj.CreateVector(4, 3)
    P = Obj.CreateVector(4, 3)
    MS = Obj.CreateMatrix(4, 3)
    MT = Obj.CreateMatrix(4, 3)
    Result = Opr.SubtractVectors(
        Opr.MultiplyMatrixVector(Opr.SortMatrix(Opr.MultiplyMatrix(MS, MT)), O),
        P)
    print('\nFunction 3 result:\n' + str(Result))

async def SecondFunction(time):
    print('Thread 2 started...')
    await asyncio.sleep(time)
    MG = Obj.CreateMatrix(4, 2)
    MH = Obj.CreateMatrix(4, 2)
    MK = Obj.CreateMatrix(4, 2)
    ML = Obj.CreateMatrix(4, 2)
    Result = Opr.MultiplyMatrix(Opr.MultiplyMatrix(MG, MH),
                                Opr.SumMatrix(MK, ML))
    prettyList = pprint.pformat(Result)
    print('\nFunction 2 result:\n' + prettyList)

def apply_blessing(engine, hero):
    if hero.gold >= int(20 * 1.5 ** engine.level) - 2 * hero.stats["intelligence"]:
        engine.score += 0.2
        hero.gold -= int(20 * 1.5 ** engine.level) - \
            2 * hero.stats["intelligence"]
        if random.randint(0, 1) == 0:
            engine.hero = Objects.Blessing(hero)
            engine.notify("Blessing applied")
        else:
            engine.hero = Objects.Berserk(hero)
            engine.notify("Berserk applied")
    else:
        engine.score -= 0.1

def cast(cls, engine, hero):
    val = int(20 * 1.5 ** engine.level) - 2 * hero.stats["intelligence"]
    if hero.gold >= val:
        engine.score += 0.2
        hero.gold -= val
        if randint(0, 1) == 0:
            engine.hero = Objects.Blessing(hero)
            engine.notify({"info": "Blessing applied"})
        else:
            engine.hero = Objects.Berserk(hero)
            engine.notify({"info": "Berserk applied"})
    else:
        engine.score -= 0.1

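# Worked example (hedged): cast()'s gold-cost formula above, evaluated at
# engine.level == 2 with 10 intelligence (illustrative values):
#   int(20 * 1.5 ** 2) - 2 * 10  ->  45 - 20  ->  25 gold
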
async def FirstFunction(time):
    print('Thread 1 started...')
    await asyncio.sleep(time)
    A = Obj.CreateVector(4, 1)
    B = Obj.CreateVector(4, 1)
    C = Obj.CreateVector(4, 1)
    MA = Obj.CreateMatrix(4, 1)
    ME = Obj.CreateMatrix(4, 1)
    Result = Opr.MultiplyVectors(B, C) + Opr.MultiplyVectors(A, B) + \
        Opr.MultiplyVectors(
            Opr.MultiplyMatrixVector(Opr.MultiplyMatrix(MA, ME), B), C)
    print('\nFunction 1 result:\n' + str(Result))

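# Hedged driver sketch: FirstFunction, SecondFunction and ThirdFunction above
# are coroutines, so one plausible way to run them concurrently is
# asyncio.gather; the 1/2/3-second delays are assumed example values.
async def run_all():
    await asyncio.gather(FirstFunction(1), SecondFunction(2), ThirdFunction(3))

# asyncio.run(run_all())
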
def grab_data(input_file):
    """Extracts data from an excel file in directory/filename.
    Data is used to create/return an Experiment object.

    Precondition: input file is formatted according to
    generate_sheet/generate_template

    @type input_file: path
    @rtype: Experiment
    """
    # Accessing the file from which data is to be grabbed
    # input_file = os.path.join(directory, filename)
    input_book = open_workbook(input_file)
    input_sheet = input_book.sheet_by_index(0)

    # List where all run info is stored with RunObjects as ind. entries
    all_analysis_objects = []

    # Parsing elution times, correcting for header offset (8)
    raw_elution_times = input_sheet.col(1)  # Col w/elution times given in file
    elut_ends = [float(x.value) for x in raw_elution_times[8:]]

    for col_index in range(2, input_sheet.row_len(0)):
        # Grab individual CATE values of interest
        run_name = str(input_sheet.cell(0, col_index).value)  # str() in case name is a number
        SA = input_sheet.cell(1, col_index).value
        root_cnts = input_sheet.cell(2, col_index).value
        shoot_cnts = input_sheet.cell(3, col_index).value
        root_weight = input_sheet.cell(4, col_index).value
        g_factor = input_sheet.cell(5, col_index).value
        load_time = input_sheet.cell(6, col_index).value
        # Grabbing elution cpms, correcting for header offset (8)
        raw_cpm_column = input_sheet.col(col_index)[8:]  # Raw counts given by file
        raw_cpms = []
        elution_cpms = []
        for item in raw_cpm_column:
            raw_cpms.append(item.value)
            if item.value != '':
                elution_cpms.append(float(item.value))
            else:
                elution_cpms.append(0.0)
        temp_run = Objects.Run(
            run_name, SA, root_cnts, shoot_cnts, root_weight,
            g_factor, load_time, elut_ends, raw_cpms, elution_cpms)
        all_analysis_objects.append(Objects.Analysis(
            kind=None, obj_num_pts=None, run=temp_run))
    return Objects.Experiment(os.path.dirname(input_file), all_analysis_objects)

def createWalls():
    Walls = [
        Objects.Boundary((200, 150), (200, 500)),
        Objects.Boundary((400, 200), (250, 200)),
        Objects.Boundary((300, 700), (300, 300)),
        Objects.Boundary((250, 800), (650, 800)),
        Objects.Boundary((200, 150), (300, 150)),
        Objects.Boundary((650, 750), (750, 600)),
        Objects.Boundary((600, 200), (600, 450)),
        Objects.Boundary((600, 200), (650, 200)),
        Objects.Boundary((650, 200), (650, 100)),
        Objects.Boundary((650, 100), (300, 100))
    ]
    return Walls

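# Usage sketch (hedged): each Objects.Boundary above is built from two (x, y)
# endpoints; the caller presumably iterates the returned list for drawing and
# collision checks.
# walls = createWalls()
# len(walls)  # -> 10 boundary segments
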
def add_gold(engine, hero):
    if random.randint(1, 10) == 1:
        engine.score -= 0.05
        engine.hero = Objects.Weakness(hero)
        engine.notify("You were cursed")
    elif random.randint(0, 10) == 10:
        engine.score -= 0.15
        engine.hero = Objects.Poison(hero)
        engine.notify("You were poisoned")
    else:
        engine.score += 0.1
        gold = int(random.randint(10, 1000) * (1.1 ** (engine.hero.level - 1)))
        hero.gold += gold
        engine.notify(f"{gold} gold added")

def add_gold(engine, hero):
    if random.randint(1, 5) == 1:
        engine.score -= 0.05
        engine.hero = Objects.Weakness(hero)
        engine.notify("You were cursed")
    elif random.randint(1, 5) == 2:
        engine.score -= 0.05
        engine.hero = Objects.EvilEye(hero)
        engine.notify('You were evileyed')
    else:
        engine.score += 0.1
        gold = int(random.randint(10, 1000) * (1.1 ** (engine.hero.level - 1)))
        hero.gold += gold
        engine.notify(f"{gold} gold added")

def create_bricks(self):
    i = 0
    j = 0
    list_of_bricks = []
    for line in self.grid:
        for item in line:
            if item == 1:
                list_of_bricks.append(Objects.Brick((i * 50, j * 25)))
            elif item == 2:
                list_of_bricks.append(Objects.Brick((i * 50, j * 25), id=1))
            i += 1
        i = 0
        j += 1
    return list_of_bricks

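# Grid sketch (hedged): per create_bricks() above, a 1 places a default brick
# and a 2 places an id=1 brick on a 50x25 cell lattice; the 2x3 grid below is
# an assumed example.
# grid = [[1, 0, 2],
#         [0, 1, 0]]
# -> bricks at (0, 0); (100, 0) with id=1; and (50, 25)
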
def get_objects(self, _map):
    stairs = object_list_prob['objects']['stairs']
    chest = object_list_prob['objects']['chest']
    npc = object_list_prob['ally']['bless']
    npc_r = object_list_prob['ally']['coursera']
    self.objects.append(Objects.Ally(
        stairs['sprite'], stairs['action'], (3, 2)))
    self.objects.append(Objects.Ally(
        chest['sprite'], chest['action'], (3, 1)))
    self.objects.append(Objects.Ally(
        npc['sprite'], npc['action'], (5, 1)))
    self.objects.append(Objects.Ally(
        npc_r['sprite'], npc_r['action'], (6, 1)))
    return self.objects

def spawnEnemy():
    e = Objects.Enemy(level)
    Enemies.append(e)
    distance = Objects.calcDistance(p1, e)
    if distance < e.size + p1.size + 5:
        # Too close to the player: discard and retry.
        if e in Enemies:
            Enemies.remove(e)
        spawnEnemy()
    else:
        for g in Enemies:
            if e != g:
                distance = Objects.calcDistance(e, g)
                if distance < e.size + g.size + 5:
                    # Too close to another enemy: discard and retry.
                    if e in Enemies:
                        Enemies.remove(e)
                    spawnEnemy()

def setPFTaus(self):
    pfT = []
    for i in range(self.nPfTau):
        pfT.append(obj.pfTau(self.pfTauPt[i],
                             self.pfTauEta[i],
                             self.pfTauPhi[i],
                             self.pfTauLeadTrackPt[i],
                             self.pfTauTrkIso[i],
                             self.pfTauGammaIso[i]))
    self.pfTau = pfT

def post(self):
    #Check for user logged in session
    username = session.get('username')
    if username is None:
        flash('you must be logged in to view this area')
        return redirect(url_for('signinview'))
    #Generate a UUID for this text, and save to memcached.
    myuuid = request.form.get('postuuid')
    if myuuid is None:
        myuuid = uuid.uuid4().hex
    jobtext = request.form.get('jobtext')
    myPostCache = UserCache.UserObject('101')
    #obtype, obid, obdata
    myPostCache.put('post', myuuid, jobtext)
    #Add extensions to bbCode parser.
    parser = Objects.updateParser()
    #if username is None:
    #    render cached index.
    #previewtext = bbcode.render_html(jobtext)
    previewtext = parser.format(jobtext)
    #Else, we need to render our user's screen.
    headergen = Objects.HTMLSnippet('postheader').html
    footergen = Objects.HTMLSnippet('footer').html
    return render_template('postjob.html',
                           headergen=headergen,
                           footergen=footergen,
                           editortext=jobtext,
                           previewtext=previewtext,
                           postuuid=myuuid)

def setRecoPfTaus(self):
    rpfT = []
    for i in range(self.nRecoPfTau):
        rpfT.append(obj.recoPfTau(self.recoPfTauPt[i],
                                  self.recoPfTauEta[i],
                                  self.recoPfTauPhi[i],
                                  self.recoPfTauLeadTrackPt[i],
                                  self.recoPfTauTrkIso[i],
                                  self.recoPfTauGammaIso[i]))
    self.recoPfTau = rpfT

def process_target(self):
    """ Process the target image. """
    print('\n>>> Processing target image for hive files ...')
    for (event, obj) in Objects.iterparse(self.xmlfile):
        if isinstance(obj, Objects.FileObject):
            self.extract_hives(obj)
    return

def add(self, obj, xy):
    xy = Player.round(*xy)
    self.map[xy].append(obj)
    if Objects.isBlock(obj):
        self.blocks += 1
    else:
        self.things += 1

def process_dfxml(self):
    """ Process the target DFXML report. """
    print('\n>>> Processing target DFXML report ...')
    for (event, obj) in Objects.iterparse(self.xmlfile):
        if isinstance(obj, Objects.FileObject):
            self.search_dfxml(obj)
    return

def main(): d = Objects.DFXMLObject(version="1.1.1") d.program = sys.argv[0] d.program_version = __version__ d.command_line = " ".join(sys.argv) _offsets_and_pxml_paths = [] for (lxfno, lxf) in enumerate(args.labeled_xml_file): lxf_parts = lxf.split(":") if len(lxf_parts) != 2 or not lxf_parts[0].isdigit(): raise ValueError("Malformed argument in labeled_xml_file. Expecting space-delimited list of '<number>:<path>'. This entry doesn't work: %r." % lxf) offset = int(lxf_parts[0]) path = lxf_parts[1] _offsets_and_pxml_paths.append((offset,path)) offsets_and_pxml_paths = sorted(_offsets_and_pxml_paths) for (pxml_path_index, (offset, pxml_path)) in enumerate(offsets_and_pxml_paths): _logger.debug("Running on path %r." % pxml_path) pdo = Objects.parse(pxml_path) building_volume = None #Fetch or build volume we'll append if len(pdo.volumes) > 1: raise ValueError("An input DFXML document has multiple volumes; this script assumes each input document only has one. The document here has %d: %r." % (len(pdo.volumes), pxml_path)) elif len(pdo.volumes) == 0: v = Objects.VolumeObject() building_volume = True else: v = pdo.volumes[0] building_volume = False v.partition_offset = offset #Accumulate namespaces for (prefix, url) in pdo.iter_namespaces(): d.add_namespace(prefix, url) for obj in pdo: #Force-update image offsets in byte runs for brs_prop in ["data_brs", "name_brs", "inode_brs"]: if hasattr(obj, brs_prop): brs = getattr(obj, brs_prop) if brs is None: continue for br in brs: if not br.fs_offset is None: br.img_offset = br.fs_offset + offset #For files, set partition identifier and attach to partition if isinstance(obj, Objects.FileObject): obj.partition = pxml_path_index + 1 if building_volume: v.append(obj) #Collect the constructed and/or updated volume d.append(v) d.print_dfxml()
def main():
    predicates = {
        "all": (lambda x: True),
        "allocated": is_allocated,
        "new": is_new_file,
        "mod": is_mod_file,
        "newormod": is_new_or_mod_file
    }
    if args.predicate is None:
        args.predicate = "new"
    if args.predicate not in predicates:
        raise ValueError("--predicate must be from this list: %r.  Received: %r." % (predicates.keys(), args.predicate))
    if args.xml:
        d = Objects.parse(args.xml)
    else:
        d = Objects.parse(args.disk_image)
    write_sector_hashes_to_db(args.disk_image, d,
                              predicates[args.predicate], args.db_output)

def main():
    conn = sqlite3.connect(args.out_db)
    conn.row_factory = sqlite3.Row
    cursor = conn.cursor()

    cursor.execute("""\
CREATE TABLE recs (
  osetid TEXT NOT NULL,
  appetid TEXT NOT NULL,
  sliceid INTEGER NOT NULL,
  filename TEXT NOT NULL,
  hive_prefix TEXT NOT NULL
);\
""")

    #Build RE directory list from slice.db
    cursor.execute("ATTACH DATABASE '%s' AS slice;" % args.slice_db)
    cursor.execute("SELECT DISTINCT osetid, appetid, sliceid FROM slice.slice WHERE osetid <> '9544-1';")
    node_triples = []
    for row in cursor:
        node_triples.append((row["osetid"], row["appetid"], row["sliceid"]))
    cursor.execute("DETACH DATABASE slice;")

    #For each extraction.dfxml in an RE directory list
    for (osetid, appetid, sliceid) in sorted(node_triples):
        node_id = "%s-%s-%d" % (osetid, appetid, sliceid)
        extraction_dfxml_path = os.path.join(args.dwf_node_results_dir, node_id, "invoke_regxml_extractor.sh/extraction.dfxml")
        _logger.debug("extraction_dfxml_path = %r." % extraction_dfxml_path)
        if not os.path.exists(extraction_dfxml_path):
            _logger.warning("Skipping non-existent filepath: %r." % extraction_dfxml_path)
            continue
        #For each fileobject:
        for (event, obj) in Objects.iterparse(extraction_dfxml_path):
            if not isinstance(obj, Objects.FileObject):
                continue
            #Record node ID
            #Record node OS
            #Record hive filename
            #Record hive class (normalizer.py)
            hive_path = obj.original_fileobject.filename
            norm_prefix = normalizer.hive_path_to_prefix(hive_path)
            if norm_prefix is None:
                raise ValueError("File name has no associated prefix: %r." % obj.filename)
            cursor.execute("INSERT INTO recs(osetid, appetid, sliceid, filename, hive_prefix) VALUES (?,?,?,?,?);", (
                osetid,
                appetid,
                sliceid,
                hive_path,
                norm_prefix
            ))
    conn.commit()

def main():
    global args

    #Count allocated files from file system
    fs_count = 0
    _logger.debug("Begin iterparse of %r." % args.fiout_dfxml)
    for (event, obj) in Objects.iterparse(args.fiout_dfxml):
        if not isinstance(obj, Objects.FileObject):
            continue
        if not obj.is_allocated():
            continue
        fs_count += 1

    with open(args.text_output, "w") as fh:
        fh.write(str(fs_count))

def get(self):
    #Check for user logged in session
    username = session.get('username')
    if username is None:
        flash('you must be logged in to view this area')
        return redirect(url_for('signinview'))
    #Else, we need to render our user's screen.
    editortext = ("[b]Enter your job description here[/b]. "
                  "[color=#B22222]Javascript and HTML are prohibited[/color]. "
                  "[color=#008000]BBCode is valid.[/color]")
    parser = Objects.updateParser()
    previewtext = parser.format(editortext)
    headergen = Objects.HTMLSnippet('postheader').html
    footergen = Objects.HTMLSnippet('footer').html
    return render_template('postjob.html',
                           headergen=headergen,
                           footergen=footergen,
                           editortext=editortext,
                           previewtext=previewtext)

def main():
    counter = collections.defaultdict(lambda: 0)
    prev_obj = None
    for (event, obj) in Objects.iterparse(args.input_image):
        if isinstance(obj, Objects.FileObject):
            if args.ignore_virtual_files and make_differential_dfxml.ignorable_name(obj.filename):
                continue
            counter[(obj.alloc_inode, obj.alloc_name)] += 1
            #Inspect weird data
            if args.debug and obj.alloc_inode is None and obj.alloc_name is None:
                _logger.debug("Encountered a file with all-null allocation.")
                _logger.debug("Event: %r." % event)
                _logger.debug("Previous object: %s." % ET.tostring(prev_obj.to_Element()))
                _logger.debug("Current object: %s." % ET.tostring(obj.to_Element()))
        prev_obj = obj
    print(repr(counter))

def main():
    hash_mismatches = 0
    for (event, obj) in Objects.iterparse(args.xmlfile):
        if not isinstance(obj, Objects.FileObject):
            continue
        if not obj.filename:
            continue
        if not obj.filename.startswith("RAW/"):
            continue
        if obj.name_type != "r":
            continue
        if obj.sha1 is None:
            continue
        path = os.path.join(args.testdir, "partition_1", obj.filename)
        _logger.debug("Inspecting path: %r." % path)
        _logger.debug("Recreating SHA-1: %r." % obj.sha1)
        checker = hashlib.sha1()
        bytes_ingested = 0
        with open(path, "rb") as fh:
            while True:
                buf = fh.read(4096)
                if len(buf) == 0:
                    break
                bytes_ingested += len(buf)
                checker.update(buf)
        checker_sha1 = checker.hexdigest().lower()
        _logger.debug("Hash of content read from file system: %r." % checker_sha1)
        if obj.sha1 != checker_sha1:
            hash_mismatches += 1
            _logger.error("Hash mismatch on path: %r." % path)
            _logger.debug("obj.id = %r." % obj.id)
            _logger.debug("Bytes ingested = %r." % bytes_ingested)
            _logger.debug("File's size, from XML = %r." % obj.filesize)
            st = os.stat(path)
            _logger.debug("File's size, from stat = %r." % st.st_size)
    if hash_mismatches != 0:
        _logger.error("Read incorrect content on %d files." % hash_mismatches)
        sys.exit(1)

def main():
    global args
    last_file_iter_no = None
    for (iter_no, (event, obj)) in enumerate(Objects.iterparse(args.input_xml)):
        if not isinstance(obj, Objects.FileObject):
            continue
        last_file_iter_no = iter_no
        if obj.atime is None:
            continue
        if obj.atime.prec not in (2, "2", "2s", (2, "s")):
            _logger.debug("Object is: %r." % repr(obj))
            _logger.info("Encountered precision is: %s." % repr(obj.atime.prec))
            raise ValueError("Precision of atime in XTAF is 2 seconds.")
        _logger.debug("Atime precision good: File %r." % obj.filename)
    if last_file_iter_no is None:
        raise ValueError("Did not encounter any files.")
    _logger.info("Done.")

def main():
    #Key: (annotation, property)
    hist = collections.defaultdict(int)
    for (event, obj) in Objects.iterparse(sys.argv[1]):
        if event != "end" or not isinstance(obj, Objects.FileObject):
            continue
        #Loop through annotations
        for anno in obj.annos:
            #Loop through diffs
            for diff in obj.diffs:
                hist[(anno, diff)] += 1

    annos = Objects.FileObject._diff_attr_names.keys()

    print("""
<table>
  <thead>
    <tr>
      <th>Property</th>
""")
    for anno in annos:
        print("      <th>%s</th>" % anno)
    print("""
    </tr>
  </thead>
  <tfoot></tfoot>
  <tbody>
""")
    for diff in sorted(Objects.FileObject._all_properties):
        if diff in Objects.FileObject._incomparable_properties:
            continue
        print("    <tr>")
        print("      <th style='text-align:left;'>%s</th>" % diff)
        for anno in annos:
            print("      <td>%d</td>" % hist[(anno, diff)])
        print("    </tr>")
    print("""
  </tbody>
</table>
""")

def main():
    if len(sys.argv) < 2:
        print("Usage: {} <filename.xml>".format(sys.argv[0]))
        exit(1)

    timeline = []

    for (event, obj) in Objects.iterparse(sys.argv[1]):
        # Only work on FileObjects
        if not isinstance(obj, Objects.FileObject):
            continue
        if obj.mtime is not None:
            timeline.append([obj.mtime, obj.filename, " modified"])
        if obj.crtime is not None:
            timeline.append([obj.crtime, obj.filename, " created"])
        if obj.ctime is not None:
            timeline.append([obj.ctime, obj.filename, " changed"])
        if obj.atime is not None:
            timeline.append([obj.atime, obj.filename, " accessed"])

    timeline.sort()

    for record in timeline:
        print("\t".join(map(str, record)))

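# Hedged sample of the tab-separated output produced by main() above
# (timestamp, filename, event); the values shown are invented:
# 2001-02-03T04:05:06Z	single_file.dat	 modified
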
def __init__(self, xmlfile):
    self.volumes = set()

    #Key: (allocation status, name type) vector
    #Value: tally
    self.broken_out_files = collections.defaultdict(lambda: 0)

    self.failed = None
    try:
        for (event, obj) in Objects.iterparse(xmlfile):
            if isinstance(obj, Objects.VolumeObject):
                _logger.debug("Found a volume in %r." % xmlfile)
                self.volumes.add(obj)
            elif isinstance(obj, Objects.FileObject):
                if obj.alloc_inode is None and obj.alloc_name is None:
                    alloc = obj.alloc
                else:
                    alloc = obj.alloc_inode and obj.alloc_name
                self.broken_out_files[(alloc, obj.name_type)] += 1
        self.failed = False
    except Exception as e:
        self.failed = True
        logging.debug("Exception encountered processing %r." % xmlfile)
        logging.debug(e)

def main():
    dobj = Objects.parse(args.in_dfxml)
    assert dobj.program == args.expected_program
    assert dobj.program_version == args.expected_program_version

# create the world and load the regions into it
world = World.World(regions=['test', 'testb', 'Onette'])

# testCounter = 0
# def testTimerFunction(args):
#     print("Timer Alert" + str(args[0]))
#     args[0] += 1
# testTimer = World.Timer(TIMERS, 1, testTimerFunction, [testCounter], respawns=True)
#print(TIMERS)

MobInit.loadMobs()
Objects.loadSavedEq()
RoomInit.setup()
MobInit.loadSavedMobs()
Globals.startingRoom = Globals.regionListDict['test']['bullpen']
print('startingRoom:' + str(Globals.startingRoom) + Globals.startingRoom.region
      + Globals.startingRoom.name.capitalize() + '\n')
print(">> Listening for connections on port %d. CTRL-C to break." % telnet_server.port)
#print(TIMERS)
with open('data/OPList', 'r') as f:
    OPList = f.readlines()
#print(str(Globals.TIMERS))

_logger.debug("After: " + repr(fi.externals)) assert failed failed = None #Add an element with the colon prefix style e = ET.Element("clam:version") e.text = "20140101" fi.externals.append(e) #Add an element that doesn't have an ET-registered namespace prefix. e = ET.Element("{%s}test2" % XMLNS_TEST_UNREGGED) e.text = "yes" fi.externals.append(e) #Test serialization s = Objects._ET_tostring(fi.to_Element()) #TODO Maybe this should be more than an internal function. _logger.debug(s) if s.find("scan_results") == -1: raise ValueError("Serialization did not output other-namespace element 'scan_results'.") if s.find("clam:version") == -1: raise ValueError("Serialization did not output prefixed element 'clam:version'.") if s.find("test2") == -1: raise ValueError("Serialization did not output unregistered-prefix element 'test2'.") #Test de-serialization fir = Objects.FileObject() x = ET.XML(s) fir.populate_from_Element(x) _logger.debug("De-serialized: %r." % fir.externals) assert len(fir.externals) == 3
def main(self):
    #_logger.debug("dir(self) = %r." % dir(self))
    if not hasattr(self, "imgfile"):
        self.imgfile = None
    else:
        #_logger.debug("Getting real imgfile path.")
        self.imgfile = os.path.realpath(self.imgfile)
        #_logger.debug("self.imgfile = %r." % self.imgfile)
    if not hasattr(self, "xmlfile"):
        raise RuntimeError("-o xmlfile must be passed on the command line.")

    _logger.info("Parsing DFXML file...")

    #Key: Absolute path, including partition designation
    #Value: Objects.FileObject
    self.objects_by_path = dict()

    self.dir_lists_by_path = collections.defaultdict(list)
    self.volumes = dict()
    objects_without_inode_numbers = []

    for (tup_no, (event, obj)) in enumerate(Objects.iterparse(self.xmlfile)):
        if not isinstance(obj, Objects.FileObject):
            continue
        #_logger.debug("obj.filename = %r." % obj.filename)
        alloc = obj.is_allocated()
        if alloc is None:
            #_logger.debug("Assuming allocated.")
            pass
        elif alloc is False:
            #_logger.debug("Not allocated.")
            continue
        if obj.filename is None:
            #_logger.debug("Null filename.")
            continue
        if obj.filename.endswith(("/.", "/..")) or obj.filename in [".", ".."]:
            #_logger.debug("Dot-dir filename.")
            continue

        partition_dir = "partition_" + ("null" if obj.partition is None else str(obj.partition))
        if obj.partition not in self.volumes:
            self.volumes[obj.partition] = obj.volume_object  #Might be null.

        #Every file should end up with an inode number; but they should be assigned after the stream is all visited.
        if obj.inode is None:
            objects_without_inode_numbers.append(obj)

        filepath = partition_dir + "/" + obj.filename
        self.objects_by_path["/" + filepath] = obj
        basename = os.path.basename(filepath)
        dirname = os.path.dirname(filepath)
        self.dir_lists_by_path["/" + dirname].append(basename)

        #Shorten reading DFXML files in debug settings
        if "debug" in self.fuse_args.optlist and tup_no > 50:
            _logger.debug("Shortening object parsing while in debug mode: Only 50 file objects read from XML.")
            break

    #Assign inode numbers for objects that were in the stream first
    for obj in objects_without_inode_numbers:
        obj.inode = self._next_inode_number()

    #Creating the top-level partition directories a loop ago means they need to be created again for the root directory.
    for partition_number in self.volumes:
        partition_dir = "partition_" + ("null" if partition_number is None else str(partition_number))
        partition_obj = Objects.FileObject()
        partition_obj.filename = partition_dir
        partition_obj.filesize = 0
        partition_obj.name_type = "d"
        partition_obj.alloc = True
        partition_obj.inode = self._next_inode_number()
        partition_obj.nlink = 2  #This should be adjusted to be 1 + # of directory children.
        self.objects_by_path["/" + partition_dir] = partition_obj
        self.dir_lists_by_path["/"].append(partition_dir)

    _logger.info("Parsed DFXML file.")
    #_logger.debug("self.objects_by_path = %r." % self.objects_by_path)
    #_logger.debug("self.dir_lists_by_path = %r." % self.dir_lists_by_path)
    #_logger.debug("self.volumes = %r." % self.volumes)

    return fuse.Fuse.main(self)

def Move():
    p1.position[0] += p1.speed[0]
    p1.position[1] -= p1.speed[1]
    if p1.position[0] < 150 or p1.position[0] > 650 \
            or p1.position[1] < 150 or p1.position[1] > 450:
        Die(p1)
    p1.speed[0] *= p1.decel
    p1.speed[1] *= p1.decel
    if abs(p1.speed[0]) < .01:
        p1.speed[0] = 0
    if abs(p1.speed[1]) < .01:
        p1.speed[1] = 0
    for e in Enemies:
        distance = Objects.calcDistance(p1, e)
        if p1.being_hit == 0 and e.being_hit == 0 and distance < (p1.size + e.size - 1):
            p1.being_hit = 1
            e.being_hit = 1
            calcOrientation(p1)
            calcOrientation(e)
            distance = Objects.calcDistance(p1, e)
            # Push the two bodies apart until they no longer overlap.
            while distance < p1.size + e.size:
                if p1.position[0] != e.position[0]:
                    nudge = (.1 * (e.position[0] - p1.position[0])
                             / abs(p1.position[0] - e.position[0])
                             * abs(e.position[0] - p1.position[0])
                             / (abs(e.position[1] - p1.position[1]) + 1))
                    e.position[0] += nudge
                    p1.position[0] -= nudge
                if p1.position[1] != e.position[1]:
                    nudge = (.1 * (e.position[1] - p1.position[1])
                             / abs(p1.position[1] - e.position[1]))
                    e.position[1] += nudge
                    p1.position[1] -= nudge
                distance = Objects.calcDistance(p1, e)
            calcFinalSpeeds(p1, e)
    for i in range(len(Enemies)):
        try:
            if Enemies[i].alive:
                e = Enemies[i]
                if level > 1:
                    if i == 3:
                        # This enemy chases the player.
                        e.goal[0] = p1.position[0]
                        e.goal[1] = p1.position[1]
                    else:
                        # Re-pick a wander goal once the old one is reached.
                        while abs(e.position[0] - e.goal[0]) < 3:
                            e.goal[0] = randint(e.goalLimits[0], e.goalLimits[2])
                        while abs(e.position[1] - e.goal[1]) < 3:
                            e.goal[1] = randint(e.goalLimits[1], e.goalLimits[3])
                e.speed[0] -= e.accel * (e.position[0] - e.goal[0]) / abs(e.position[0] - e.goal[0])
                e.speed[1] += e.accel * (e.position[1] - e.goal[1]) / abs(e.position[1] - e.goal[1])
                e.position[0] += e.speed[0]
                e.position[1] -= e.speed[1]
                if e.position[0] < 150 or e.position[0] > 650 \
                        or e.position[1] < 150 or e.position[1] > 450:
                    Die(e)
                e.speed[0] *= e.decel
                e.speed[1] *= e.decel
                if abs(e.speed[0]) < .01:
                    e.speed[0] = 0
                if abs(e.speed[1]) < .01:
                    e.speed[1] = 0
                for j in range(i, len(Enemies)):
                    try:
                        if Enemies[j].alive:
                            g = Enemies[j]
                            if e != g:
                                distance = Objects.calcDistance(e, g)
                                if e.being_hit == 0 and g.being_hit == 0 and distance < (e.size + g.size):
                                    e.being_hit = 1
                                    g.being_hit = 1
                                    calcOrientation(e)
                                    calcOrientation(g)
                                    distance = Objects.calcDistance(e, g)
                                    while distance < e.size + g.size:
                                        if e.position[0] != g.position[0]:
                                            e.position[0] += .5 * (e.position[0] - g.position[0]) / abs(e.position[0] - g.position[0])
                                        if e.position[1] != g.position[1]:
                                            e.position[1] += .5 * (e.position[1] - g.position[1]) / abs(g.position[1] - e.position[1])
                                        distance = Objects.calcDistance(e, g)
                                    calcFinalSpeeds(e, g)
                    except Exception:
                        continue
        except Exception:
            continue

def _lower_ftype_str(vo):
    """The string labels of file system names might differ by something small
    like the casing.  Normalize the labels by lower-casing them."""
    Objects._typecheck(vo, Objects.VolumeObject)
    f = vo.ftype_str
    if isinstance(f, str):
        f = f.lower()
    return f

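# Hedged illustration (assumes ftype_str is settable, mirroring how it is read
# above; 'NTFS' is an invented example label):
# v = Objects.VolumeObject()
# v.ftype_str = "NTFS"
# _lower_ftype_str(v)  # -> "ntfs"
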
def extract_files(image_path, outdir, dfxml_path=None, file_predicate=is_file,
                  file_name=name_with_part_path, dry_run=None,
                  out_manifest_path=None, err_manifest_path=None,
                  keep_going=False):
    """
    @param file_name Unary function.  Takes an Objects.FileObject; returns the
    file path to which this file will be extracted, relative to outdir.  So,
    if outdir="extraction" and the name_with_part_path function of this module
    is used, the file "/Users/Administrator/ntuser.dat" in partition 1 will be
    extracted to "extraction/partition_1/Users/Administrator/ntuser.dat".
    """
    extraction_byte_tally = 0

    _path_for_iterparse = dfxml_path or image_path

    #Set up base manifest to track extracted files
    base_manifest = Objects.DFXMLObject()
    base_manifest.command_line = " ".join(sys.argv)
    base_manifest.version = "1.1.0+"
    base_manifest.add_namespace("extractor", XMLNS_EXTRACTOR)
    base_manifest.add_namespace("delta", dfxml.XMLNS_DELTA)
    base_manifest.sources.append(image_path)
    if dfxml_path:
        base_manifest.sources.append(dfxml_path)

    #Clone base manifest to all-files' manifest and errors-only manifest
    out_manifest = None
    if out_manifest_path:
        out_manifest = copy.deepcopy(base_manifest)
    err_manifest = None
    if err_manifest_path:
        err_manifest = copy.deepcopy(base_manifest)

    for (event, obj) in Objects.iterparse(_path_for_iterparse):
        #Absolute prerequisites:
        if not isinstance(obj, Objects.FileObject):
            continue

        #Invoker prerequisites
        if not file_predicate(obj):
            continue

        extraction_entry = Objects.FileObject()
        extraction_entry.original_fileobject = obj

        #Construct path where the file will be extracted
        extraction_write_path = os.path.join(outdir, file_name(obj))

        #Extract idempotently
        if os.path.exists(extraction_write_path):
            _logger.debug("Skipping already-extracted file: %r.  Extraction path already exists: %r." % (obj.filename, extraction_write_path))
            continue

        extraction_entry.filename = extraction_write_path

        #Set up checksum verifier
        checker = None
        checked_byte_tally = 0
        if obj.sha1:
            checker = hashlib.sha1()

        extraction_byte_tally += obj.filesize

        any_error = None
        tsk_error = None
        if not dry_run:
            extraction_write_dir = os.path.dirname(extraction_write_path)
            if not os.path.exists(extraction_write_dir):
                os.makedirs(extraction_write_dir)
            _logger.debug("Extracting to: %r." % extraction_write_path)
            with open(extraction_write_path, "wb") as extraction_write_fh:
                try:
                    for chunk in obj.extract_facet("content", image_path):
                        if checker:
                            checker.update(chunk)
                        checked_byte_tally += len(chunk)
                        extraction_write_fh.write(chunk)

                    if checked_byte_tally != obj.filesize:
                        any_error = True
                        extraction_entry.filesize = checked_byte_tally
                        extraction_entry.diffs.add("filesize")
                        _logger.error("File size mismatch on %r." % obj.filename)
                        _logger.info("Recorded filesize = %r" % obj.filesize)
                        _logger.info("Extracted bytes = %r" % checked_byte_tally)
                    if checker and (obj.sha1 != checker.hexdigest()):
                        any_error = True
                        extraction_entry.sha1 = checker.hexdigest()
                        extraction_entry.diffs.add("sha1")
                        _logger.error("Hash mismatch on %r." % obj.filename)
                        _logger.info("Recorded SHA-1 = %r" % obj.sha1)
                        _logger.info("Computed SHA-1 = %r" % checker.hexdigest())
                        #_logger.debug("File object: %r." % obj)
                except Exception as e:
                    any_error = True
                    tsk_error = True
                    extraction_entry.error = "".join(traceback.format_stack())
                    if e.args:
                        extraction_entry.error += "\n" + str(e.args)
        if out_manifest:
            out_manifest.append(extraction_entry)
        if err_manifest and any_error:
            err_manifest.append(extraction_entry)
        if tsk_error and not keep_going:
            _logger.warning("Terminating extraction loop early, due to encountered error.")
            break

    #Report
    _logger.info("Estimated extraction: %d bytes." % extraction_byte_tally)
    if out_manifest is not None:
        with open(out_manifest_path, "w") as out_manifest_fh:
            out_manifest.print_dfxml(out_manifest_fh)
    if err_manifest is not None:
        tally = 0
        for obj in err_manifest:
            if isinstance(obj, Objects.FileObject):
                tally += 1
        _logger.info("Encountered errors extracting %d files." % tally)
        with open(err_manifest_path, "w") as err_manifest_fh:
            err_manifest.print_dfxml(err_manifest_fh)

def make_differential_dfxml(pre, post, **kwargs): """ Takes as input two paths to DFXML files. Returns a DFXMLObject. @param pre String. @param post String. @param diff_mode Optional. One of "all" or "idifference". @param retain_unchanged Optional. Boolean. @param ignore_properties Optional. Set. @param annotate_matches Optional. Boolean. True -> matched file objects get a "delta:matched='1'" attribute. @param rename_requires_hash Optional. Boolean. True -> all matches require matching SHA-1's, if present. @param ignore_filename_function Optional. Function, string -> Boolean. Returns True if a file name (which can be null) should be ignored. @param glom_byte_runs Optional. Boolean. Joins contiguous-region byte runs together in FileObject byte run lists. """ diff_mode = kwargs.get("diff_mode", "all") retain_unchanged = kwargs.get("retain_unchanged", False) ignore_properties = kwargs.get("ignore_properties", set()) annotate_matches = kwargs.get("annotate_matches", False) rename_requires_hash = kwargs.get("rename_requires_hash", False) ignore_filename_function = kwargs.get("ignore_filename_function", ignorable_name) glom_byte_runs = kwargs.get("glom_byte_runs", False) _expected_diff_modes = ["all", "idifference"] if diff_mode not in _expected_diff_modes: raise ValueError("Differencing mode should be in: %r." % _expected_diff_modes) diff_mask_set = set() diff_ignore_set = set() if diff_mode == "idifference": diff_mask_set |= set([ "atime", "byte_runs", "crtime", "ctime", "filename", "filesize", "md5", "mtime", "sha1" ]) diff_ignore_set |= ignore_properties _logger.debug("diff_mask_set = " + repr(diff_mask_set)) _logger.debug("diff_ignore_set = " + repr(diff_ignore_set)) #d: The container DFXMLObject, ultimately returned. d = Objects.DFXMLObject(version="1.1.0") d.command_line = " ".join(sys.argv) d.add_namespace("delta", dfxml.XMLNS_DELTA) d.dc["type"] = "Disk image difference set" #The list most of this function is spent on building fileobjects_changed = [] #Unmodified files; only retained if requested. fileobjects_unchanged = [] #Key: (partition, inode, filename); value: FileObject old_fis = None new_fis = None #Key: (partition, inode, filename); value: FileObject list old_fis_unalloc = None new_fis_unalloc = None #Key: Partition byte offset within the disk image, paired with the file system type #Value: VolumeObject old_volumes = None new_volumes = None matched_volumes = dict() #Populated in distinct (offset, file system type as string) encounter order volumes_encounter_order = dict() for infile in [pre, post]: _logger.debug("infile = %r" % infile) old_fis = new_fis new_fis = dict() old_volumes = new_volumes new_volumes = dict() #Fold in the matched volumes - we're just discarding the deleted volumes for k in matched_volumes: old_volumes[k] = matched_volumes[k] matched_volumes = dict() old_fis_unalloc = new_fis_unalloc new_fis_unalloc = collections.defaultdict(list) d.sources.append(infile) for (i, (event, new_obj)) in enumerate(Objects.iterparse(infile)): if isinstance(new_obj, Objects.DFXMLObject): #Inherit desired properties from the source DFXMLObject. #Inherit namespaces for (prefix, url) in new_obj.iter_namespaces(): d.add_namespace(prefix, url) continue elif isinstance(new_obj, Objects.VolumeObject): if event == "end": #This algorithm doesn't yet need to know when a volume is concluded. On to the next object. continue offset = new_obj.partition_offset if offset is None: raise AttributeError("To perform differencing with volumes, the <volume> elements must have a <partition_offset>. 
Either re-generate your DFXML with partition offsets, or run this program again with the --ignore-volumes flag.") #Use the lower-case volume spelling ftype_str = _lower_ftype_str(new_obj) #Re-capping the general differential analysis algorithm: #0. If the volume is in the new list, something's gone wrong. if (offset, ftype_str) in new_volumes: _logger.debug("new_obj.partition_offset = %r." % offset) _logger.warning("Encountered a volume that starts at an offset as another volume, in the same disk image. This analysis is based on the assumption that that doesn't happen. Check results that depend on partition mappings.") #1. If the volume is in the old list, pop it out of the old list - it's matched. if old_volumes and (offset, ftype_str) in old_volumes: _logger.debug("Found a volume in post image, at offset %r." % offset) old_obj = old_volumes.pop((offset, ftype_str)) new_obj.original_volume = old_obj new_obj.compare_to_original() matched_volumes[(offset, ftype_str)] = new_obj #2. If the volume is NOT in the old list, add it to the new list. else: _logger.debug("Found a new volume, at offset %r." % offset) new_volumes[(offset, ftype_str)] = new_obj volumes_encounter_order[(offset, ftype_str)] = len(new_volumes) + ((old_volumes and len(old_volumes)) or 0) + len(matched_volumes) #3. Afterwards, the old list contains deleted volumes. #Record the ID new_obj.id = volumes_encounter_order[(offset, ftype_str)] #Move on to the next object continue elif not isinstance(new_obj, Objects.FileObject): #The rest of this loop compares only file objects. continue if ignore_filename_function(new_obj.filename): continue #Simplify byte runs if requested if glom_byte_runs: if new_obj.byte_runs: temp_byte_runs = Objects.ByteRuns() for run in new_obj.byte_runs: temp_byte_runs.glom(run) new_obj.byte_runs = temp_byte_runs #Normalize the partition number if new_obj.volume_object is None: new_obj.partition = None else: vo = new_obj.volume_object fts = _lower_ftype_str(vo) new_obj.partition = volumes_encounter_order[(vo.partition_offset, fts)] #Define the identity key of this file -- affected by the --ignore argument _key_partition = None if "partition" in ignore_properties else new_obj.partition _key_inode = None if "inode" in ignore_properties else new_obj.inode _key_filename = None if "filename" in ignore_properties else new_obj.filename key = (_key_partition, _key_inode, _key_filename) #Ignore unallocated content comparisons until a later loop. The unique identification of deleted files needs a little more to work. if not new_obj.alloc: new_fis_unalloc[key].append(new_obj) continue #The rest of this loop is irrelevant until the second file. if old_fis is None: new_fis[key] = new_obj continue if key in old_fis: #Extract the old fileobject and check for changes old_obj = old_fis.pop(key) new_obj.original_fileobject = old_obj new_obj.compare_to_original() #_logger.debug("Diffs: %r." % _diffs) _diffs = new_obj.diffs - diff_ignore_set #_logger.debug("Diffs after ignore-set: %r." % _diffs) if diff_mask_set: _diffs &= diff_mask_set #_logger.debug("Diffs after mask-set: %r." % _diffs) if len(_diffs) > 0: #_logger.debug("Remaining diffs: " + repr(_diffs)) fileobjects_changed.append(new_obj) else: #Unmodified file; only keep if requested. if retain_unchanged: fileobjects_unchanged.append(new_obj) else: #Store the new object new_fis[key] = new_obj #The rest of the files loop is irrelevant until the second file. 
if old_fis is None: continue _logger.debug("len(old_fis) = %d" % len(old_fis)) _logger.debug("len(new_fis) = %d" % len(new_fis)) _logger.debug("len(fileobjects_changed) = %d" % len(fileobjects_changed)) #Identify renames - only possible if 1-to-1. Many-to-many renames are just left as new and deleted files. _logger.debug("Detecting renames...") fileobjects_renamed = [] def _make_name_map(d): """Returns a dictionary, mapping (partition, inode) -> {filename}.""" retdict = collections.defaultdict(lambda: set()) for (partition, inode, filename) in d.keys(): retdict[(partition, inode)].add(filename) return retdict old_inode_names = _make_name_map(old_fis) new_inode_names = _make_name_map(new_fis) for key in new_inode_names.keys(): (partition, inode) = key if len(new_inode_names[key]) != 1: continue if not key in old_inode_names: continue if len(old_inode_names[key]) != 1: continue if rename_requires_hash: #Peek at the set elements by doing a quite-ephemeral list cast old_obj = old_fis[(partition, inode, list(old_inode_names[key])[0])] new_obj = new_fis[(partition, inode, list(new_inode_names[key])[0])] if old_obj.sha1 != new_obj.sha1: continue #Found a match if we're at this point in the loop old_name = old_inode_names[key].pop() new_name = new_inode_names[key].pop() old_obj = old_fis.pop((partition, inode, old_name)) new_obj = new_fis.pop((partition, inode, new_name)) new_obj.original_fileobject = old_obj new_obj.compare_to_original() fileobjects_renamed.append(new_obj) _logger.debug("len(old_fis) -> %d" % len(old_fis)) _logger.debug("len(new_fis) -> %d" % len(new_fis)) _logger.debug("len(fileobjects_changed) -> %d" % len(fileobjects_changed)) _logger.debug("len(fileobjects_renamed) = %d" % len(fileobjects_renamed)) #Identify files that just changed inode number - basically, doing the rename detection again _logger.debug("Detecting inode number changes...") def _make_inode_map(d): """Returns a dictionary, mapping (partition, filename) -> inode.""" retdict = dict() for (partition, inode, filename) in d.keys(): if (partition, filename) in retdict: _logger.warning("Multiple instances of the file path %r were found in partition %r; this violates an assumption of this program, that paths are unique within partitions." % (filename, partition)) retdict[(partition, filename)] = inode return retdict old_name_inodes = _make_inode_map(old_fis) new_name_inodes = _make_inode_map(new_fis) for key in new_name_inodes.keys(): if not key in old_name_inodes: continue (partition, name) = key old_obj = old_fis.pop((partition, old_name_inodes[key], name)) new_obj = new_fis.pop((partition, new_name_inodes[key], name)) new_obj.original_fileobject = old_obj new_obj.compare_to_original() fileobjects_changed.append(new_obj) _logger.debug("len(old_fis) -> %d" % len(old_fis)) _logger.debug("len(new_fis) -> %d" % len(new_fis)) _logger.debug("len(fileobjects_changed) -> %d" % len(fileobjects_changed)) #And that's the end of the allocated-only, per-volume analysis. #We may be able to match files that aren't allocated against files we think are deleted _logger.debug("Detecting modifications from unallocated files...") fileobjects_deleted = [] for key in new_fis_unalloc: #1 partition; 1 inode number; 1 name, repeated: Too ambiguous to compare. if len(new_fis_unalloc[key]) != 1: continue if key in old_fis_unalloc: if len(old_fis_unalloc[key]) == 1: #The file was unallocated in the previous image, too. 
                    old_obj = old_fis_unalloc[key].pop()
                    new_obj = new_fis_unalloc[key].pop()
                    new_obj.original_fileobject = old_obj
                    new_obj.compare_to_original()
                    #The file might not have changed. It's interesting if it did, though.
                    #Apply the ignore-set, then the mask-set, to the observed diffs.
                    _diffs = new_obj.diffs - diff_ignore_set
                    if diff_mask_set:
                        _diffs &= diff_mask_set
                    if len(_diffs) > 0:
                        _logger.debug("Remaining diffs: " + repr(_diffs))
                        fileobjects_changed.append(new_obj)
                    elif retain_unchanged:
                        fileobjects_unchanged.append(new_obj)
            elif key in old_fis:
                #Identified a deletion.
                old_obj = old_fis.pop(key)
                new_obj = new_fis_unalloc[key].pop()
                new_obj.original_fileobject = old_obj
                new_obj.compare_to_original()
                fileobjects_deleted.append(new_obj)

        _logger.debug("len(old_fis) -> %d" % len(old_fis))
        _logger.debug("len(new_fis) -> %d" % len(new_fis))
        _logger.debug("len(fileobjects_changed) -> %d" % len(fileobjects_changed))
        _logger.debug("len(fileobjects_deleted) -> %d" % len(fileobjects_deleted))

    #After deletion matching is performed, one might want to look for files migrating to other partitions.
    #However, since between-volume migration creates a new deleted file, this algorithm instead ignores partition migrations.
    #AJN TODO Thinking about it a little more, I can't suss out a reason against trying this match. It's complicated if we try looking for reallocations in new_fis, strictly from new_fis_unalloc.

    #TODO We might also want to match the unallocated objects based on metadata addresses. Unfortunately, that requires implementation of additional byte runs, which hasn't been fully designed yet in the DFXML schema.

    #Begin output.
    #First, annotate the volume objects.
    for key in new_volumes:
        v = new_volumes[key]
        v.annos.add("new")
    for key in old_volumes:
        v = old_volumes[key]
        v.annos.add("deleted")
    for key in matched_volumes:
        v = matched_volumes[key]
        if len(v.diffs) > 0:
            v.annos.add("modified")

    #Build the dictionary of FileObject appenders: the DFXML Document and its child volumes.
    #Key: partition number, or None.
    #Value: the VolumeObject corresponding with that partition number; None maps to the DFXMLObject.
    appenders = dict()
    for volume_dict in [new_volumes, matched_volumes, old_volumes]:
        for (offset, ftype_str) in volume_dict:
            veo = volumes_encounter_order[(offset, ftype_str)]
            if veo in appenders:
                raise ValueError("This encounter-order key is already in the appenders dictionary, which was supposed to have distinct keys: " + repr((offset, ftype_str)) + ", encounter order " + str(veo) + ".")
            v = volume_dict[(offset, ftype_str)]
            appenders[veo] = v
            d.append(v)

    #Add in the default appender, the DFXML Document itself.
    appenders[None] = d

    content_diffs = set(["md5", "sha1", "mtime"])

    def _maybe_match_attr(obj):
        """Just adds the 'matched' annotation when called."""
        if annotate_matches:
            obj.annos.add("matched")

    #Populate the DFXMLObject.
    for key in new_fis:
        #TODO If this script ever does a series of >2 DFXML files, these diff additions need to be removed for the next round.
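        #Anything still present in new_fis at this point appeared only in the
        #post image: annotate it "new" and append it under its partition.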
        fi = new_fis[key]
        fi.annos.add("new")
        appenders[fi.partition].append(fi)
    for key in new_fis_unalloc:
        for fi in new_fis_unalloc[key]:
            fi.annos.add("new")
            appenders[fi.partition].append(fi)
    for fi in fileobjects_deleted:
        #Independently flag for name, content, and metadata modifications
        if len(fi.diffs - content_diffs) > 0:
            fi.annos.add("changed")
        if len(content_diffs.intersection(fi.diffs)) > 0:
            fi.annos.add("modified")
        if "filename" in fi.diffs:
            fi.annos.add("renamed")
        fi.annos.add("deleted")
        _maybe_match_attr(fi)
        appenders[fi.partition].append(fi)
    for key in old_fis:
        ofi = old_fis[key]
        nfi = Objects.FileObject()
        nfi.original_fileobject = ofi
        nfi.annos.add("deleted")
        appenders[ofi.partition].append(nfi)
    for key in old_fis_unalloc:
        for ofi in old_fis_unalloc[key]:
            nfi = Objects.FileObject()
            nfi.original_fileobject = ofi
            nfi.annos.add("deleted")
            appenders[ofi.partition].append(nfi)
    for fi in fileobjects_renamed:
        #Independently flag for content and metadata modifications
        if len(content_diffs.intersection(fi.diffs)) > 0:
            fi.annos.add("modified")
        if len(fi.diffs - content_diffs) > 0:
            fi.annos.add("changed")
        fi.annos.add("renamed")
        _maybe_match_attr(fi)
        appenders[fi.partition].append(fi)
    for fi in fileobjects_changed:
        #Independently flag for content and metadata modifications
        if len(content_diffs.intersection(fi.diffs)) > 0:
            fi.annos.add("modified")
        if len(fi.diffs - content_diffs) > 0:
            fi.annos.add("changed")
        _maybe_match_attr(fi)
        appenders[fi.partition].append(fi)
    for fi in fileobjects_unchanged:
        _maybe_match_attr(fi)
        appenders[fi.partition].append(fi)

    #Output
    return d
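# A minimal, self-contained sketch of the volume-matching recap in steps 0-3
# above: post-image volumes are bucketed against pre-image volumes keyed on
# (partition_offset, ftype_str). The function and variable names here are
# illustrative assumptions, not part of the Objects API.
def bucket_volumes(old_keys, new_keys):
    """old_keys, new_keys: iterables of (offset, ftype_str) tuples."""
    old = set(old_keys)       # Volumes seen in the pre image.
    matched = set()
    new = set()
    for key in new_keys:
        if key in old:
            old.discard(key)  # 1. Present in both images: matched.
            matched.add(key)
        else:
            new.add(key)      # 2. Only in the post image: new.
    return new, matched, old  # 3. Whatever remains in old was deleted.

# Example: one volume kept, one reformatted FAT32 -> NTFS at the same offset.
# bucket_volumes([(1048576, "fat16"), (2147483648, "fat32")],
#                [(1048576, "fat16"), (2147483648, "ntfs")])
# returns ({(2147483648, "ntfs")}, {(1048576, "fat16")}, {(2147483648, "fat32")})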
def main():
    dobj = Objects.parse(args.in_dfxml)
    assert dobj is not None
    _logger.debug("dobj.diff_file_ignores = %r." % dobj.diff_file_ignores)
    assert "atime" in dobj.diff_file_ignores
    assert "crtime" in dobj.diff_file_ignores
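# Hedged companion note for main() above: the assertions expect args.in_dfxml
# to have been generated with "atime" and "crtime" recorded as file-level diff
# ignores. Assuming diff_file_ignores behaves as the mutable set the reads
# above imply, a producing script might look like this sketch (everything
# beyond print_dfxml and diff_file_ignores is illustrative):
#
#   dobj_out = Objects.DFXMLObject()
#   dobj_out.diff_file_ignores.add("atime")
#   dobj_out.diff_file_ignores.add("crtime")
#   with open("ignores.dfxml", "w") as out_fh:
#       dobj_out.print_dfxml(output_fh=out_fh)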
if __name__ == "__main__": logging.basicConfig(level=logging.INFO) thisdir = os.path.dirname(__file__) tempxml1_path = __file__ + "-test1.xml" tempxml2_path = __file__ + "-test2.xml" d_in_memory = make_differential_dfxml.make_differential_dfxml( os.path.join(thisdir, "../../samples/difference_test_2.xml"), os.path.join(thisdir, "../../samples/difference_test_3.xml") ) #Write and read the DFXML stream a couple times to ensure consistent serialization and deserialization with open(tempxml1_path, "w") as fh: d_in_memory.print_dfxml(output_fh=fh) d_from_disk = Objects.parse(tempxml1_path) with open(tempxml2_path, "w") as fh: d_from_disk.print_dfxml(output_fh=fh) d_from_disk_again = Objects.parse(tempxml2_path) for d in (d_in_memory, d_from_disk, d_from_disk_again): for o in d: _logger.debug(repr(o)) if isinstance(o, Objects.VolumeObject): expected_partition_annos = { (1048576,"FAT16"): set(["deleted"]), (1073741824,"FAT32"): set([]), (2147483648,"FAT32"): set(["deleted"]), (2147483648,"NTFS"): set(["new"]), (4294967296,"FAT32"): set(["new"]) }
from math import *
from random import *

import pygame

import Objects

pygame.init()
Screen = (800, 600)
FPS = 100
fpsClock = pygame.time.Clock()

Font = pygame.font.SysFont("Times New Roman", 32)
Font1 = pygame.font.SysFont("Times New Roman", 24)
Font2 = pygame.font.SysFont("Times New Roman", 16)
Font3 = pygame.font.SysFont("Times New Roman", 60)

Objects.init(Screen)
Surface = pygame.display.set_mode(Screen)
pygame.display.set_caption("Fear The Sphere - David Prorok - 2013")

#A fully transparent 1x1 surface stands in for a window icon.
icon = pygame.Surface((1, 1))
icon.set_alpha(0)
pygame.display.set_icon(icon)

stageColor = pygame.Color(250, 250, 250)
backgroundColor = pygame.Color(0, 0, 0)

coins = 0
massPrice = 35
speedPrice = 100
accelPrice = 150
lifePrice = 500
multiplier = 1
multiplierPrice = 100
level = 0