def write_fat_entry(self, cluster_id: int,
                    value: typ.Union[int, str]) -> None:
    """
    Write a given value into the FAT tables.

    Requires that the FAT object holds a ``self._fat_entry`` attribute
    with a valid ``construct.Mapping``.

    :param cluster_id: int, cluster_id to write the value into
    :param value: int or string, value that gets written into the FAT.
                  Use an integer to link to a valid following cluster_id;
                  use the string 'free_cluster', 'bad_cluster' or
                  'last_cluster' to set these markers without needing to
                  distinguish between FAT versions.
    :raises: AttributeError, AssertionError, FieldError
    """
    # make sure cluster_id is valid
    if cluster_id < 0 or cluster_id >= self.entries_per_fat:
        raise AttributeError("cluster_id out of bounds")
    # make sure the caller does not use invalid values as next cluster
    if isinstance(value, int):
        assert value < self._fat_entry.encoding['bad_cluster'], \
            "next_cluster value must be < " \
            + str(self._fat_entry.encoding['bad_cluster']) \
            + ". For last cluster use 'last_cluster'. For " \
            + "bad_cluster use 'bad_cluster'"
        assert value >= 2, "next_cluster value must be >= 2. For " \
                           + "free_cluster use 'free_cluster'"
    # start positions of FAT0 and FAT1 (boot sector plus the remaining
    # reserved sectors precede the first FAT)
    fat0_start = self.offset + 512 + (self.pre.sector_size - 512) + \
        (self.pre.reserved_sector_count - 1) * self.pre.sector_size
    fat1_start = fat0_start + self.pre.sectors_per_fat \
        * self.pre.sector_size
    # update first FAT on disk
    self.stream.seek(fat0_start + cluster_id * self._fat_entry.sizeof())
    self.stream.write(self._fat_entry.build(value))
    # update second FAT on disk, if it exists
    if self.pre.fat_count > 1:
        self.stream.seek(fat1_start
                         + cluster_id * self._fat_entry.sizeof())
        self.stream.write(self._fat_entry.build(value))
    # flush changes to disk
    self.stream.flush()
    # re-read FATs into memory
    fat_definition = Array(self.pre.fat_count,
                           Bytes(self.pre.sectors_per_fat
                                 * self.pre.sector_size))
    self.stream.seek(fat0_start)
    self.pre.fats = fat_definition.parse_stream(self.stream)
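
A short usage sketch, not taken from the source: assuming `fat` is an instance of this class opened on a writable stream, linking and terminating a cluster chain uses the integer and string forms described in the docstring. The cluster numbers are illustrative only.

# Hypothetical usage: link cluster 5 to cluster 6, terminate the chain
# there, and release cluster 7. `fat` is an assumed instance of the
# class above, opened on a writable image.
fat.write_fat_entry(5, 6)                # cluster 5 now points to cluster 6
fat.write_fat_entry(6, 'last_cluster')   # cluster 6 ends the chain
fat.write_fat_entry(7, 'free_cluster')   # cluster 7 is marked unallocated
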
def write_fat_entry(self, cluster_id: int,
                    value: typ.Union[int, str]) -> None:
    """FAT12 override: entries are 12 bits wide, so two entries share
    three bytes and a write must preserve its neighbour's nibble."""
    # make sure cluster_id is valid
    if cluster_id < 0 or cluster_id >= self.entries_per_fat:
        raise AttributeError("cluster_id out of bounds")
    # make sure the caller does not use invalid values as next cluster
    if isinstance(value, int):
        assert value <= 4086, "next_cluster value must be <= 4086. For " \
                              + "last cluster use 'last_cluster'. For " \
                              + "bad_cluster use 'bad_cluster'"
        assert value >= 2, "next_cluster value must be >= 2. For " \
                           + "free_cluster use 'free_cluster'"
    # start positions of FAT0 and FAT1
    fat0_start = self.offset + 512 + (self.pre.sector_size - 512) + \
        (self.pre.reserved_sector_count - 1) * self.pre.sector_size
    fat1_start = fat0_start + self.pre.sectors_per_fat \
        * self.pre.sector_size
    # read the two bytes that currently hold this 12-bit entry
    byte = cluster_id + cluster_id // 2
    self.stream.seek(fat0_start + byte)
    current_entry = self.stream.read(2).hex()
    new_entry_hex = self._fat_entry.build(value).hex()
    # splice the new entry in, as the neighbouring entry overlaps
    # with the bytes just read
    if cluster_id % 2 == 0:
        # if cluster_id is even, we need to keep the third nibble
        new_entry = new_entry_hex[0:2] + current_entry[2] \
            + new_entry_hex[3]
    else:
        # if cluster_id is odd, we need to keep the second nibble
        new_entry = new_entry_hex[1] + current_entry[1] + \
            new_entry_hex[3] + new_entry_hex[0]
    # convert hex back to bytes
    new_entry_bytes = bytes.fromhex(new_entry)
    # write the new value to the first FAT on disk
    self.stream.seek(fat0_start + byte)
    self.stream.write(new_entry_bytes)
    # write the new value to the second FAT on disk, if it exists
    if self.pre.fat_count > 1:
        self.stream.seek(fat1_start + byte)
        self.stream.write(new_entry_bytes)
    # flush changes to disk
    self.stream.flush()
    # re-read FATs into memory
    fat_definition = Array(self.pre.fat_count,
                           Bytes(self.pre.sectors_per_fat
                                 * self.pre.sector_size))
    self.stream.seek(fat0_start)
    self.pre.fats = fat_definition.parse_stream(self.stream)
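
The 1.5-byte offset arithmetic is easier to verify with concrete numbers. The following standalone sketch, written for this explanation rather than taken from the source, restates the standard FAT12 packing: entry n starts at byte floor(n * 1.5); an even entry occupies a full byte plus the low nibble of the next one, an odd entry the high nibble of its first byte plus the full byte after it.

def read_fat12_entry(fat: bytes, cluster_id: int) -> int:
    """Reference reader for the 12-bit packing handled above."""
    offset = cluster_id + cluster_id // 2
    pair = int.from_bytes(fat[offset:offset + 2], 'little')
    if cluster_id % 2 == 0:
        return pair & 0x0FFF   # even entry: low 12 bits of the byte pair
    return pair >> 4           # odd entry: high 12 bits of the byte pair

# Two packed entries 0x123 and 0xABC occupy the three bytes 23 C1 AB.
fat = bytes.fromhex('23C1AB')
assert read_fat12_entry(fat, 0) == 0x123
assert read_fat12_entry(fat, 1) == 0xABC
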
if args.debug:  # assumed flag name; the original condition is not shown
    logging.basicConfig(level=logging.DEBUG)
else:
    logging.basicConfig(level=logging.INFO)

with args.input as fd:
    # ident
    logging.info(hgIdent.parse_stream(fd))
    fd.seek(0x1400, os.SEEK_SET)
    logging.info(urladerlink.parse_stream(fd))
    fd.seek(pagesize)
    a = anchor.parse_stream(fd)

    # task root (level 1)
    fd.seek(a.taskRoot.value * pagesize)
    taskRoot = blockTable.parse_stream(fd)

    # task dataspaces(?) (level 2)
    for taskid, taskref in enumerate(taskRoot):
        if taskref.value == 0xffffff:
            continue
        logging.info(f'task {taskid} is at {taskref.value} '
                     f'0x{taskref.value * pagesize:x}')
        fd.seek(taskref.value * pagesize)
        dataspaces = blockTable.parse_stream(fd)
        for dsidhigh, dsref in enumerate(dataspaces):
            if dsref.value == 0xffffff:
                continue
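
The traversal above relies on just two properties of blockTable: each entry exposes a 24-bit `value` field, and 0xffffff marks an unused slot. A minimal sketch of a compatible declaration follows; it is a hypothetical reconstruction using construct's Int24ul and a placeholder page size, and the real definition elsewhere in the script may differ in endianness, entry width, or padding.

from construct import Array, Int24ul, Struct

# Hypothetical reconstruction of blockTable: one page worth of 24-bit
# block references; 0xffffff marks an unused slot. Placeholder values,
# not the script's actual definition.
pagesize = 512  # assumed; the real script derives this elsewhere
blockTable = Array(pagesize // 3, Struct('value' / Int24ul))

# A table of all-0xffffff bytes parses to nothing but unused slots.
entries = blockTable.parse(bytes.fromhex('ffffff') * (pagesize // 3))
assert all(e.value == 0xffffff for e in entries)
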