def __getitem__(self, idx):
    if self.permutation is not None and idx != self.num_batches - 1:
        idx = self.permutation[idx]

    raw_label_data = os.pread(self.label_file,
                              self.bytes_per_batch["label"],
                              idx * self.bytes_per_batch["label"])
    click = torch.from_numpy(
        np.frombuffer(raw_label_data, dtype=np.float32))

    if self.numerical_features_file is not None:
        raw_numerical_data = os.pread(
            self.numerical_features_file,
            self.bytes_per_batch["numerical"],
            idx * self.bytes_per_batch["numerical"])
        numerical_features = torch.from_numpy(
            np.frombuffer(raw_numerical_data, dtype=np.float32)).view(-1, 13)
    else:
        numerical_features = None

    if self.categorical_features_files is not None:
        categorical_features = []
        for cat_file in self.categorical_features_files:
            raw_cat_data = os.pread(
                cat_file,
                self.bytes_per_batch["categorical"],
                idx * self.bytes_per_batch["categorical"])
            categorical_features.append(
                torch.from_numpy(
                    np.frombuffer(raw_cat_data, dtype=np.int32)).unsqueeze(1))
        categorical_features = torch.cat(categorical_features, dim=1)
    else:
        categorical_features = None

    return click, numerical_features, categorical_features
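# --- Usage sketch (illustrative; the constants below are assumptions, not
# taken from the original source) ---
# How the bytes_per_batch values consumed by __getitem__ above might be
# derived: each pread fetches one whole batch, so each byte count is just
# batch_size times the per-sample record width of that file.
batch_size = 32768
bytes_per_batch = {
    "label": batch_size * 4,           # one float32 label per sample
    "numerical": batch_size * 13 * 4,  # 13 float32 features per sample
    "categorical": batch_size * 4,     # one int32 per sample, per feature file
}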
def _get(self, idx):
    # calculate the offset & number of the samples to be read
    if not self._drop_last and idx == self._num_entries - 1:
        sample_offset = (idx * (self._batch_size * self._global_size)
                         + self._last_batch_offset[self._global_rank])
        batch = self._samples_in_last_batch
    else:
        sample_offset = (idx * (self._batch_size * self._global_size)
                         + (self._batch_size * self._global_rank))
        batch = self._batch_size

    # read the data from binary file
    # one np.bool_ label byte per sample
    label_raw_data = os.pread(self._label_file, 1 * batch, 1 * sample_offset)
    label = np.frombuffer(label_raw_data, dtype=np.bool_).reshape([batch, 1])

    # 13 float16 dense features = 26 bytes per sample
    dense_raw_data = os.pread(self._dense_file, 26 * batch, 26 * sample_offset)
    dense = np.frombuffer(dense_raw_data, dtype=np.float16).reshape([batch, 13])

    category = []
    for i in range(26):
        category_raw_data = os.pread(self._category_file[i],
                                     self._category_bytes[i] * batch,
                                     self._category_bytes[i] * sample_offset)
        category.append(
            np.frombuffer(category_raw_data, dtype=self._category_type[i])
            .reshape([batch, 1]).astype(np.int32))
    category = np.concatenate(category, axis=1)

    # convert numpy data to tensorflow data
    label = tf.convert_to_tensor(label, dtype=tf.float32)
    dense = tf.convert_to_tensor(dense, dtype=tf.float32)
    category = tf.convert_to_tensor(category, dtype=tf.int64)
    return (dense, category), label
def readLine(self):
    data = ''
    while os.pread(self.description, 1, self.offset) not in [self.NEW_LINE, self.END_OF_FILE]:
        data += str(os.pread(self.description, 1, self.offset), self.encoding)
        self.offset += 1
    self.offset += 1
    return data
def procmem_write(path, pid, gvar_name, symoff_table, base_addrs, write_data):
    fd = os.open("/proc/{}/mem".format(pid), os.O_RDWR)
    symoff = symoff_table[gvar_name]
    base_addr = base_addrs[path]
    print("access addr = 0x{:x}".format(base_addr + symoff))
    print("before: 0x{:x}".format(
        struct.unpack("<I", os.pread(fd, 4, base_addr + symoff))[0]))
    os.pwrite(fd, struct.pack("<I", write_data), base_addr + symoff)
    print(" after: 0x{:x}".format(
        struct.unpack("<I", os.pread(fd, 4, base_addr + symoff))[0]))
    os.close(fd)  # release the /proc/<pid>/mem descriptor
def _get(self, i):
    assert i >= 0 and i < self.length
    offset = self.buflen * i
    with self.lock.rdlock():
        buf = os.pread(self.indexfp.fileno(), self.buflen, offset)
        (o, l) = unpack('Qq', buf)
        if l < 0 or o < 0:
            return None
        data = os.pread(self.datafp.fileno(), l, o)
        assert len(data) == l
        return data
def _get(self, i):
    if i < 0 or self.length <= i:
        raise IndexError("index {} out of range ([0, {}])".format(
            i, self.length - 1))
    offset = self.buflen * i
    with self.lock.rdlock():
        buf = os.pread(self.cachefp.fileno(), self.buflen, offset)
        (o, l) = unpack('Qq', buf)
        if l < 0 or o < 0:
            return None
        data = os.pread(self.cachefp.fileno(), l, o)
        assert len(data) == l
        return data
def read(self, n=-1):
    data = ''
    offset = 0
    if n <= -1:
        while os.pread(self.description, 1, offset):
            data += str(os.pread(self.description, 1, offset), self.encoding)
            offset += 1
    else:
        while offset != n:
            data += str(os.pread(self.description, 1, offset), self.encoding)
            offset += 1
    return data
def _read_chunks_from_disk(self, fds, offsets, sizes):
    sum_read_nbytes = 0  # for prometheus counter
    st = time.time()
    for i_smd in range(self.n_smd_files):
        if self.use_smds[i_smd]:
            continue  # smd data were already copied
        offset = offsets[i_smd]
        size = sizes[i_smd]
        chunk = bytearray()
        for _ in range(self.max_retries + 1):
            chunk.extend(os.pread(fds[i_smd], size, offset))
            got = memoryview(chunk).nbytes
            if got == sizes[i_smd]:
                break
            # resume from where the short read stopped; got is cumulative,
            # so recompute from the original offset and size
            offset = offsets[i_smd] + got
            size = sizes[i_smd] - got
        self.bigdata[i_smd].extend(chunk)
        sum_read_nbytes += sizes[i_smd]
    en = time.time()
    rate = 0
    if sum_read_nbytes > 0:
        rate = (sum_read_nbytes / 1e6) / (en - st)
    logger.debug(
        f"event_manager: bd reads chunk {sum_read_nbytes/1e6:.5f} MB took {en-st:.2f} s (Rate: {rate:.2f} MB/s)"
    )
    self._inc_prometheus_counter('MB', sum_read_nbytes / 1e6)
    self._inc_prometheus_counter('seconds', en - st)
    return
def hash_genesis_block():
    blockheader = (
        "02000000" +
        "a4051e368bfa0191e6c747507dd0fdb03da1a0a54ed14829810b97c6ac070000" +
        "e932b0f6b8da85ccc464d9d5066d01d904fb05ae8d1ddad7095b9148e3f08ba6" +
        "bcfb6459" +
        "f0ff0f1e" +
        "3682bb08")
    print("txdata:%s" % blockheader)
    blockheader_bin = binascii.unhexlify(swap_order(blockheader))
    tx_data = blockheader_bin

    # Open files
    fd_h2c = os.open("/dev/xdma/card0/h2c0", os.O_WRONLY)
    fd_c2h = os.open("/dev/xdma/card0/c2h0", os.O_RDONLY)

    start_time = time.time()
    # Send to FPGA
    os.pwrite(fd_h2c, tx_data, 0)
    # Receive from FPGA
    rx_data = os.pread(fd_c2h, 32, 0)
    end_time = time.time()
    delay = end_time - start_time

    blockheader_rx = codecs.encode(rx_data, 'hex').decode('ascii')
    print("rxdata:%s" % swap_order(blockheader_rx)[0:64])
    print("Time elapsed:%f microsec" % (delay * 1000000))
    os.close(fd_h2c)
    os.close(fd_c2h)
def main(): fd = os.open("/dev/xdma0_user", os.O_RDWR) # sysmon is at 0x3000; first register of interest (temperature) is at 0x200 sysmon_base = 0x3200 vals = [] for reg in range(0x28): temp_string = os.pread(fd, 4, sysmon_base + (reg * 4)) temp_int = int.from_bytes(temp_string, byteorder='little') vals.append(temp_int) # At this point, vals contains the integer values read from ADC, with the same index as the ADC register offset printf(" | CUR | MIN | MAX |\n") printf("Temp C | %.1f | %.1f | %.1f |\n", ConvTemp(vals[0]), ConvTemp(vals[0x24]), ConvTemp(vals[0x20])) printf("VCCInt | %.2f | %.2f | %.2f |\n", ConvVolt(vals[1]), ConvVolt(vals[0x25]), ConvVolt(vals[0x21])) printf("VCCAux | %.2f | %.2f | %.2f |\n", ConvVolt(vals[2]), ConvVolt(vals[0x26]), ConvVolt(vals[0x22])) os.close(fd)
def _read_chunks_from_disk(self, fds, offsets, sizes):
    sum_read_nbytes = 0  # for prometheus counter
    st = time.time()
    for i in range(self.n_smd_files):
        offset = offsets[i]
        size = sizes[i]
        chunk = bytearray()
        for j in range(self.max_retries + 1):
            chunk.extend(os.pread(fds[i], size, offset))
            got = memoryview(chunk).nbytes
            if got == sizes[i]:
                break
            # resume from where the short read stopped; got is cumulative,
            # so recompute from the original offset and size
            offset = offsets[i] + got
            size = sizes[i] - got
        self.bigdata[i].extend(chunk)
        sum_read_nbytes += sizes[i]
    en = time.time()
    rate = 0
    if sum_read_nbytes > 0:
        rate = (sum_read_nbytes / 1e6) / (en - st)
    logging.info(
        f"event_manager: bd reads chunk {sum_read_nbytes/1e6:.5f} MB took {en-st:.2f} s (Rate: {rate:.2f} MB/s)"
    )
    self._inc_prometheus_counter('MB', sum_read_nbytes / 1e6)
    return
def handle_v(subcmd):
    if subcmd == 'MustReplyEmpty':
        self.send("")

    elif subcmd.startswith('File:open'):
        (file_path, flags, mode) = subcmd.split(':')[-1].split(',')
        file_path = unhexlify(file_path).decode(encoding='UTF-8')
        flags = int(flags, base=16)
        mode = int(mode, base=16)
        if file_path.startswith(self.rootfs_abspath):
            file_abspath = file_path
        else:
            file_abspath = self.ql.os.transform_to_real_path(file_path)
        self.ql.log.debug("gdb> target file: %s" % (file_abspath))
        if os.path.exists(file_abspath) and not (file_path).startswith("/proc"):
            fd = os.open(file_abspath, flags, mode)
            self.send("F%x" % fd)
        else:
            self.send("F-1")

    elif subcmd.startswith('File:pread:'):
        (fd, count, offset) = subcmd.split(':')[-1].split(',')
        fd = int(fd, base=16)
        offset = int(offset, base=16)
        count = int(count, base=16)
        data = os.pread(fd, count, offset)
        size = len(data)
        data = self.bin_to_escstr(data)
        if data:
            self.send(("F%x;" % size).encode() + (data))
        else:
            self.send("F0;")

    elif subcmd.startswith('File:close'):
        fd = subcmd.split(':')[-1]
        fd = int(fd, base=16)
        os.close(fd)
        self.send("F0")

    elif subcmd.startswith('Kill'):
        self.send('OK')

    elif subcmd.startswith('Cont'):
        self.ql.log.debug("gdb> Cont command received: %s" % subcmd)
        if subcmd == 'Cont?':
            self.send('vCont;c;C;t;s;S;r')
        elif subcmd.startswith("Cont;"):
            subcmd = subcmd.split(';')
            subcmd = subcmd[1].split(':')
            if subcmd[0] in ('c', 'C05'):
                handle_c(subcmd)
            elif subcmd[0] in ('S', 's', 'S05'):
                handle_s(subcmd)

    else:
        self.send("")
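# --- Protocol sketch (illustrative) ---
# handle_v() above receives the payload of a GDB remote-serial 'v' packet.
# For the pread case the wire format is "File:pread:<fd>,<count>,<offset>"
# with all three fields in hex; e.g. reading 0x100 bytes at offset 0x40 from
# remote fd 7:
example = 'File:pread:7,100,40'
fd, count, offset = (int(x, 16) for x in example.split(':')[-1].split(','))
assert (fd, count, offset) == (7, 0x100, 0x40)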
def _read(self, fd, size, offset):
    st = time.monotonic()
    chunk = bytearray()
    offset0, size0 = offset, size  # remember the original request
    for i_retry in range(self.max_retries + 1):
        chunk.extend(os.pread(fd, size, offset))
        got = memoryview(chunk).nbytes
        if got == size0:
            break
        # resume from where the short read stopped; got is cumulative,
        # so recompute from the original offset and size
        offset = offset0 + got
        size = size0 - got
        found_xtc2_flags = self.dm.found_xtc2('bd')
        if got == 0 and all(found_xtc2_flags):
            print('bigdata got 0 byte and .xtc2 files found on disk. stop reading this .inprogress file')
            break
        print(f'bigdata read retry#{i_retry} - waiting for {size/1e6} MB, max_retries: {self.max_retries} (PS_R_MAX_RETRIES), sleeping 1 second...')
        time.sleep(1)
    en = time.monotonic()
    sum_read_nbytes = memoryview(chunk).nbytes  # for prometheus counter
    rate = 0
    if sum_read_nbytes > 0:
        rate = (sum_read_nbytes/1e6)/(en-st)
    logger.debug(f"bd reads chunk {sum_read_nbytes/1e6:.5f} MB took {en-st:.2f} s (Rate: {rate:.2f} MB/s)")
    self._inc_prometheus_counter('MB', sum_read_nbytes/1e6)
    self._inc_prometheus_counter('seconds', en-st)
    return chunk
def update(self):
    try:
        self.sync_led_state ^= True
        keypress = os.pread(self.fd, 1, 0xDB)[0]
        if keypress & 0b01000000:
            log.info('Fn+F11 keypress')
            new = dict(iter_state())
            airplane_mode = any(new.values())
            sync_led(self.fd, airplane_mode)
            if airplane_mode:
                self.restore = new
                self.old = dict(iter_write_airplane_on())
            else:
                self.old = dict(iter_write_airplane_off(self.restore))
            write_int(self.fd, 0xDB, clear_bit6(keypress))
            log.info('airplane_mode: %r', airplane_mode)
        elif self.sync_led_state:
            new = dict(iter_state())
            if new != self.old:
                log.info('%r != %r', new, self.old)
                self.old = new
                airplane_mode = not any(new.values())
                sync_led(self.fd, airplane_mode)
                log.info('airplane_mode: %r', airplane_mode)
        return True
    except Exception:
        log.exception('Error in AirplaneMode.update():')
        return False
def _get(self, i):
    assert 0 <= i < self.length
    self._open_fds()
    offset = self.buflen * i
    fcntl.flock(self.index_fd, fcntl.LOCK_SH)
    index_entry = os.pread(self.index_fd, self.buflen, offset)
    (o, l) = unpack('Qq', index_entry)
    if l < 0 or o < 0:
        fcntl.flock(self.index_fd, fcntl.LOCK_UN)
        return None
    data = os.pread(self.data_fd, l, o)
    assert len(data) == l
    fcntl.flock(self.index_fd, fcntl.LOCK_UN)
    return data
def empty_bin(self, worker_no):
    data_bin = self.workers_bins[worker_no]
    offset = 0
    os.lseek(data_bin["file"], offset, 0)
    all_observations = np.zeros(
        shape=[data_bin["size"], *self.input_metadata[0][1:]],
        dtype=self.input_metadata[1])
    all_actions = np.zeros(
        shape=[data_bin["size"], *self.action_metadata[0][1:]],
        dtype=self.action_metadata[1])
    all_rewards = np.zeros(shape=[data_bin["size"]],
                           dtype=self.reward_metadata[1])
    for i in range(data_bin["size"]):
        data_bytes = os.pread(data_bin["file"], self.line_bytes, offset)
        observation, action, reward = self.parse_data(data_bytes)
        all_observations[i] = observation
        all_actions[i] = action
        all_rewards[i] = reward
        offset += self.line_bytes
    data_bin["size"] = 0
    os.ftruncate(data_bin["file"], 0)
    return all_observations, all_actions, all_rewards
def _put(self, i, data):
    if self.closed:
        return False
    if i < 0 or self.length <= i:
        raise IndexError("index {} out of range ([0, {}])".format(
            i, self.length - 1))
    self._open_fds()
    index_ofst = self.buflen * i

    fcntl.flock(self.cache_fd, fcntl.LOCK_EX)
    buf = os.pread(self.cache_fd, self.buflen, index_ofst)
    (o, l) = unpack('Qq', buf)
    if l >= 0 and o >= 0:
        # data already exists
        fcntl.flock(self.cache_fd, fcntl.LOCK_UN)
        return False

    data_pos = os.lseek(self.cache_fd, 0, os.SEEK_END)
    if self.cache_size_limit:
        if self.cache_size_limit < (data_pos + len(data)):
            self._frozen = True
            fcntl.flock(self.cache_fd, fcntl.LOCK_UN)
            return False

    index_entry = pack('Qq', data_pos, len(data))
    assert os.pwrite(self.cache_fd, index_entry, index_ofst) == self.buflen
    assert os.pwrite(self.cache_fd, data, data_pos) == len(data)
    os.fsync(self.cache_fd)
    fcntl.flock(self.cache_fd, fcntl.LOCK_UN)
    return True
def read(self, phys_addr, length):
    res = gdb.execute(f"monitor gpa2hva {hex(phys_addr)}", to_string=True)
    try:
        hva = int(res.split(" ")[-1], 16)
    except ValueError:
        raise OSError("Physical address is not accessible")
    return os.pread(self.file, length, hva)
def procmem_read(path, pid, gvar_name, symoff_table, base_addrs):
    fd = os.open("/proc/{}/mem".format(pid), os.O_RDWR)
    symoff = symoff_table[gvar_name]
    base_addr = base_addrs[path]
    print("access addr = 0x{:x}".format(base_addr + symoff))
    print("read result: 0x{:x}".format(
        struct.unpack("<I", os.pread(fd, 4, base_addr + symoff))[0]))
    os.close(fd)  # release the /proc/<pid>/mem descriptor
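# --- Usage sketch (illustrative; every value below is a made-up assumption) ---
# procmem_read()/procmem_write() expect a symbol-offset table (e.g. parsed
# from `nm` output) and per-module base addresses (e.g. from /proc/<pid>/maps).
symoff_table = {"g_counter": 0x4010}              # symbol offset inside the ELF
base_addrs = {"/usr/bin/target": 0x555555554000}  # module load address
procmem_read("/usr/bin/target", 12345, "g_counter", symoff_table, base_addrs)
procmem_write("/usr/bin/target", 12345, "g_counter", symoff_table, base_addrs, 0xdeadbeef)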
def empty_attack_bin(self, worker_no):
    data_bin = self.workers_bins[worker_no]["attack"]
    offset = 0
    os.lseek(data_bin["file"], offset, 0)
    all_observations = np.zeros(
        shape=[data_bin["size"], *self.input_metadata[0][1:]],
        dtype=self.input_metadata[1])
    all_attack_actions = np.zeros(
        shape=[data_bin["size"], *self.attack_metadata[0][1:]],
        dtype=self.attack_metadata[1])
    # all_rewards = np.zeros(shape=[data_bin["size"]], dtype=self.reward_metadata[1])
    all_modes = np.zeros(
        shape=[data_bin["size"], *self.mode_metadata[0][1:]],
        dtype=self.mode_metadata[1])
    line_bytes = self.input_bytes + self.attack_bytes
    for i in range(data_bin["size"]):
        data_bytes = os.pread(data_bin["file"], line_bytes, offset)
        observation, attack_action = self.parse_attack_data(data_bytes)
        all_observations[i] = observation
        all_attack_actions[i] = attack_action
        # all_rewards[i] = reward
        all_modes[i][1] = 1
        offset += line_bytes
    data_bin["size"] = 0
    os.ftruncate(data_bin["file"], 0)
    return all_observations, all_attack_actions, all_modes
def _get_numerical_features(self, idx: int) -> Optional[torch.Tensor]:
    if self._numerical_features_file is None:
        return None

    raw_numerical_data = os.pread(self._numerical_features_file,
                                  self._numerical_bytes_per_batch,
                                  idx * self._numerical_bytes_per_batch)
    array = np.frombuffer(raw_numerical_data, dtype=np.float16)
    return torch.from_numpy(array).view(-1, self._number_of_numerical_features)
def _get_label(self, idx: int) -> tf.Tensor:
    raw_label_data = os.pread(self._label_file,
                              self._label_bytes_per_batch,
                              idx * self._label_bytes_per_batch)
    array = np.frombuffer(raw_label_data, dtype=np.bool_)
    array = tf.convert_to_tensor(array, dtype=tf.float32)
    array = tf.expand_dims(array, 1)
    return array
def read(self, address, length, chunk_size, overlap=0):
    while length:
        readback = min(chunk_size, length)
        yield address, os.pread(self._procmem, readback, address)
        if readback > overlap:
            readback -= overlap
        address += readback
        length -= readback
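# --- Usage sketch (illustrative; `scanner` and all addresses are assumptions) ---
# Scanning a memory region in overlapping chunks so a pattern that straddles
# a chunk boundary is still found; `scanner` stands in for an instance of the
# class that owns read() above.
for addr, blob in scanner.read(0x400000, 0x100000, chunk_size=0x10000, overlap=16):
    hit = blob.find(b"\x90\x90\xcc")
    if hit != -1:
        print("pattern at 0x{:x}".format(addr + hit))
        break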
def read64(address):
    if address % 16 == 0:
        read64.rdata = int.from_bytes(
            os.pread(fd_c2h, 16, address + 0x20000000), 'little')
    data = read64.rdata % (2**64)
    read64.rdata = read64.rdata // (2**64)
    return data
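# --- Usage sketch (illustrative; assumes fd_c2h is an already-open XDMA c2h
# descriptor as in the surrounding code) ---
# read64() above fetches 16 bytes whenever the address is 16-byte aligned and
# caches the upper 64 bits on the function object, so two consecutive 8-byte
# reads cost a single pread.
lo = read64(0x0000)  # triggers the 16-byte pread
hi = read64(0x0008)  # served from the cached upper half
print(hex(lo), hex(hi))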
def mem_test_random():
    # Status
    test_ok = True
    test_msg = "OK\n"

    # This is the only number that should need to change: how many MB to generate
    # 256M16 part is 512MB; 512M16 part is 1024MB
    NUM_MB = 1024

    # Generate some data
    TRANSFER_SIZE = 1024 * 1024 * 4
    NUM_TRANSFERS = int(NUM_MB / 4)
    tx_data = []
    rx_data = []
    for page in range(NUM_TRANSFERS):
        tx_data.append(bytearray(numpy.random.bytes(TRANSFER_SIZE)))

    # Open files
    fd_h2c = os.open("/dev/xdma0_h2c_0", os.O_WRONLY)
    fd_c2h = os.open("/dev/xdma0_c2h_0", os.O_RDONLY)

    # Send to FPGA block RAM
    start = time.time()
    for page in range(NUM_TRANSFERS):
        os.pwrite(fd_h2c, tx_data[page], page * TRANSFER_SIZE)
    end = time.time()
    duration = end - start

    # Print time
    BPS = TRANSFER_SIZE * NUM_TRANSFERS / duration
    print("Sent in " + str(duration * 1000.0) + " milliseconds (" +
          str(BPS / 1000000) + " MBPS)")

    # Receive from FPGA block RAM
    start = time.time()
    for page in range(NUM_TRANSFERS):
        rx_data.append(os.pread(fd_c2h, TRANSFER_SIZE, page * TRANSFER_SIZE))
    end = time.time()
    duration = end - start

    # Print time
    BPS = TRANSFER_SIZE * NUM_TRANSFERS / duration
    print("Received in " + str(duration * 1000.0) + " milliseconds (" +
          str(BPS / 1000000) + " MBPS)")

    # Make sure data matches
    for page in range(NUM_TRANSFERS):
        if tx_data[page] != rx_data[page]:
            test_ok = False
            test_msg = "Whoops on page " + str(page) + "\n"
    print(test_msg)
    with open('err.log', 'a') as errlog:
        errlog.write(test_msg)

    os.close(fd_h2c)
    os.close(fd_c2h)
def read_proc_output(logfd, offset):
    size = os.fstat(logfd).st_size
    if size > offset:
        data = os.pread(logfd, size - offset, offset)
        sys.stdout.write(data.decode('utf-8'))
        offset = size
    else:
        data = None
    return offset, data
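# --- Usage sketch (illustrative; the log path and poll count are assumptions) ---
# Polling loop that tails a file another process appends to, printing only
# the bytes added since the previous call.
import os
import time

logfd = os.open("/tmp/proc.log", os.O_RDONLY)
offset = 0
for _ in range(50):
    offset, data = read_proc_output(logfd, offset)
    if data is None:
        time.sleep(0.1)  # nothing new yet
os.close(logfd)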
def main(): fd = os.open("/dev/xdma0_user", os.O_RDONLY) temp_string = os.pread(fd, 4, 0x1000) print("Version " + str(temp_string)) os.close(fd)
def _get(self, i):
    if i < 0 or self.length <= i:
        raise IndexError("index {} out of range ([0, {}])".format(
            i, self.length - 1))
    self._open_fds()
    offset = self.buflen * i
    fcntl.flock(self.cache_fd, fcntl.LOCK_SH)
    index_entry = os.pread(self.cache_fd, self.buflen, offset)
    (o, l) = unpack('Qq', index_entry)
    if l < 0 or o < 0:
        fcntl.flock(self.cache_fd, fcntl.LOCK_UN)
        return None
    data = os.pread(self.cache_fd, l, o)
    assert len(data) == l
    fcntl.flock(self.cache_fd, fcntl.LOCK_UN)
    return data
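# --- Format sketch (illustrative) ---
# The _put/_get pair above stores one fixed-width 'Qq' record per index slot:
# an unsigned 64-bit data offset followed by a signed 64-bit length, with a
# negative length marking an empty slot. A standalone round trip:
from struct import pack, unpack, calcsize

buflen = calcsize('Qq')        # 16 bytes per index entry
entry = pack('Qq', 4096, 123)  # data at offset 4096, 123 bytes long
o, l = unpack('Qq', entry)
assert (o, l) == (4096, 123) and buflen == 16
empty = pack('Qq', 0, -1)      # sentinel for "no data cached yet"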
def validate_erased(target, signature_map):
    target_fd = os.open(target, os.O_RDONLY)
    try:
        for sig, offset in signature_map:
            if os.pread(target_fd, 16, offset) == sig:
                return False
    finally:
        os.close(target_fd)
    return True
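# --- Usage sketch (illustrative; the signature, offset, and device are made up) ---
# validate_erased() above returns False as soon as any known 16-byte magic is
# still readable at its expected offset, and True once none are found.
signature_map = [
    (b"HYPOTHETICALSIG!", 0x8000),  # hypothetical 16-byte magic at a fixed offset
]
print(validate_erased("/dev/sdX", signature_map))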
def read(self, length, offset):
    if self.iolock:
        # seek()+read() share the file object's offset, so they must be
        # serialized; os.pread() takes an explicit offset and needs no lock
        self.iolock.acquire()
        try:
            self.file.seek(offset)
            return self.file.read(length)
        finally:
            self.iolock.release()
    else:
        return os.pread(self.fd, length, offset)
def __read(self, length, offset):
    '''
    Wrapper around `os.pread` that reads as much as requested
    from the opened file.

    @param  length:int  The number of bytes to read.
    @param  offset:int  Whence shall we read?

    @return  :bytes  The requested area of the file.
    '''
    rc = bytearray()
    while length > 0:
        got = os.pread(self.fd, length, offset)
        if len(got) == 0:
            break
        rc += got
        length -= len(got)
        offset += len(got)
    return bytes(rc)
def main():
    # Generate some data
    TRANSFER_SIZE = 4096
    tx_data = bytearray(os.urandom(TRANSFER_SIZE))

    # Open files
    fd_h2c = os.open("/dev/xdma/card0/h2c0", os.O_WRONLY)
    fd_c2h = os.open("/dev/xdma/card0/c2h0", os.O_RDONLY)

    # Send to FPGA block RAM
    start = time.time()
    os.pwrite(fd_h2c, tx_data, 0)
    end = time.time()
    duration = end - start

    # Print time
    BPS = TRANSFER_SIZE / duration
    print("Sent in " + str(duration * 1000.0) + " milliseconds (" +
          str(BPS / 1000000) + " MBPS)")

    # Receive from FPGA block RAM
    start = time.time()
    rx_data = os.pread(fd_c2h, TRANSFER_SIZE, 0)
    end = time.time()
    duration = end - start

    # Print time
    BPS = TRANSFER_SIZE / duration
    print("Received in " + str(duration * 1000.0) + " milliseconds (" +
          str(BPS / 1000000) + " MBPS)")

    # Make sure data matches
    if tx_data != rx_data:
        print("Whoops")
    else:
        print("OK")

    # done
    os.close(fd_h2c)
    os.close(fd_c2h)
def task(i):
    start = time.perf_counter()
    os.pread(disk.fileno(), bufsize, offsets[i])
    finish = time.perf_counter()
    times[i] = finish - start
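# --- Usage sketch (illustrative; disk, bufsize and offsets are assumed to be
# set up as in the surrounding benchmark) ---
# Running the timed preads concurrently: os.pread releases the GIL for the
# duration of the syscall, so a thread pool overlaps the seeks.
from concurrent.futures import ThreadPoolExecutor

times = [0.0] * len(offsets)
with ThreadPoolExecutor(max_workers=32) as pool:
    pool.map(task, range(len(offsets)))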
def _read(self, buffersize, offset):
    """read that uses pread"""
    # pylint: disable=no-member
    return os.pread(self._handle.fileno(), buffersize, offset)
print('Measuring: Concurrent random seek time using readahead.')
print('Samples: {0} Sample size: {1}'.format(bufcount, bufsize))

for area in [BytesInt('1MB') * 2**i for i in range(0, 64)] + [disksize]:
    if area > disksize:
        continue
    os.system('echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null')
    offsets = [random.randint(0, area - bufsize) for i in range(bufcount)]
    for i in offsets:
        readahead(disk.fileno(), i, bufsize)
    times = [timeit.timeit(lambda: os.pread(disk.fileno(), bufsize, i), number=1)
             for i in offsets]
    print('Area tested: {0:6} Average: {1:5.2f} ms Max: {2:5.2f} ms Total: {3:0.2f} sec'.format(
        BytesString(area) if area < disksize else BytesStringFloat(area),
        sum(times) / len(times) * 1000, max(times) * 1000, sum(times)))

#--------------------------------------------------------------------------------------------------
bufsize = 512
bufcount = 100

print()
print('Measuring: Random seek time using beginning of disk.')
print('Samples: {0} Sample size: {1}'.format(bufcount, bufsize))
os.system('echo 3 | sudo tee /proc/sys/vm/drop_caches > /dev/null')
offsets = []
for _ in range(bufcount):
    right = random.randint(0, area)
    offsets.append(right)
for i in offsets:
    readahead(disk.fileno(), i, bufsize)
times = []
disk.seek(0)
disk.read(bufsize)
for i in offsets:
    start = time.perf_counter()
    os.pread(disk.fileno(), bufsize, i)
    finish = time.perf_counter()
    times.append(finish - start)
print('Area tested: {0:6} Average: {1:5.2f} ms Max: {2:5.2f} ms Total: {3:0.2f} sec'.format(
    BytesString(area) if area < disksize else BytesStringFloat(area),
    sum(times) / len(times) * 1000, max(times) * 1000, sum(times)))

#--------------------------------------------------------------------------------------------------
bufsize = 512
bufcount = 100

print()
print('Measuring: Concurrent random seek time using thread pool.')
def _readoff(self, off, size):
    return os.pread(self.fileno, size, off)
import sys
import time
import os
from collections import namedtuple
import ctypes


class HFSplus:

    def __init__(self):
        pass

    HFSPlusVolumeHeader = namedtuple(
        "HFSPlusVolumeHeader", "field1 field2 field3")

    def volume_reader(self, fd):
        # os.pread(fd, buffersize, offset)
        # The HFS Plus volume header is 512 bytes long, stored 1024 bytes
        # into the volume (sector #2 when using 512-byte sectors).
        vheader = os.pread(fd, 512, 1024)
        return vheader


# /* HFS Plus Volume Header - 512 bytes */
# /* Stored at sector #2 (3rd sector) and second-to-last sector. */
# struct HFSPlusVolumeHeader {
#     u_int16_t signature;           /* == kHFSPlusSigWord */
#     u_int16_t version;             /* == kHFSPlusVersion */
#     u_int32_t attributes;          /* volume attributes */
#     u_int32_t lastMountedVersion;  /* implementation version which last mounted volume */
#     u_int32_t journalInfoBlock;    /* block addr of journal info (if volume is journaled, zero otherwise) */
#
#     u_int32_t createDate;          /* date and time of volume creation */
#     u_int32_t modifyDate;          /* date and time of last modification */
#     u_int32_t backupDate;          /* date and time of last backup */