def __init__(self, toc, is_periodical):
    """Collect every string the TOC will need into the parent CNCX.

    Each node contributes its title; for periodicals the node's klass
    is also collected, followed by its author and description when
    those are non-empty (in that order).
    """
    texts = []
    for node in toc.iterdescendants(breadth_first=True):
        texts.append(node.title)
        if is_periodical:
            texts.append(node.klass)
            # author/description are optional; skip falsy values
            for optional in (node.author, node.description):
                if optional:
                    texts.append(optional)
    CNCX_.__init__(self, texts)
def __init__(self, toc_table):
    """Build the NCX index from a table of TOC entry dicts.

    All strings (label, author, description, kind) are pooled into a
    CNCX; each entry is rendered as (leading_text, tags) where the
    leading text is the entry's index in zero-padded uppercase hex,
    sized to fit the largest index (minimum two digits).
    """
    pool = []
    for row in toc_table:
        pool.append(row['label'])
        # Optional strings, collected in the same order they are rendered
        for key in ('author', 'description', 'kind'):
            value = row.get(key, None)
            if value:
                pool.append(value)
    self.cncx = CNCX(pool)

    try:
        widest = max(row['index'] for row in toc_table)
    except ValueError:  # empty toc_table
        widest = 0
    fmt = '%0{0}X'.format(max(2, len('%X' % widest)))

    def render(row):
        # Positional tags are copied verbatim; string tags are replaced
        # by their CNCX offsets.
        rec = {key: row[key] for key in (
            'offset', 'length', 'depth', 'pos_fid', 'parent',
            'first_child', 'last_child') if key in row}
        rec.update((key, self.cncx[row[key]]) for key in (
            'label', 'description', 'author', 'kind') if key in row)
        return fmt % row['index'], rec

    self.entries = [render(row) for row in toc_table]
def __init__(self, guide_table):
    """Build the guide index: one entry per guide reference, keyed by
    the reference type, whose tags carry the CNCX offset of the title
    and the reference's pos_fid."""
    self.cncx = CNCX(ref.title for ref in guide_table)
    entries = []
    for ref in guide_table:
        entries.append((ref.type, {
            'title': self.cncx[ref.title],
            'pos_fid': ref.pos_fid,
        }))
    self.entries = entries
def __init__(self, toc_table):
    """Build the NCX index from a table of TOC entry dicts.

    All strings (label, author, description, kind) are pooled into a
    CNCX; each entry is rendered as (leading_text, tags) where the
    leading text is the entry's index in zero-padded uppercase hex.
    """
    strings = []
    for entry in toc_table:
        strings.append(entry['label'])
        aut = entry.get('author', None)
        if aut:
            strings.append(aut)
        desc = entry.get('description', None)
        if desc:
            strings.append(desc)
        kind = entry.get('kind', None)
        if kind:
            strings.append(kind)
    self.cncx = CNCX(strings)

    # BUG FIX: the previous fixed '%02x' format overflows to three
    # characters once an index exceeds 0xff, producing leading-text
    # labels of unequal length. Size the hex field to the largest
    # index instead (minimum two digits), matching the sibling
    # implementation of this constructor elsewhere in this file.
    try:
        largest = max(x['index'] for x in toc_table)
    except ValueError:  # empty toc_table
        largest = 0
    fmt = '%0{0}X'.format(max(2, len('%X' % largest)))

    def to_entry(x):
        # Positional tags are copied verbatim; string tags are replaced
        # by their CNCX offsets.
        ans = {}
        for f in ('offset', 'length', 'depth', 'pos_fid', 'parent',
                  'first_child', 'last_child'):
            if f in x:
                ans[f] = x[f]
        for f in ('label', 'description', 'author', 'kind'):
            if f in x:
                ans[f] = self.cncx[x[f]]
        return (fmt % x['index'], ans)

    self.entries = list(map(to_entry, toc_table))
def __init__(self, chunk_table):
    """Build the chunk index.

    The leading text of each entry is the chunk's insert position as a
    ten-digit zero-padded decimal; the tags carry the CNCX offset of
    the chunk's selector plus its file number, sequence number and
    (start_pos, length) geometry.
    """
    self.cncx = CNCX(chunk.selector for chunk in chunk_table)
    entries = []
    for chunk in chunk_table:
        tags = {
            'cncx_offset': self.cncx[chunk.selector],
            'file_number': chunk.file_number,
            'sequence_number': chunk.sequence_number,
            'geometry': (chunk.start_pos, chunk.length),
        }
        entries.append(('%010d' % chunk.insert_pos, tags))
    self.entries = entries
class Index(object):  # {{{
    """Base class for the INDX-style index records.

    Subclasses set ``cncx``, ``tag_types`` and ``entries`` (a sequence
    of (leading_text, tags-dict) pairs). Calling the instance renders
    the entries into one or more index records (rolling over to a new
    record when the current one approaches the 0x10000 size limit),
    prepends the index header record, and appends the CNCX records.
    """

    control_byte_count = 1
    cncx = CNCX()
    tag_types = (EndTagTable, )
    HEADER_LENGTH = IndexHeader.HEADER_LENGTH

    @classmethod
    def generate_tagx(cls):
        """Serialize the TAGX control block: magic, total length,
        control byte count, then the per-tag metadata bytes."""
        header = b'TAGX'
        byts = bytearray()
        for tag_meta in cls.tag_types:
            # tag_meta[0] is the tag's name; the remaining fields are
            # the raw bytes written into the TAGX table
            byts.extend(tag_meta[1:])
        # table length, control byte count
        header += pack(b'>II', 12 + len(byts), cls.control_byte_count)
        return header + bytes(byts)

    @classmethod
    def calculate_control_bytes_for_each_entry(cls, entries):
        """Compute the list of control bytes for every entry.

        Each tag contributes (value-count // values-per-entry) shifted
        under its mask; a tag with its end flag set terminates the
        current control byte. Raises ValueError if an entry does not
        produce exactly ``control_byte_count`` bytes.
        """
        control_bytes = []
        for lead_text, tags in entries:
            cbs = []
            ans = 0
            for (name, number, vpe, mask, endi) in cls.tag_types:
                if endi == 1:
                    # End-of-byte marker: flush the accumulated byte
                    cbs.append(ans)
                    ans = 0
                    continue
                try:
                    nvals = len(tags.get(name, ()))
                except TypeError:
                    # A scalar value counts as one
                    nvals = 1
                nentries = nvals // vpe
                shifts = mask_to_bit_shifts[mask]
                ans |= mask & (nentries << shifts)
            if len(cbs) != cls.control_byte_count:
                raise ValueError('The entry %r is invalid' % [lead_text, tags])
            control_bytes.append(cbs)
        return control_bytes

    def __call__(self):
        """Render all entries into index records and return the full
        list of records: [index header] + index records + CNCX records."""
        self.control_bytes = self.calculate_control_bytes_for_each_entry(
            self.entries)
        # Parallel per-record accumulators; a new element is appended to
        # each when the current record rolls over
        index_blocks, idxt_blocks, record_counts, last_indices = [BytesIO()], [
            BytesIO()
        ], [0], [b'']
        buf = BytesIO()

        RECORD_LIMIT = 0x10000 - self.HEADER_LENGTH - 1048  # kindlegen uses 1048 (there has to be some margin because of block alignment)

        for i, (index_num, tags) in enumerate(self.entries):
            control_bytes = self.control_bytes[i]
            buf.seek(0), buf.truncate(0)
            # py2: unicode leading text is encoded to utf-8 bytes
            index_num = (index_num.encode('utf-8') if isinstance(
                index_num, unicode) else index_num)
            # Entry = length-prefixed leading text, control bytes, then
            # the encint-encoded tag values
            raw = bytearray(index_num)
            raw.insert(0, len(index_num))
            buf.write(bytes(raw))
            buf.write(bytes(bytearray(control_bytes)))
            for tag in self.tag_types:
                values = tags.get(tag.name, None)
                if values is None:
                    continue
                try:
                    len(values)
                except TypeError:
                    # Scalar tag value: wrap into a single-element list
                    values = [values]
                if values:
                    for val in values:
                        try:
                            buf.write(encint(val))
                        except ValueError:
                            raise ValueError('Invalid values for %r: %r' %
                                             (tag, values))
            raw = buf.getvalue()
            offset = index_blocks[-1].tell()
            idxt_pos = idxt_blocks[-1].tell()
            # +2 accounts for this entry's IDXT slot (a >H offset)
            if offset + idxt_pos + len(raw) + 2 > RECORD_LIMIT:
                # Roll over to a fresh index record
                index_blocks.append(BytesIO())
                idxt_blocks.append(BytesIO())
                record_counts.append(0)
                offset = idxt_pos = 0
                last_indices.append(b'')

            record_counts[-1] += 1
            idxt_blocks[-1].write(pack(b'>H', self.HEADER_LENGTH + offset))
            index_blocks[-1].write(raw)
            last_indices[-1] = index_num

        index_records = []
        for index_block, idxt_block, record_count in zip(
                index_blocks, idxt_blocks, record_counts):
            index_block = align_block(index_block.getvalue())
            idxt_block = align_block(b'IDXT' + idxt_block.getvalue())
            # Create header for this index record
            header = b'INDX'
            buf.seek(0), buf.truncate(0)
            buf.write(pack(b'>I', self.HEADER_LENGTH))
            buf.write(b'\0' * 4)  # Unknown
            buf.write(
                pack(b'>I', 1)
            )  # Header type (0 for Index header record and 1 for Index records)
            buf.write(b'\0' * 4)  # Unknown

            # IDXT block offset
            buf.write(pack(b'>I', self.HEADER_LENGTH + len(index_block)))

            # Number of index entries in this record
            buf.write(pack(b'>I', record_count))

            buf.write(b'\xff' * 8)  # Unknown

            buf.write(b'\0' * 156)  # Unknown
            header += buf.getvalue()
            index_records.append(header + index_block + idxt_block)
            if len(index_records[-1]) > 0x10000:
                raise ValueError(
                    'Failed to rollover index blocks for very large index.')

        # Create the Index Header record
        tagx = self.generate_tagx()

        # Geometry of the index records is written as index entries pointed to
        # by the IDXT records
        buf.seek(0), buf.truncate()
        idxt = [b'IDXT']
        pos = IndexHeader.HEADER_LENGTH + len(tagx)
        for last_idx, num in zip(last_indices, record_counts):
            start = buf.tell()
            idxt.append(pack(b'>H', pos))
            # Length-prefixed last leading text of the record, followed
            # by the record's entry count
            buf.write(bytes(bytearray([len(last_idx)])) + last_idx)
            buf.write(pack(b'>H', num))
            pos += buf.tell() - start

        header = {
            'num_of_entries': sum(r for r in record_counts),
            'num_of_records': len(index_records),
            'num_of_cncx': len(self.cncx),
            'tagx': align_block(tagx),
            'geometry': align_block(buf.getvalue()),
            'idxt': align_block(b''.join(idxt)),
        }
        header = IndexHeader()(**header)
        self.records = [header] + index_records
        self.records.extend(self.cncx.records)
        return self.records
class Index(object):  # {{{
    """Base class for the INDX-style index records (single-record
    variant).

    Subclasses set ``cncx``, ``tag_types`` and ``entries`` (a sequence
    of (leading_text, tags-dict) pairs). Calling the instance renders
    every entry into one index record — raising ValueError if the
    result would exceed the 0x10000 record size limit — then prepends
    the index header record and appends the CNCX records.
    """

    control_byte_count = 1
    cncx = CNCX()
    tag_types = (EndTagTable, )
    HEADER_LENGTH = IndexHeader.HEADER_LENGTH

    @classmethod
    def generate_tagx(cls):
        """Serialize the TAGX control block: magic, total length,
        control byte count, then the per-tag metadata bytes."""
        header = b'TAGX'
        byts = bytearray()
        for tag_meta in cls.tag_types:
            # tag_meta[0] is the tag's name; the remaining fields are
            # the raw bytes written into the TAGX table
            byts.extend(tag_meta[1:])
        # table length, control byte count
        header += pack(b'>II', 12 + len(byts), cls.control_byte_count)
        return header + bytes(byts)

    @classmethod
    def calculate_control_bytes_for_each_entry(cls, entries):
        """Compute the list of control bytes for every entry.

        Each tag contributes (value-count // values-per-entry) shifted
        under its mask; a tag with its end flag set terminates the
        current control byte. Raises ValueError if an entry does not
        produce exactly ``control_byte_count`` bytes.
        """
        control_bytes = []
        for lead_text, tags in entries:
            cbs = []
            ans = 0
            for (name, number, vpe, mask, endi) in cls.tag_types:
                if endi == 1:
                    # End-of-byte marker: flush the accumulated byte
                    cbs.append(ans)
                    ans = 0
                    continue
                try:
                    nvals = len(tags.get(name, ()))
                except TypeError:
                    # A scalar value counts as one
                    nvals = 1
                nentries = nvals // vpe
                shifts = mask_to_bit_shifts[mask]
                ans |= mask & (nentries << shifts)
            if len(cbs) != cls.control_byte_count:
                raise ValueError('The entry %r is invalid' % [lead_text, tags])
            control_bytes.append(cbs)
        return control_bytes

    def __call__(self):
        """Render all entries into a single index record and return
        [index header, index record] + CNCX records."""
        self.control_bytes = self.calculate_control_bytes_for_each_entry(
            self.entries)
        rendered_entries = []
        index, idxt, buf = BytesIO(), BytesIO(), BytesIO()
        IndexEntry = namedtuple('IndexEntry', 'offset length raw')
        last_lead_text = b''
        too_large = ValueError(
            'Index has too many entries, calibre does not'
            ' support generating multiple index records at this'
            ' time.')

        for i, x in enumerate(self.entries):
            control_bytes = self.control_bytes[i]
            leading_text, tags = x
            buf.seek(0), buf.truncate(0)
            # py2: unicode leading text is encoded to utf-8 bytes
            leading_text = (leading_text.encode('utf-8') if isinstance(
                leading_text, unicode) else leading_text)
            # Entry = length-prefixed leading text, control bytes, then
            # the encint-encoded tag values
            raw = bytearray(leading_text)
            raw.insert(0, len(leading_text))
            buf.write(bytes(raw))
            buf.write(bytes(bytearray(control_bytes)))
            for tag in self.tag_types:
                values = tags.get(tag.name, None)
                if values is None:
                    continue
                try:
                    len(values)
                except TypeError:
                    # Scalar tag value: wrap into a single-element list
                    values = [values]
                if values:
                    for val in values:
                        try:
                            buf.write(encint(val))
                        except ValueError:
                            raise ValueError('Invalid values for %r: %r' %
                                             (tag, values))
            raw = buf.getvalue()
            offset = index.tell()
            # No rollover support: refuse offsets past the record limit
            if offset + self.HEADER_LENGTH >= 0x10000:
                raise too_large
            rendered_entries.append(IndexEntry(offset, len(raw), raw))
            idxt.write(pack(b'>H', self.HEADER_LENGTH + offset))
            index.write(raw)
            last_lead_text = leading_text

        index_block = align_block(index.getvalue())
        idxt_block = align_block(b'IDXT' + idxt.getvalue())
        body = index_block + idxt_block
        if len(body) + self.HEADER_LENGTH >= 0x10000:
            raise too_large

        header = b'INDX'
        buf.seek(0), buf.truncate(0)
        buf.write(pack(b'>I', self.HEADER_LENGTH))
        buf.write(b'\0' * 4)  # Unknown
        buf.write(pack(b'>I', 1))  # Header type? Or index record number?
        buf.write(b'\0' * 4)  # Unknown

        # IDXT block offset
        buf.write(pack(b'>I', self.HEADER_LENGTH + len(index_block)))

        # Number of index entries
        buf.write(pack(b'>I', len(rendered_entries)))

        buf.write(b'\xff' * 8)  # Unknown

        buf.write(b'\0' * 156)  # Unknown
        header += buf.getvalue()

        index_record = header + body

        tagx = self.generate_tagx()
        idxt = (b'IDXT' + pack(b'>H', IndexHeader.HEADER_LENGTH + len(tagx)) +
                b'\0')
        # Last index
        idx = bytes(bytearray([len(last_lead_text)])) + last_lead_text
        idx += pack(b'>H', len(rendered_entries))

        header = {
            'num_of_entries': len(rendered_entries),
            'num_of_cncx': len(self.cncx),
            'tagx': tagx,
            'last_index': align_block(idx),
            'idxt': idxt
        }
        header = IndexHeader()(**header)
        self.records = [header, index_record]
        self.records.extend(self.cncx.records)
        return self.records