def set_metadata(self, metadata):
    if (metadata is None):
        return

    import tempfile

    id3_chunk = ID3v22Comment.converted(metadata).build()

    new_aiff = tempfile.TemporaryFile()
    new_aiff.seek(12, 0)

    #copy the file's chunks to a temporary stream,
    #replacing any existing ID3 chunk with the new one
    id3_found = False
    for (chunk_id, chunk_length, chunk_file) in self.chunk_files():
        if (chunk_id != 'ID3 '):
            new_aiff.write(self.CHUNK_HEADER.build(
                    construct.Container(chunk_id=chunk_id,
                                        chunk_length=chunk_length)))
            transfer_data(chunk_file.read, new_aiff.write)
        else:
            new_aiff.write(self.CHUNK_HEADER.build(
                    construct.Container(chunk_id='ID3 ',
                                        chunk_length=len(id3_chunk))))
            new_aiff.write(id3_chunk)
            id3_found = True

    #if no ID3 chunk was found, append ours at the end
    if (not id3_found):
        new_aiff.write(self.CHUNK_HEADER.build(
                construct.Container(chunk_id='ID3 ',
                                    chunk_length=len(id3_chunk))))
        new_aiff.write(id3_chunk)

    #finish the FORM header now that the total size is known
    header = construct.Container(aiff_id='FORM',
                                 aiff_size=new_aiff.tell() - 8,
                                 aiff_type='AIFF')
    new_aiff.seek(0, 0)
    new_aiff.write(self.AIFF_HEADER.build(header))

    #overwrite the original file with the rebuilt stream
    new_aiff.seek(0, 0)
    f = open(self.filename, 'wb')
    transfer_data(new_aiff.read, f.write)
    new_aiff.close()
    f.close()
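
#The method above rebuilds the file as a FORM/AIFF container: a 12-byte
#header ('FORM' + 32-bit big-endian size + 'AIFF'), followed by chunks
#that each carry an 8-byte header (4-byte ID + 32-bit big-endian
#length), with chunk data padded to an even byte count.  As a rough,
#standalone sketch of that layout (using plain struct rather than the
#CHUNK_HEADER construct above; "aiff_path" is a hypothetical file path),
#the chunks of an existing AIFF file could be listed like this:
import struct

def list_aiff_chunks(aiff_path):
    f = open(aiff_path, 'rb')
    try:
        (form, total_size, aiff_type) = struct.unpack('>4sI4s', f.read(12))
        assert (form == 'FORM') and (aiff_type == 'AIFF')
        chunks = []
        #total_size covers everything after the first 8 bytes,
        #so 4 bytes of it are the 'AIFF' type ID already read
        remaining = total_size - 4
        while (remaining > 0):
            (chunk_id, chunk_length) = struct.unpack('>4sI', f.read(8))
            chunks.append((chunk_id, chunk_length))
            #skip the chunk data plus its even-alignment pad byte, if any
            f.seek(chunk_length + (chunk_length % 2), 1)
            remaining -= (8 + chunk_length + (chunk_length % 2))
        return chunks
    finally:
        f.close()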
def build(self):
    return self.APEv2_TAG.build(
        construct.Container(key=self.key,
                            value=self.data,
                            length=len(self.data),
                            encoding=self.type,
                            undefined1=0,
                            undefined2=0,
                            undefined3=0,
                            read_only=self.read_only,
                            contains_header=False,
                            contains_no_footer=False,
                            is_header=False))
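
#An APEv2 tag item is laid out as a little-endian 32-bit value length,
#a little-endian 32-bit flags field, the item key as a NUL-terminated
#string, and then the raw value bytes.  A standalone sketch of that
#layout with struct, assuming the usual flag assignment of read-only in
#bit 0 and the encoding type in bits 1-2 (other flag bits left zero):
import struct

def pack_ape_item(key, value, encoding=0, read_only=False):
    flags = (encoding << 1) | (1 if read_only else 0)
    return (struct.pack('<II', len(value), flags) +
            key + chr(0) + value)

#e.g. pack_ape_item('Title', 'Some Song') is 8 + 5 + 1 + 9 == 23 bytes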
def build(self):
    comment = construct.Container(vendor_string=self.vendor_string,
                                  framing=1,
                                  value=[])

    #skip empty fields and "0" placeholders for track/disc numbers
    for (key, values) in self.items():
        for value in values:
            if ((value != u"") and not
                ((key in ("TRACKNUMBER", "TRACKTOTAL",
                          "DISCNUMBER", "DISCTOTAL")) and
                 (value == u"0"))):
                comment.value.append("%s=%s" % (key,
                                                value.encode('utf-8')))

    return self.VORBIS_COMMENT.build(comment)
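
#The VORBIS_COMMENT construct serializes those strings into the
#standard Vorbis comment layout: a little-endian 32-bit vendor string
#length, the vendor string, a little-endian 32-bit entry count, then
#each "KEY=value" entry prefixed with its own little-endian 32-bit
#length.  A minimal standalone sketch of that packing with struct
#(hypothetical vendor and entries; the framing byte the construct
#above appends is left out of this sketch):
import struct

def pack_vorbis_comment(vendor_string, entries):
    pieces = [struct.pack('<I', len(vendor_string)), vendor_string,
              struct.pack('<I', len(entries))]
    for entry in entries:   # entry is an already-encoded "KEY=value" string
        pieces.append(struct.pack('<I', len(entry)))
        pieces.append(entry)
    return "".join(pieces)

#e.g. pack_vorbis_comment("example vendor", ["TITLE=Some Song"])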
def build(self):
    header = construct.Container(preamble='APETAGEX',
                                 version_number=2000,
                                 tag_size=0,
                                 item_count=len(self.tags),
                                 undefined1=0,
                                 undefined2=0,
                                 undefined3=0,
                                 read_only=False,
                                 encoding=0,
                                 contains_header=True,
                                 contains_no_footer=False,
                                 is_header=True,
                                 reserved=0L)

    footer = construct.Container(preamble=header.preamble,
                                 version_number=header.version_number,
                                 tag_size=0,
                                 item_count=len(self.tags),
                                 undefined1=0,
                                 undefined2=0,
                                 undefined3=0,
                                 read_only=False,
                                 encoding=0,
                                 contains_header=True,
                                 contains_no_footer=False,
                                 is_header=False,
                                 reserved=0L)

    tags = "".join([tag.build() for tag in self.tags])

    footer.tag_size = header.tag_size = \
        len(tags) + len(ApeTag.APEv2_FOOTER.build(footer))

    return ApeTag.APEv2_FOOTER.build(header) + \
           tags + \
           ApeTag.APEv2_FOOTER.build(footer)
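
#Note that APEv2's tag_size field covers the serialized items plus the
#32-byte footer but not the leading header, which is why the code above
#builds the footer once just to measure it.  A quick standalone check
#of that arithmetic with hypothetical item sizes:
APEV2_FOOTER_SIZE = 32          # 'APETAGEX' + version + size + count + flags + reserved
item_sizes = [25, 31, 18]       # hypothetical serialized item lengths
tag_size = sum(item_sizes) + APEV2_FOOTER_SIZE
#tag_size == 106; the full tag on disk is tag_size plus another
#32 bytes for the header, since contains_header is set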
def from_pcm(cls, filename, pcmreader, compression=None):
    if (pcmreader.bits_per_sample not in (8, 16, 24)):
        raise InvalidFormat(
            _(u"Unsupported bits per sample %s") %
            (pcmreader.bits_per_sample))

    bytes_per_sample = pcmreader.bits_per_sample / 8

    header = construct.Container(magic_number='.snd',
                                 data_offset=0,
                                 data_size=0,
                                 encoding_format={8: 2,
                                                  16: 3,
                                                  24: 4}[pcmreader.bits_per_sample],
                                 sample_rate=pcmreader.sample_rate,
                                 channels=pcmreader.channels)

    try:
        f = file(filename, 'wb')
    except IOError:
        raise EncodingError(None)

    try:
        #send out a dummy header
        f.write(AuAudio.AU_HEADER.build(header))
        header.data_offset = f.tell()

        #send our big-endian PCM data
        #pcmreader.read() returns FrameList objects rather than strings,
        #so we can't use transfer_data
        framelist = pcmreader.read(BUFFER_SIZE)
        while (len(framelist) > 0):
            bytes = framelist.to_bytes(True, True)
            f.write(bytes)
            header.data_size += len(bytes)
            framelist = pcmreader.read(BUFFER_SIZE)

        #send out a complete header
        f.seek(0, 0)
        f.write(AuAudio.AU_HEADER.build(header))
    finally:
        f.close()

    try:
        pcmreader.close()
    except DecodingError:
        raise EncodingError()

    return AuAudio(filename)
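
#The Sun AU header written above consists of six big-endian 32-bit
#fields: the '.snd' magic, the data offset, the data size, the encoding
#code (2, 3 or 4 for 8/16/24-bit signed linear PCM, matching the dict
#in the code), the sample rate and the channel count.  A standalone
#sketch of the same header with struct, assuming no annotation field so
#the data offset is the bare 24-byte header size:
import struct

def pack_au_header(data_size, bits_per_sample, sample_rate, channels):
    encoding = {8: 2, 16: 3, 24: 4}[bits_per_sample]
    return struct.pack('>4sIIIII',
                       '.snd',
                       24,           # data offset: header only, no annotation
                       data_size,
                       encoding,
                       sample_rate,
                       channels)

#e.g. pack_au_header(0, 16, 44100, 2) mirrors the dummy header above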
def delete_metadata(self):
    import tempfile

    new_aiff = tempfile.TemporaryFile()
    new_aiff.seek(12, 0)

    #copy every chunk except the existing ID3 chunk
    for (chunk_id, chunk_length, chunk_file) in self.chunk_files():
        if (chunk_id != 'ID3 '):
            new_aiff.write(self.CHUNK_HEADER.build(
                    construct.Container(chunk_id=chunk_id,
                                        chunk_length=chunk_length)))
            transfer_data(chunk_file.read, new_aiff.write)

    header = construct.Container(aiff_id='FORM',
                                 aiff_size=new_aiff.tell() - 8,
                                 aiff_type='AIFF')
    new_aiff.seek(0, 0)
    new_aiff.write(self.AIFF_HEADER.build(header))
    new_aiff.seek(0, 0)
    f = open(self.filename, 'wb')
    transfer_data(new_aiff.read, f.write)
    new_aiff.close()
    f.close()
def __str__(self):
    def __count_digits__(i):
        if (i == 0):
            return 0
        else:
            return (i % 10) + __count_digits__(i / 10)

    disc_id = construct.Container()

    disc_id.track_count = len(self.tracks)
    disc_id.length = self.length() / 75
    disc_id.digit_sum = sum([__count_digits__(o / 75)
                             for o in self.offsets()]) % 0xFF

    return DiscID.DISCID.build(disc_id).encode('hex')
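
#__count_digits__ above sums the decimal digits of an integer
#recursively; the disc ID's digit_sum is that digit total over each
#track offset converted to seconds (75 CD frames per second), reduced
#mod 255 (0xFF).  A standalone check of the arithmetic with
#hypothetical frame offsets:
def count_digits(i):
    return 0 if (i == 0) else (i % 10) + count_digits(i // 10)

offsets = [150, 25077, 48002]            # hypothetical CD frame offsets
seconds = [o // 75 for o in offsets]     # [2, 334, 640]
digit_sum = sum(count_digits(s) for s in seconds) % 0xFF
#count_digits(2) + count_digits(334) + count_digits(640) == 2 + 10 + 10 == 22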
def _encode(self, value, context):
    import math

    if (value < 0):
        signed = True
        value *= -1
    else:
        signed = False

    (fmant, exponent) = math.frexp(value)
    if ((exponent > 16384) or (fmant >= 1)):
        exponent = 0x7FFF
        mantissa = 0
    else:
        exponent += 16382
        mantissa = fmant * (2 ** 64)

    return construct.Container(signed=signed,
                               exponent=exponent,
                               mantissa=mantissa)
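
#The encoder above produces the sign/exponent/mantissa fields of an
#80-bit IEEE 754 extended float (the format AIFF uses for its sample
#rate).  math.frexp() returns a mantissa in [0.5, 1.0) and a base-2
#exponent; the exponent is rebiased by 16382 and the mantissa scaled
#up to a 64-bit integer with an explicit leading 1 bit.  A standalone
#walk-through for 44100.0:
import math

(fmant, exponent) = math.frexp(44100.0)
#fmant == 0.67291259765625, exponent == 16
biased_exponent = exponent + 16382      # 16398, i.e. 0x400E
mantissa = int(fmant * (2 ** 64))       # 0xAC44000000000000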
def build_id3v1(cls, song_title, artist, album, year, comment,
                track_number):
    def __s_pad__(s, length):
        if (len(s) < length):
            return s + chr(0) * (length - len(s))
        else:
            s = s[0:length].rstrip()
            return s + chr(0) * (length - len(s))

    c = construct.Container()
    c.identifier = 'TAG'
    c.song_title = __s_pad__(song_title.encode('ascii', 'replace'), 30)
    c.artist = __s_pad__(artist.encode('ascii', 'replace'), 30)
    c.album = __s_pad__(album.encode('ascii', 'replace'), 30)
    c.year = __s_pad__(year.encode('ascii', 'replace'), 4)
    c.comment = __s_pad__(comment.encode('ascii', 'replace'), 28)
    c.track_number = int(track_number)
    c.genre = 0

    return ID3v1Comment.ID3v1.build(c)
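
#__s_pad__ forces each ID3v1 field to an exact byte width: short
#strings are padded with NUL bytes, long ones are truncated (with
#trailing spaces stripped, then NUL-padded back to width).  A
#standalone illustration at a width of 10:
def s_pad(s, length):
    if (len(s) < length):
        return s + chr(0) * (length - len(s))
    else:
        s = s[0:length].rstrip()
        return s + chr(0) * (length - len(s))

assert s_pad("Hello", 10) == "Hello\x00\x00\x00\x00\x00"
assert s_pad("A longer title", 10) == "A longer t"
assert len(s_pad("", 10)) == 10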
def _build(self, obj, stream, context):
    data = self.sub_atom.build(obj)
    stream.write(self.header.build(
            construct.Container(type=self.atom_name,
                                size=len(data) + 8)))
    stream.write(data)
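
#A QuickTime/MP4 atom's header is a 32-bit big-endian size followed by
#a 4-character type, where the size includes the 8 header bytes
#themselves (hence the len(data) + 8 above).  A standalone sketch of
#wrapping an arbitrary payload in an atom with struct (a hypothetical
#'free' padding atom as the example):
import struct

def wrap_atom(atom_type, payload):
    return struct.pack('>I4s', len(payload) + 8, atom_type) + payload

#e.g. wrap_atom('free', chr(0) * 16) yields a 24-byte atom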
def from_pcm(cls, filename, pcmreader, compression=None):
    try:
        f = open(filename, 'wb')
    except IOError:
        raise EncodingError(None)

    if (int(pcmreader.channel_mask) in
        (0x4,      # FC
         0x3,      # FL, FR
         0x7,      # FL, FR, FC
         0x33,     # FL, FR, BL, BR
         0x707)):  # FL, SL, FC, FR, SR, BC
        standard_channel_mask = ChannelMask(pcmreader.channel_mask)
        aiff_channel_mask = AIFFChannelMask(standard_channel_mask)
        pcmreader = ReorderedPCMReader(
            pcmreader,
            [standard_channel_mask.channels().index(channel)
             for channel in aiff_channel_mask.channels()])

    try:
        aiff_header = construct.Container(aiff_id='FORM',
                                          aiff_size=4,
                                          aiff_type='AIFF')

        comm_chunk = construct.Container(
            channels=pcmreader.channels,
            total_sample_frames=0,
            sample_size=pcmreader.bits_per_sample,
            sample_rate=float(pcmreader.sample_rate))

        ssnd_header = construct.Container(chunk_id='SSND',
                                          chunk_length=0)
        ssnd_alignment = construct.Container(offset=0,
                                             blocksize=0)

        #skip ahead to the start of the SSND chunk
        f.seek(cls.AIFF_HEADER.sizeof() +
               cls.CHUNK_HEADER.sizeof() +
               cls.COMM_CHUNK.sizeof() +
               cls.CHUNK_HEADER.sizeof(), 0)

        #write the SSND alignment info
        f.write(cls.SSND_ALIGN.build(ssnd_alignment))

        #write big-endian samples to SSND chunk from pcmreader
        framelist = pcmreader.read(BUFFER_SIZE)
        total_pcm_frames = 0
        while (len(framelist) > 0):
            f.write(framelist.to_bytes(True, True))
            total_pcm_frames += framelist.frames
            framelist = pcmreader.read(BUFFER_SIZE)
        total_size = f.tell()

        #return to the start of the file
        f.seek(0, 0)

        #write AIFF header
        aiff_header.aiff_size = total_size - 8
        f.write(cls.AIFF_HEADER.build(aiff_header))

        #write COMM chunk
        comm_chunk.total_sample_frames = total_pcm_frames
        comm_chunk = cls.COMM_CHUNK.build(comm_chunk)
        f.write(cls.CHUNK_HEADER.build(
                construct.Container(chunk_id='COMM',
                                    chunk_length=len(comm_chunk))))
        f.write(comm_chunk)

        #write SSND chunk header
        f.write(cls.CHUNK_HEADER.build(
                construct.Container(
                    chunk_id='SSND',
                    chunk_length=(total_pcm_frames *
                                  (pcmreader.bits_per_sample / 8) *
                                  pcmreader.channels) +
                                 cls.SSND_ALIGN.sizeof())))

        try:
            pcmreader.close()
        except DecodingError:
            raise EncodingError()
    finally:
        f.close()

    return cls(filename)
def set_metadata(self, metadata):
    metadata = VorbisComment.converted(metadata)

    if (metadata is None):
        return

    reader = OggStreamReader(file(self.filename, 'rb'))
    new_file = cStringIO.StringIO()
    writer = OggStreamWriter(new_file)
    current_sequence_number = 0

    pages = reader.pages()

    #transfer our old header
    #this must always be the first packet and the first page
    (header_page, header_data) = pages.next()
    writer.write_page(header_page, header_data)
    current_sequence_number += 1

    #grab the current "comment" and "setup headers" packets
    #these may take one or more pages,
    #but will always end on a page boundary
    del(pages)
    packets = reader.packets(from_beginning=False)

    comment_packet = packets.next()
    headers_packet = packets.next()

    #write the pages for our new "comment" packet
    for (page, data) in OggStreamWriter.build_pages(
        0,
        header_page.bitstream_serial_number,
        current_sequence_number,
        VorbisAudio.COMMENT_HEADER.build(construct.Container(
                packet_type=3,
                vorbis='vorbis')) + metadata.build()):
        writer.write_page(page, data)
        current_sequence_number += 1

    #write the pages for the old "setup headers" packet
    for (page, data) in OggStreamWriter.build_pages(
        0,
        header_page.bitstream_serial_number,
        current_sequence_number,
        headers_packet):
        writer.write_page(page, data)
        current_sequence_number += 1

    #write the rest of the pages, re-sequenced and re-checksummed
    del(packets)
    pages = reader.pages(from_beginning=False)

    for (i, (page, data)) in enumerate(pages):
        page.page_sequence_number = i + current_sequence_number
        page.checksum = OggStreamReader.calculate_ogg_checksum(page, data)
        writer.write_page(page, data)

    reader.close()

    #re-write the file with our new data in "new_file"
    f = file(self.filename, "wb")
    f.write(new_file.getvalue())
    f.close()
    writer.close()

    self.__read_metadata__()
def build_pages(cls, granule_position, serial_number,
                starting_sequence_number, packet_data, header_type=0):
    page = construct.Container(magic_number='OggS',
                               version=0,
                               header_type=header_type,
                               granule_position=granule_position,
                               bitstream_serial_number=serial_number,
                               page_sequence_number=starting_sequence_number,
                               checksum=0)

    if (len(packet_data) == 0):
        #an empty Ogg page, but possibly a continuation
        page.segments = 0
        page.segment_lengths = []
        page.checksum = OggStreamReader.calculate_ogg_checksum(
            page, packet_data)
        return [(page, "")]
    if (len(packet_data) > (255 * 255)):
        #if we need more than one Ogg page to store the packet,
        #handle that case recursively
        page.segments = 255
        page.segment_lengths = [255] * 255
        page.checksum = OggStreamReader.calculate_ogg_checksum(
            page, packet_data[0:255 * 255])
        return [(page, packet_data[0:255 * 255])] + \
               cls.build_pages(granule_position,
                               serial_number,
                               starting_sequence_number + 1,
                               packet_data[255 * 255:],
                               header_type)
    elif (len(packet_data) == (255 * 255)):
        #we need two Ogg pages, one of which is empty
        #(build the full page directly rather than recursing with the
        #same data, which would never terminate)
        page.segments = 255
        page.segment_lengths = [255] * 255
        page.checksum = OggStreamReader.calculate_ogg_checksum(
            page, packet_data)
        return [(page, packet_data)] + \
               cls.build_pages(granule_position,
                               serial_number,
                               starting_sequence_number + 1,
                               "",
                               header_type)
    else:
        #we just need one Ogg page
        page.segments = len(packet_data) / 255
        if ((len(packet_data) % 255) > 0):
            page.segments += 1

        page.segment_lengths = [255] * (len(packet_data) / 255)
        if ((len(packet_data) % 255) > 0):
            page.segment_lengths += [len(packet_data) % 255]

        page.checksum = OggStreamReader.calculate_ogg_checksum(
            page, packet_data)

        return [(page, packet_data)]
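
#Each Ogg page carries a segment table of "lacing values": a packet's
#length is written as a run of 255s plus a final value below 255, so
#one page can describe at most 255 * 255 bytes of a single packet
#(which is why the 255 * 255 case above spills onto an extra page).
#A standalone sketch of that arithmetic, mirroring the single-page
#branch above:
def segment_lengths(packet_length):
    #only meaningful for packets that fit on one page
    lengths = [255] * (packet_length // 255)
    if ((packet_length % 255) > 0):
        lengths += [packet_length % 255]
    return lengths

#segment_lengths(600) == [255, 255, 90]
#segment_lengths(100) == [100]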