def save(self, filename=None, v1=1, v2_version=4, v23_sep='/'):
    """Save changes to a file.

    If no filename is given, the one most recently loaded is used.

    Keyword arguments:
    v1 -- if 0, ID3v1 tags will be removed
          if 1, ID3v1 tags will be updated but not added
          if 2, ID3v1 tags will be created and/or updated
    v2 -- version of ID3v2 tags (3 or 4).

    By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3
    tags, you must call method update_to_v23 before saving the file.

    v23_sep -- the separator used to join multiple text values
        if v2_version == 3. Defaults to '/' but if it's None
        will be the ID3v2v2.4 null separator.

    The lack of a way to update only an ID3v1 tag is intentional.
    """
    framedata = self._prepare_framedata(v2_version, v23_sep)
    framesize = len(framedata)

    # Nothing to write: remove any existing tag instead. A missing
    # file is fine in that case; any other error propagates.
    if not framedata:
        try:
            self.delete(filename)
        except EnvironmentError as err:
            from errno import ENOENT
            if err.errno != ENOENT:
                raise
        return

    if filename is None:
        filename = self.filename
    try:
        f = open(filename, 'rb+')
    except IOError as err:
        from errno import ENOENT
        if err.errno != ENOENT:
            raise
        # File doesn't exist yet: create it, then reopen read/write.
        # Close the creation handle immediately rather than leaking it
        # (the original rebound `f` without closing the 'ab' handle).
        open(filename, 'ab').close()
        f = open(filename, 'rb+')
    try:
        idata = f.read(10)
        header = self._prepare_id3_header(idata, framesize, v2_version)
        header, outsize, insize = header

        # Pad the rendered frames out to the chosen tag size.
        data = header + framedata + (b'\x00' * (outsize - framesize))

        # Grow the file if the new tag is larger than the old one
        # (insize/outsize exclude the 10-byte ID3 header).
        if (insize < outsize):
            insert_bytes(f, outsize - insize, insize + 10)
        f.seek(0)
        f.write(data)

        self.__save_v1(f, v1)
    finally:
        f.close()
def test_insert_bytes(self):
    # Exercise insert_bytes() on a file living on the specially mounted
    # filesystem fixture (mmap-backed -- see _init_mmap), verifying the
    # result both through the still-open handle and after reopening.
    from mutagen._util import insert_bytes
    expected_content = 'foo\nbaz\nbar\n'
    self.mount()
    try:
        self._init_mmap()
        try:
            # Open a 4-byte gap at offset 4, then fill it with 'baz\n'.
            insert_bytes(self.file_obj, 4, 4)
            self.file_obj.seek(4)
            self.file_obj.write('baz\n')
            self.file_obj.seek(0)
            content = self.file_obj.read()
            self.assertEqual(content, expected_content)
            # Reopen to confirm the change reached the backing store.
            self.file_obj.close()
            self.file_obj = self._open_file()
            content = self.file_obj.read()
            self.assertEqual(content, expected_content)
        finally:
            self._de_init_mmap()
    finally:
        # Always unmount and clean up the fixture, even on failure.
        self.umount()
        self.clean_up()
def __save_new(self, fileobj, atoms, ilst_data, padding_func):
    # Create a brand-new moov.udta.meta atom containing ilst_data,
    # splice it into the file, then fix up parent atom sizes and
    # sample offsets.
    hdlr = Atom.render(b"hdlr", b"\x00" * 8 + b"mdirappl" + b"\x00" * 9)
    meta_data = b"\x00\x00\x00\x00" + hdlr + ilst_data
    try:
        path = atoms.path(b"moov", b"udta")
    except KeyError:
        # No moov.udta yet; insert under moov directly.
        path = atoms.path(b"moov")
    offset = path[-1]._dataoffset
    # ignoring some atom overhead... but we don't have padding left anyway
    # and padding_size is guaranteed to be less than zero
    content_size = get_size(fileobj) - offset
    padding_size = -len(meta_data)
    assert padding_size < 0
    info = PaddingInfo(padding_size, content_size)
    # Ask the user-supplied policy how much padding to add; clamp to
    # the maximum representable 32-bit atom payload.
    new_padding = info._get_padding(padding_func)
    new_padding = min(0xFFFFFFFF, new_padding)
    free = Atom.render(b"free", b"\x00" * new_padding)
    meta = Atom.render(b"meta", meta_data + free)
    if path[-1].name != b"udta":
        # moov.udta not found -- create one
        data = Atom.render(b"udta", meta)
    else:
        data = meta
    insert_bytes(fileobj, len(data), offset)
    fileobj.seek(offset)
    fileobj.write(data)
    self.__update_parents(fileobj, path, len(data))
    self.__update_offsets(fileobj, atoms, len(data), offset)
def save(self, filename=None, deleteid3=False):
    """Save metadata blocks to a file.

    If no filename is given, the one most recently loaded is used.
    """
    # NOTE: Python 2-era code -- metadata is handled as byte strings (str).
    if filename is None:
        filename = self.filename
    f = open(filename, 'rb+')
    try:
        # Ensure we've got padding at the end, and only at the end.
        # If adding makes it too large, we'll scale it down later.
        self.metadata_blocks.append(Padding('\x00' * 1020))
        MetadataBlock.group_padding(self.metadata_blocks)

        header = self.__check_header(f)
        # Space currently occupied by metadata, excluding the stream header.
        available = self.__find_audio_offset(
            f) - header  # "fLaC" and maybe ID3
        data = MetadataBlock.writeblocks(self.metadata_blocks)

        # Delete ID3v2
        if deleteid3 and header > 4:
            available += header - 4
            header = 4

        if len(data) > available:
            # If we have too much data, see if we can reduce padding.
            padding = self.metadata_blocks[-1]
            newlength = padding.length - (len(data) - available)
            if newlength > 0:
                padding.length = newlength
                data = MetadataBlock.writeblocks(self.metadata_blocks)
                assert len(data) == available
        elif len(data) < available:
            # If we have too little data, increase padding.
            self.metadata_blocks[-1].length += (available - len(data))
            data = MetadataBlock.writeblocks(self.metadata_blocks)
            assert len(data) == available

        if len(data) != available:
            # We couldn't reduce the padding enough.
            diff = (len(data) - available)
            insert_bytes(f, diff, header)

        f.seek(header - 4)
        f.write("fLaC" + data)

        # Delete ID3v1
        if deleteid3:
            try:
                f.seek(-128, 2)
            except IOError:
                # File shorter than 128 bytes: no v1 tag possible.
                pass
            else:
                if f.read(3) == "TAG":
                    f.seek(-128, 2)
                    f.truncate()
    finally:
        f.close()
def insert_chunk(self, id_, data=None):
    """Insert a new chunk at the end of the RIFF or LIST"""
    assert isinstance(id_, str)
    if not is_valid_chunk_id(id_):
        raise KeyError("Invalid RIFF key.")

    # New chunk goes immediately after this container's current extent.
    next_offset = self.offset + self.size
    size = self.HEADER_SIZE
    data_size = 0
    if data:
        data_size = len(data)
        # RIFF chunks are word-aligned: odd-sized data gets a pad byte.
        padding = data_size % 2
        size += data_size + padding
    insert_bytes(self._fileobj, size, next_offset)
    self._fileobj.seek(next_offset)
    # Write the 8-byte header: 4-char id (space padded) + little-endian size.
    self._fileobj.write(
        pack('<4si', id_.ljust(4).encode('ascii'), data_size))
    self._fileobj.seek(next_offset)
    # Re-parse the header we just wrote to get a chunk object.
    chunk = RiffChunk.parse(self._fileobj, self)
    self._update_size(chunk.size)
    if data:
        chunk.write(data)
    self.subchunks().append(chunk)
    self._fileobj.flush()
    return chunk
def save(self):
    """Write the current ASF tags back to self.filename.

    Routes each attribute to the header object that can legally carry
    it, creates any missing header objects, renders the full header,
    and rewrites it in place (resizing the file when the header grew
    or shrank).
    """
    # Move attributes to the right objects
    self.to_extended_content_description = {}
    self.to_metadata = {}
    self.to_metadata_library = []
    for name, value in self.tags:
        if name in _standard_attribute_names:
            continue
        # Values over 64KiB only fit in the metadata library object.
        large_value = value.data_size() > 0xFFFF
        if (value.language is None and value.stream is None and
                name not in self.to_extended_content_description and
                not large_value):
            self.to_extended_content_description[name] = value
        elif (value.language is None and value.stream is not None and
                name not in self.to_metadata and not large_value):
            self.to_metadata[name] = value
        else:
            self.to_metadata_library.append((name, value))

    # Add missing objects
    if not self.content_description_obj:
        self.content_description_obj = \
            ContentDescriptionObject()
        self.objects.append(self.content_description_obj)
    if not self.extended_content_description_obj:
        self.extended_content_description_obj = \
            ExtendedContentDescriptionObject()
        self.objects.append(self.extended_content_description_obj)
    if not self.header_extension_obj:
        self.header_extension_obj = \
            HeaderExtensionObject()
        self.objects.append(self.header_extension_obj)
    if not self.metadata_obj:
        self.metadata_obj = \
            MetadataObject()
        self.header_extension_obj.objects.append(self.metadata_obj)
    if not self.metadata_library_obj:
        self.metadata_library_obj = \
            MetadataLibraryObject()
        self.header_extension_obj.objects.append(self.metadata_library_obj)

    # Render the header
    data = "".join([obj.render(self) for obj in self.objects])
    data = (HeaderObject.GUID +
            struct.pack("<QL", len(data) + 30, len(self.objects)) +
            "\x01\x02" + data)

    # Use open() instead of the removed Python 2 builtin file();
    # open() behaves identically here on both Python 2 and 3.
    fileobj = open(self.filename, "rb+")
    try:
        size = len(data)
        if size > self.size:
            insert_bytes(fileobj, size - self.size, self.size)
        if size < self.size:
            delete_bytes(fileobj, self.size - size, 0)
        fileobj.seek(0)
        fileobj.write(data)
    finally:
        fileobj.close()
def test_insert_6106_79_51760(self):
    # This appears to be due to ANSI C limitations in read/write on rb+
    # files. The problematic behavior only showed up in our mmap fallback
    # code for transfers of this or similar sizes.
    data = b''.join(str(x).encode('ascii')
                    for x in range(12574))  # 51760 bytes
    o = self.file(data)
    insert_bytes(o, 6106, 79)
    # assertEqual instead of the deprecated failUnless alias (removed
    # in Python 3.12); it also reports a useful diff on failure.
    self.assertEqual(data[:6106 + 79] + data[79:], self.read(o))
def test_insert_6106_79_51760(self):
    # This appears to be due to ANSI C limitations in read/write on rb+
    # files.
    data = u''.join(map(str, range(12574)))  # 51760 bytes
    data = data.encode("ascii")
    with self.file(data) as o:
        insert_bytes(o, 6106, 79)
        # assertEqual instead of the deprecated failUnless alias
        # (removed in Python 3.12); gives a diff on failure.
        self.assertEqual(data[:6106 + 79] + data[79:], self.read(o))
def __save_new(self, fileobj, atoms, ilst, offset):
    """Create a fresh meta atom (with an Apple 'mdir' handler) inside
    moov.udta and splice it into the file at the correct position."""
    hdlr = Atom.render("hdlr", "\x00" * 8 + "mdirappl" + "\x00" * 9)
    meta = Atom.render("meta", "\x00\x00\x00\x00" + hdlr + ilst)
    moov, udta = atoms.path("moov", "udta")
    # Insertion point: just past udta's 8-byte header, shifted by the
    # caller-supplied file offset.
    insert_at = udta.offset + offset + 8
    insert_bytes(fileobj, len(meta), insert_at)
    fileobj.seek(insert_at)
    fileobj.write(meta)
    # Parent atoms (moov, udta) grew by len(meta); update their sizes.
    self.__update_parents(fileobj, [moov, udta], len(meta), offset)
def test_insert_6106_79_51760(self):
    # This appears to be due to ANSI C limitations in read/write on rb+
    # files. The problematic behavior only showed up in our mmap fallback
    # code for transfers of this or similar sizes.
    data = ''.join(map(str, range(12574)))  # 51760 bytes
    o = self.file(data)
    insert_bytes(o, 6106, 79)
    # assertEqual instead of the deprecated failUnless alias (removed
    # in Python 3.12); it also reports a useful diff on failure.
    self.assertEqual(data[:6106 + 79] + data[79:], self.read(o))
def save(self):
    # Write the current ASF tags back to self.filename: route each
    # attribute to a header object that can carry it, create missing
    # objects, render the header and rewrite it in place.
    # NOTE: Python 2-era code -- header data is handled as str bytes.

    # Move attributes to the right objects
    self.to_extended_content_description = {}
    self.to_metadata = {}
    self.to_metadata_library = []
    for name, value in self.tags:
        if name in _standard_attribute_names:
            continue
        # Values over 64KiB only fit in the metadata library object.
        large_value = value.data_size() > 0xFFFF
        if (value.language is None and value.stream is None and
                name not in self.to_extended_content_description and
                not large_value):
            self.to_extended_content_description[name] = value
        elif (value.language is None and value.stream is not None and
                name not in self.to_metadata and not large_value):
            self.to_metadata[name] = value
        else:
            self.to_metadata_library.append((name, value))

    # Add missing objects
    if not self.content_description_obj:
        self.content_description_obj = \
            ContentDescriptionObject()
        self.objects.append(self.content_description_obj)
    if not self.extended_content_description_obj:
        self.extended_content_description_obj = \
            ExtendedContentDescriptionObject()
        self.objects.append(self.extended_content_description_obj)
    if not self.header_extension_obj:
        self.header_extension_obj = \
            HeaderExtensionObject()
        self.objects.append(self.header_extension_obj)
    if not self.metadata_obj:
        self.metadata_obj = \
            MetadataObject()
        self.header_extension_obj.objects.append(self.metadata_obj)
    if not self.metadata_library_obj:
        self.metadata_library_obj = \
            MetadataLibraryObject()
        self.header_extension_obj.objects.append(self.metadata_library_obj)

    # Render the header: GUID + size/count prefix + reserved bytes + body.
    data = "".join([obj.render(self) for obj in self.objects])
    data = (HeaderObject.GUID +
            struct.pack("<QL", len(data) + 30, len(self.objects)) +
            "\x01\x02" + data)

    fileobj = open(self.filename, "rb+")
    try:
        size = len(data)
        # Grow or shrink the file so the new header fits exactly.
        if size > self.size:
            insert_bytes(fileobj, size - self.size, self.size)
        if size < self.size:
            delete_bytes(fileobj, self.size - size, 0)
        fileobj.seek(0)
        fileobj.write(data)
    finally:
        fileobj.close()
def test_insert_6106_79_51760(self):
    # This appears to be due to ANSI C limitations in read/write on rb+
    # files. The problematic behavior only showed up in our mmap fallback
    # code for transfers of this or similar sizes.
    data = u''.join(map(text_type, xrange(12574)))  # 51760 bytes
    data = data.encode("ascii")
    with self.file(data) as o:
        insert_bytes(o, 6106, 79)
        # assertEqual instead of the failUnless alias (deprecated since
        # Python 2.7); it also reports a useful diff on failure.
        self.assertEqual(data[:6106 + 79] + data[79:], self.read(o))
def save(self, filename=None, deleteid3=False):
    """Save metadata blocks to a file.

    If no filename is given, the one most recently loaded is used.
    """
    # NOTE: Python 2-era code -- block data is handled as str bytes.
    if filename is None:
        filename = self.filename
    f = open(filename, 'rb+')
    try:
        # Ensure we've got padding at the end, and only at the end.
        # If adding makes it too large, we'll scale it down later.
        self.metadata_blocks.append(Padding('\x00' * 1020))
        MetadataBlock.group_padding(self.metadata_blocks)

        header = self.__check_header(f)
        # Bytes currently available for metadata before the audio frames.
        available = self.__find_audio_offset(f) - header  # "fLaC" and maybe ID3
        data = MetadataBlock.writeblocks(self.metadata_blocks)

        # Delete ID3v2
        if deleteid3 and header > 4:
            available += header - 4
            header = 4

        if len(data) > available:
            # If we have too much data, see if we can reduce padding.
            padding = self.metadata_blocks[-1]
            newlength = padding.length - (len(data) - available)
            if newlength > 0:
                padding.length = newlength
                data = MetadataBlock.writeblocks(self.metadata_blocks)
                assert len(data) == available
        elif len(data) < available:
            # If we have too little data, increase padding.
            self.metadata_blocks[-1].length += (available - len(data))
            data = MetadataBlock.writeblocks(self.metadata_blocks)
            assert len(data) == available

        if len(data) != available:
            # We couldn't reduce the padding enough.
            diff = (len(data) - available)
            insert_bytes(f, diff, header)

        f.seek(header - 4)
        f.write("fLaC" + data)

        # Delete ID3v1
        if deleteid3:
            try:
                f.seek(-128, 2)
            except IOError:
                # File shorter than 128 bytes: no v1 tag possible.
                pass
            else:
                if f.read(3) == "TAG":
                    f.seek(-128, 2)
                    f.truncate()
    finally:
        f.close()
def replace(klass, fileobj, old_pages, new_pages): """Replace old_pages with new_pages within fileobj. old_pages must have come from reading fileobj originally. new_pages are assumed to have the 'same' data as old_pages, and so the serial and sequence numbers will be copied, as will the flags for the first and last pages. fileobj will be resized and pages renumbered as necessary. As such, it must be opened r+b or w+b. """ # Number the new pages starting from the first old page. first = old_pages[0].sequence for page, seq in zip(new_pages, list(range(first, first + len(new_pages)))): page.sequence = seq page.serial = old_pages[0].serial new_pages[0].first = old_pages[0].first new_pages[0].last = old_pages[0].last new_pages[0].continued = old_pages[0].continued new_pages[-1].first = old_pages[-1].first new_pages[-1].last = old_pages[-1].last new_pages[-1].complete = old_pages[-1].complete if not new_pages[-1].complete and len(new_pages[-1].packets) == 1: new_pages[-1].position = -1 new_data = bytearray().join(map(klass.write, new_pages)) # Make room in the file for the new data. delta = len(new_data) fileobj.seek(old_pages[0].offset, 0) insert_bytes(fileobj, delta, old_pages[0].offset) fileobj.seek(old_pages[0].offset, 0) fileobj.write(new_data) new_data_end = old_pages[0].offset + delta # Go through the old pages and delete them. Since we shifted # the data down the file, we need to adjust their offsets. We # also need to go backwards, so we don't adjust the deltas of # the other pages. old_pages.reverse() for old_page in old_pages: adj_offset = old_page.offset + delta delete_bytes(fileobj, old_page.size, adj_offset) # Finally, if there's any discrepency in length, we need to # renumber the pages for the logical stream. if len(old_pages) != len(new_pages): fileobj.seek(new_data_end, 0) serial = new_pages[-1].serial sequence = new_pages[-1].sequence + 1 klass.renumber(fileobj, serial, sequence)
def save(self, filething=None, v1=1, v2_version=4, v23_sep='/',
         padding=None):
    """save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)

    Save changes to a file.

    Args:
        filething (filething):
            Filename to save the tag to. If no filename is given,
            the one most recently loaded is used.
        v1 (ID3v1SaveOptions):
            if 0, ID3v1 tags will be removed.
            if 1, ID3v1 tags will be updated but not added.
            if 2, ID3v1 tags will be created and/or updated
        v2 (int):
            version of ID3v2 tags (3 or 4).
        v23_sep (text):
            the separator used to join multiple text values
            if v2_version == 3. Defaults to '/' but if it's None
            will be the ID3v2v2.4 null separator.
        padding (:obj:`mutagen.PaddingFunction`)

    Raises:
        mutagen.MutagenError

    By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3
    tags, you must call method update_to_v23 before saving the file.

    The lack of a way to update only an ID3v1 tag is intentional.
    """
    f = filething.fileobj
    # Determine the size of any existing v2 tag (0 if none).
    try:
        header = ID3Header(filething.fileobj)
    except ID3NoHeaderError:
        old_size = 0
    else:
        old_size = header.size
    data = self._prepare_data(f, 0, old_size, v2_version, v23_sep, padding)
    new_size = len(data)
    # Resize the file so the new tag fits exactly, then overwrite.
    if (old_size < new_size):
        insert_bytes(f, new_size - old_size, old_size)
    elif (old_size > new_size):
        delete_bytes(f, old_size - new_size, new_size)
    f.seek(0)
    f.write(data)
    self.__save_v1(f, v1)
def replace(klass, fileobj, old_pages, new_pages): """Replace old_pages with new_pages within fileobj. old_pages must have come from reading fileobj originally. new_pages are assumed to have the 'same' data as old_pages, and so the serial and sequence numbers will be copied, as will the flags for the first and last pages. fileobj will be resized and pages renumbered as necessary. As such, it must be opened r+b or w+b. """ # Number the new pages starting from the first old page. first = old_pages[0].sequence for page, seq in zip(new_pages, range(first, first + len(new_pages))): page.sequence = seq page.serial = old_pages[0].serial new_pages[0].first = old_pages[0].first new_pages[0].last = old_pages[0].last new_pages[0].continued = old_pages[0].continued new_pages[-1].first = old_pages[-1].first new_pages[-1].last = old_pages[-1].last new_pages[-1].complete = old_pages[-1].complete if not new_pages[-1].complete and len(new_pages[-1].packets) == 1: new_pages[-1].position = -1L new_data = "".join(map(klass.write, new_pages)) # Make room in the file for the new data. delta = len(new_data) fileobj.seek(old_pages[0].offset, 0) insert_bytes(fileobj, delta, old_pages[0].offset) fileobj.seek(old_pages[0].offset, 0) fileobj.write(new_data) new_data_end = old_pages[0].offset + delta # Go through the old pages and delete them. Since we shifted # the data down the file, we need to adjust their offsets. We # also need to go backwards, so we don't adjust the deltas of # the other pages. old_pages.reverse() for old_page in old_pages: adj_offset = old_page.offset + delta delete_bytes(fileobj, old_page.size, adj_offset) # Finally, if there's any discrepency in length, we need to # renumber the pages for the logical stream. if len(old_pages) != len(new_pages): fileobj.seek(new_data_end, 0) serial = new_pages[-1].serial sequence = new_pages[-1].sequence + 1 klass.renumber(fileobj, serial, sequence)
def __save_existing(self, fileobj, atoms, path, data, offset):
    """Overwrite the existing ilst atom with new data, growing or
    shrinking the file as needed, then propagate the size change to
    the parent atoms."""
    # Replace the old ilst atom.
    ilst = path.pop()
    pos = ilst.offset + offset
    delta = len(data) - ilst.length
    fileobj.seek(pos)
    if delta > 0:
        # New data is larger: open up a gap before writing.
        insert_bytes(fileobj, delta, pos)
    elif delta < 0:
        # New data is smaller: close the surplus gap.
        delete_bytes(fileobj, -delta, pos)
    fileobj.seek(pos)
    fileobj.write(data)
    self.__update_parents(fileobj, path, delta, offset)
def save(self, filething=None, v1=1, v2_version=4, v23_sep='/',
         padding=None):
    """save(filething=None, v1=1, v2_version=4, v23_sep='/', padding=None)

    Save changes to a file.

    Args:
        filething (filething):
            Filename to save the tag to. If no filename is given,
            the one most recently loaded is used.
        v1 (ID3v1SaveOptions):
            if 0, ID3v1 tags will be removed.
            if 1, ID3v1 tags will be updated but not added.
            if 2, ID3v1 tags will be created and/or updated
        v2 (int):
            version of ID3v2 tags (3 or 4).
        v23_sep (text):
            the separator used to join multiple text values
            if v2_version == 3. Defaults to '/' but if it's None
            will be the ID3v2v2.4 null separator.
        padding (:obj:`mutagen.PaddingFunction`)

    Raises:
        mutagen.MutagenError

    By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3
    tags, you must call method update_to_v23 before saving the file.

    The lack of a way to update only an ID3v1 tag is intentional.
    """
    f = filething.fileobj
    # Size of the existing v2 tag, or 0 when the file has none.
    try:
        header = ID3Header(filething.fileobj)
    except ID3NoHeaderError:
        old_size = 0
    else:
        old_size = header.size
    data = self._prepare_data(
        f, 0, old_size, v2_version, v23_sep, padding)
    new_size = len(data)
    # Resize the file so the new tag fits exactly, then overwrite.
    if (old_size < new_size):
        insert_bytes(f, new_size - old_size, old_size)
    elif (old_size > new_size):
        delete_bytes(f, old_size - new_size, new_size)
    f.seek(0)
    f.write(data)
    self.__save_v1(f, v1)
def __save_new(self, fileobj, atoms, ilst):
    # Create a brand-new meta atom (with an Apple 'mdir' handler and
    # trailing padding) inside moov.udta, creating udta if needed,
    # then splice it in and fix up parent sizes and sample offsets.
    hdlr = Atom.render("hdlr", "\x00" * 8 + "mdirappl" + "\x00" * 9)
    meta = Atom.render("meta", "\x00\x00\x00\x00" + hdlr +
                       ilst + self.__pad_ilst(ilst))
    try:
        path = atoms.path("moov", "udta")
    except KeyError:
        # moov.udta not found -- create one
        path = atoms.path("moov")
        meta = Atom.render("udta", meta)
    # Insert just past the last path atom's 8-byte header.
    offset = path[-1].offset + 8
    insert_bytes(fileobj, len(meta), offset)
    fileobj.seek(offset)
    fileobj.write(meta)
    self.__update_parents(fileobj, path, len(meta))
    self.__update_offsets(fileobj, atoms, len(meta), offset)
def __save_new(self, fileobj, atoms, ilst):
    """Create a brand-new meta atom holding *ilst* inside moov.udta
    (creating udta if needed), splice it into the file, then fix up
    parent atom sizes and sample offsets."""
    # Handler payload: 8 reserved bytes, handler/type "mdir" + "appl",
    # then 9 bytes of reserved/name data. The original had the
    # corrupted literal b"mdirapplb" (a stray trailing "b" from a
    # bytes-literal conversion); the field must be exactly "mdirappl".
    hdlr = Atom.render(b"hdlr", b"\x00" * 8 + b"mdirappl" + b"\x00" * 9)
    meta = Atom.render(
        b"meta", b"\x00\x00\x00\x00" + hdlr + ilst + self.__pad_ilst(ilst))
    try:
        path = atoms.path("moov", "udta")
    except KeyError:
        # moov.udta not found -- create one
        path = atoms.path("moov")
        # bytes atom name, consistent with the b"hdlr"/b"meta" calls above
        meta = Atom.render(b"udta", meta)
    # Insert just past the last path atom's 8-byte header.
    offset = path[-1].offset + 8
    insert_bytes(fileobj, len(meta), offset)
    fileobj.seek(offset)
    fileobj.write(meta)
    self.__update_parents(fileobj, path, len(meta))
    self.__update_offsets(fileobj, atoms, len(meta), offset)
def test_many_changes(self, num_runs=5, num_changes=300,
                      min_change_size=500, max_change_size=1000,
                      min_buffer_size=1, max_buffer_size=2000):
    """Stress-test insert_bytes/delete_bytes: apply many random
    insertions with random buffer sizes, then undo them in reverse
    order and check the file is byte-identical to the original."""
    # Deprecated unittest aliases (failUnless/failIfEqual/
    # failUnlessEqual) replaced with their modern names, which were
    # removed in Python 3.12.
    self.assertTrue(
        min_buffer_size < min_change_size and
        max_buffer_size > max_change_size and
        min_change_size < max_change_size and
        min_buffer_size < max_buffer_size,
        "Given testing parameters make this test useless")
    for j in range(num_runs):
        data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ" * 1024
        with self.file(data) as fobj:
            filesize = len(data)
            # Generate the list of changes to apply
            changes = []
            for i in range(num_changes):
                change_size = random.randrange(
                    min_change_size, max_change_size)
                change_offset = random.randrange(0, filesize)
                filesize += change_size
                changes.append((change_offset, change_size))

            # Apply the changes, and make sure they all took.
            for offset, size in changes:
                buffer_size = random.randrange(
                    min_buffer_size, max_buffer_size)
                insert_bytes(fobj, size, offset, BUFFER_SIZE=buffer_size)
            fobj.seek(0)
            self.assertNotEqual(fobj.read(len(data)), data)
            fobj.seek(0, 2)
            self.assertEqual(fobj.tell(), filesize)

            # Then, undo them.
            changes.reverse()
            for offset, size in changes:
                buffer_size = random.randrange(
                    min_buffer_size, max_buffer_size)
                delete_bytes(fobj, size, offset, BUFFER_SIZE=buffer_size)
            fobj.seek(0)
            self.assertTrue(fobj.read() == data)
def save(self, filename=None, v2_version=4, v23_sep='/'):
    """Save ID3v2 data to the AIFF file"""
    framedata = self._prepare_framedata(v2_version, v23_sep)
    framesize = len(framedata)

    if filename is None:
        filename = self.filename

    # Unlike the parent ID3.save method, we won't save to a blank file
    # since we would have to construct a empty AIFF file
    fileobj = open(filename, 'rb+')
    iff_file = IFFFile(fileobj)

    try:
        # Ensure the file has an ID3 chunk to write into.
        if u'ID3' not in iff_file:
            iff_file.insert_chunk(u'ID3')

        chunk = iff_file[u'ID3']
        fileobj.seek(chunk.data_offset)

        header = fileobj.read(10)
        header = self._prepare_id3_header(header, framesize, v2_version)
        header, new_size, _ = header

        # Pad the rendered frames out to the chosen tag size.
        data = header + framedata + (b'\x00' * (new_size - framesize))

        # Include ID3 header size in 'new_size' calculation
        new_size += 10

        # Expand the chunk if necessary, including pad byte
        if new_size > chunk.size:
            insert_at = chunk.offset + chunk.size
            # Odd-sized chunk data needs a pad byte (IFF word alignment).
            insert_size = new_size - chunk.size + new_size % 2
            insert_bytes(fileobj, insert_size, insert_at)
            chunk.resize(new_size)

        fileobj.seek(chunk.data_offset)
        fileobj.write(data)
    finally:
        fileobj.close()
def test_many_changes(self, num_runs=5, num_changes=300,
                      min_change_size=500, max_change_size=1000,
                      min_buffer_size=1, max_buffer_size=2000):
    # Stress-test insert_bytes/delete_bytes: apply many random
    # insertions with random buffer sizes, then undo them in reverse
    # order and check the file is byte-identical to the original.
    # (Python 2-era test: xrange, deprecated unittest aliases.)
    self.failUnless(min_buffer_size < min_change_size and
                    max_buffer_size > max_change_size and
                    min_change_size < max_change_size and
                    min_buffer_size < max_buffer_size,
                    "Given testing parameters make this test useless")
    for j in xrange(num_runs):
        data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ" * 1024
        fobj = self.file(data)
        filesize = len(data)
        # Generate the list of changes to apply
        changes = []
        for i in xrange(num_changes):
            change_size = random.randrange(
                min_change_size, max_change_size)
            change_offset = random.randrange(0, filesize)
            filesize += change_size
            changes.append((change_offset, change_size))

        # Apply the changes, and make sure they all took.
        for offset, size in changes:
            buffer_size = random.randrange(
                min_buffer_size, max_buffer_size)
            insert_bytes(fobj, size, offset, BUFFER_SIZE=buffer_size)
        fobj.seek(0)
        self.failIfEqual(fobj.read(len(data)), data)
        fobj.seek(0, 2)
        self.failUnlessEqual(fobj.tell(), filesize)

        # Then, undo them.
        changes.reverse()
        for offset, size in changes:
            buffer_size = random.randrange(
                min_buffer_size, max_buffer_size)
            delete_bytes(fobj, size, offset, BUFFER_SIZE=buffer_size)
        fobj.seek(0)
        self.failUnless(fobj.read() == data)
def insert_chunk(self, id_, data=None):
    """Insert a new chunk at the end of the container chunk"""
    if not is_valid_chunk_id(id_):
        raise KeyError("Invalid IFF key.")

    # New chunk goes immediately after this container's current extent.
    next_offset = self.offset + self.size
    size = self.HEADER_SIZE
    data_size = 0
    if data:
        data_size = len(data)
        # IFF chunks are word-aligned: odd-sized data gets a pad byte.
        padding = data_size % 2
        size += data_size + padding
    insert_bytes(self._fileobj, size, next_offset)
    self._fileobj.seek(next_offset)
    self.write_new_header(id_.ljust(4).encode('ascii'), data_size)
    self._fileobj.seek(next_offset)
    # Re-parse the header we just wrote to get a chunk object.
    chunk = self.parse_next_subchunk()
    self._update_size(chunk.size)
    if data:
        chunk.write(data)
    self.subchunks().append(chunk)
    self._fileobj.flush()
    return chunk
def __save_existing(self, fileobj, atoms, path, data):
    # Replace the old ilst atom.
    ilst = path.pop()
    offset = ilst.offset
    length = ilst.length

    # Check for padding b"free" atoms
    # Adjacent free atoms (before and after ilst) are absorbed into
    # the region being rewritten, so their space can be reused.
    meta = path[-1]
    index = meta.children.index(ilst)
    try:
        prev = meta.children[index - 1]
        if prev.name == b"free":
            offset = prev.offset
            length += prev.length
    except IndexError:
        pass
    try:
        next = meta.children[index + 1]
        if next.name == b"free":
            length += next.length
    except IndexError:
        pass

    delta = len(data) - length
    if delta > 0 or (delta < 0 and delta > -8):
        # Not enough room (or too little slack to fit a minimal free
        # atom): pad the new data and grow the file.
        data += self.__pad_ilst(data)
        delta = len(data) - length
        insert_bytes(fileobj, delta, offset)
    elif delta < 0:
        # Enough slack: fill it with a free atom instead of resizing.
        data += self.__pad_ilst(data, -delta - 8)
        delta = 0

    fileobj.seek(offset)
    fileobj.write(data)
    self.__update_parents(fileobj, path, delta)
    self.__update_offsets(fileobj, atoms, delta, offset)
def test_zero(self):
    """Inserting zero bytes must leave the file contents unchanged."""
    original = b'abcdefghij'
    with self.file(original) as handle:
        insert_bytes(handle, 0, 1)
        self.assertEqual(original, self.read(handle))
def test_insert_after_one(self):
    """Inserting past the end of a one-byte file extends it with
    NUL bytes."""
    o = self.file(b'a')
    insert_bytes(o, 8, 1)
    # assertEqual instead of the assertEquals alias, which is
    # deprecated and removed in Python 3.12.
    self.assertEqual(b'a' + b'\x00' * 8, self.read(o))
# Fragment from the interior of a Python 2 ID3.save implementation:
# computes the on-disk tag size, grows the file if needed, writes the
# v2 tag, then appends/updates the ID3v1 tag at the end of the file.
# No existing tag: pretend the old tag body had size -10 so that the
# 10-byte header is accounted for below.
if id3 != 'ID3':
    insize = -10
if insize >= framesize:
    # Old tag area is big enough; reuse it as-is.
    outsize = insize
else:
    # Round the new size up to the next 1KiB boundary and pad.
    outsize = (framesize + 1023) & ~0x3FF
    framedata += '\x00' * (outsize - framesize)

framesize = BitPaddedInt.to_str(outsize, width=4)
flags = 0
header = pack('>3sBBB4s', 'ID3', v2, 0, flags, framesize)
data = header + framedata

if (insize < outsize):
    # Tag grew: open up room after the existing header+body.
    insert_bytes(f, outsize - insize, insize + 10)
f.seek(0)
f.write(data)

try:
    f.seek(-128, 2)
except IOError, err:
    from errno import EINVAL
    if err.errno != EINVAL:
        raise
    # File shorter than 128 bytes: no v1 tag can exist.
    f.seek(0, 2)  # ensure read won't get "TAG"

if f.read(3) == "TAG":
    f.seek(-128, 2)
    if v1 > 0:
        f.write(MakeID3v1(self))
def save(self, filename=None, v1=1, v2_version=4, v23_sep="/"):
    """Save changes to a file.

    If no filename is given, the one most recently loaded is used.

    Keyword arguments:
    v1 -- if 0, ID3v1 tags will be removed
          if 1, ID3v1 tags will be updated but not added
          if 2, ID3v1 tags will be created and/or updated
    v2 -- version of ID3v2 tags (3 or 4).

    By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3
    tags, you must call method update_to_v23 before saving the file.

    v23_sep -- the separator used to join multiple text values
        if v2_version == 3. Defaults to '/' but if it's None
        will be the ID3v2v2.4 null separator.

    The lack of a way to update only an ID3v1 tag is intentional.
    """
    framedata = self._prepare_framedata(v2_version, v23_sep)
    framesize = len(framedata)

    # Nothing to write: remove any existing tag instead. A missing
    # file is fine; any other error propagates.
    if not framedata:
        try:
            self.delete(filename)
        except EnvironmentError as err:
            from errno import ENOENT
            if err.errno != ENOENT:
                raise
        return

    if filename is None:
        filename = self.filename
    try:
        f = open(filename, "rb+")
    except IOError as err:
        from errno import ENOENT
        if err.errno != ENOENT:
            raise
        # NOTE(review): the 'ab' handle is rebound without being
        # closed, so it leaks until garbage collection.
        f = open(filename, "ab")  # create, then reopen
        f = open(filename, "rb+")
    try:
        idata = f.read(10)
        header = self._prepare_id3_header(idata, framesize, v2_version)
        header, outsize, insize = header

        # Pad the rendered frames out to the chosen tag size.
        data = header + framedata + (b"\x00" * (outsize - framesize))

        if insize < outsize:
            # Tag grew: open up room after the existing header+body.
            insert_bytes(f, outsize - insize, insize + 10)
        f.seek(0)
        f.write(data)

        try:
            f.seek(-128, 2)
        except IOError as err:
            # If the file is too small, that's OK - it just means
            # we're certain it doesn't have a v1 tag.
            from errno import EINVAL
            if err.errno != EINVAL:
                # If we failed to see for some other reason, bail out.
                raise
            # Since we're sure this isn't a v1 tag, don't read it.
            f.seek(0, 2)

        data = f.read(128)
        # Locate an existing v1 tag anywhere in the last 128 bytes.
        try:
            idx = data.index(b"TAG")
        except ValueError:
            offset = 0
            has_v1 = False
        else:
            offset = idx - len(data)
            has_v1 = True

        f.seek(offset, 2)
        # Update an existing v1 tag, or create one if requested (v1 == 2);
        # otherwise truncate it away.
        if v1 == 1 and has_v1 or v1 == 2:
            f.write(MakeID3v1(self))
        else:
            f.truncate()
    finally:
        f.close()
def test_smaller_than_file_to_end(self):
    """A 4-byte insert at offset 6 shifts and duplicates the tail."""
    o = self.file(b'abcdefghij')
    insert_bytes(o, 4, 6)
    # assertEqual instead of the assertEquals alias, which is
    # deprecated and removed in Python 3.12.
    self.assertEqual(b'abcdefghijghij', self.read(o))
def test_smaller_than_file_at_beginning(self):
    """A 3-byte insert at offset 0 leaves a copy of the first three
    bytes in the opened gap."""
    payload = b'abcdefghij'
    with self.file(payload) as handle:
        insert_bytes(handle, 3, 0)
        self.assertEqual(b'abcabcdefghij', self.read(handle))
# Fragment from the interior of a Python 2 ID3.save implementation:
# computes the on-disk tag size, grows the file if needed, writes the
# v2 tag, then appends/updates the ID3v1 tag at the end of the file.
# No existing tag: pretend the old tag body had size -10 so that the
# 10-byte header is accounted for below.
if id3 != 'ID3':
    insize = -10
if insize >= framesize:
    # Old tag area is big enough; reuse it as-is.
    outsize = insize
else:
    # Round the new size up to the next 1KiB boundary and pad.
    outsize = (framesize + 1023) & ~0x3FF
    framedata += '\x00' * (outsize - framesize)

framesize = BitPaddedInt.to_str(outsize, width=4)
flags = 0
header = pack('>3sBBB4s', 'ID3', v2, 0, flags, framesize)
data = header + framedata

if (insize < outsize):
    # Tag grew: open up room after the existing header+body.
    insert_bytes(f, outsize - insize, insize + 10)
f.seek(0)
f.write(data)

try:
    f.seek(-128, 2)
except IOError, err:
    from errno import EINVAL
    if err.errno != EINVAL:
        raise
    # File shorter than 128 bytes: no v1 tag can exist.
    f.seek(0, 2)  # ensure read won't get "TAG"

if f.read(3) == "TAG":
    f.seek(-128, 2)
    if v1 > 0:
        f.write(MakeID3v1(self))
def test_smaller_than_file_across_end(self):
    """An insert straddling EOF pads with NULs and shifts the tail."""
    payload = b'abcdefghij'
    with self.file(payload) as handle:
        insert_bytes(handle, 4, 8)
        self.assertEqual(b'abcdefghij\x00\x00ij', self.read(handle))
def save(self):
    # Write the current ASF tags back to self.filename: route each
    # attribute to a header object that can legally carry it, create
    # missing objects, render the header and rewrite it in place.

    # Move attributes to the right objects
    self.to_content_description = {}
    self.to_extended_content_description = {}
    self.to_metadata = {}
    self.to_metadata_library = []
    for name, value in self.tags:
        # GUID values and anything over 64KiB must go to the library.
        library_only = (value.data_size() > 0xFFFF or value.TYPE == GUID)
        # Only plain unicode values may live in the content description.
        can_cont_desc = value.TYPE == UNICODE

        if library_only or value.language is not None:
            self.to_metadata_library.append((name, value))
        elif value.stream is not None:
            if name not in self.to_metadata:
                self.to_metadata[name] = value
            else:
                # Duplicate name: only the library allows repeats.
                self.to_metadata_library.append((name, value))
        elif name in ContentDescriptionObject.NAMES:
            if name not in self.to_content_description and can_cont_desc:
                self.to_content_description[name] = value
            else:
                self.to_metadata_library.append((name, value))
        else:
            if name not in self.to_extended_content_description:
                self.to_extended_content_description[name] = value
            else:
                self.to_metadata_library.append((name, value))

    # Add missing objects
    if not self.content_description_obj:
        self.content_description_obj = \
            ContentDescriptionObject()
        self.objects.append(self.content_description_obj)
    if not self.extended_content_description_obj:
        self.extended_content_description_obj = \
            ExtendedContentDescriptionObject()
        self.objects.append(self.extended_content_description_obj)
    if not self.header_extension_obj:
        self.header_extension_obj = \
            HeaderExtensionObject()
        self.objects.append(self.header_extension_obj)
    if not self.metadata_obj:
        self.metadata_obj = \
            MetadataObject()
        self.header_extension_obj.objects.append(self.metadata_obj)
    if not self.metadata_library_obj:
        self.metadata_library_obj = \
            MetadataLibraryObject()
        self.header_extension_obj.objects.append(self.metadata_library_obj)

    # Render the header: GUID + size/count prefix + reserved bytes + body.
    data = b"".join([obj.render(self) for obj in self.objects])
    data = (HeaderObject.GUID +
            struct.pack("<QL", len(data) + 30, len(self.objects)) +
            b"\x01\x02" + data)

    with open(self.filename, "rb+") as fileobj:
        size = len(data)
        # Grow or shrink the file so the new header fits exactly.
        if size > self.size:
            insert_bytes(fileobj, size - self.size, self.size)
        if size < self.size:
            delete_bytes(fileobj, self.size - size, 0)
        fileobj.seek(0)
        fileobj.write(data)

    # Remember the new header geometry for subsequent saves.
    self.size = size
    self.num_objects = len(self.objects)
def save(self, filename=None, v1=0): """Save changes to a file. If no filename is given, the one most recently loaded is used. Keyword arguments: v1 -- if 0, ID3v1 tags will be removed if 1, ID3v1 tags will be updated but not added if 2, ID3v1 tags will be created and/or updated The lack of a way to update only an ID3v1 tag is intentional. """ # Sort frames by 'importance' order = ["TIT2", "TPE1", "TRCK", "TALB", "TPOS", "TDRC", "TCON"] order = dict(zip(order, range(len(order)))) last = len(order) frames = self.items() frames.sort(lambda a, b: cmp(order.get(a[0][:4], last), order.get(b[0][:4], last))) framedata = [self.__save_frame(frame) for (key, frame) in frames] framedata.extend([data for data in self.unknown_frames if len(data) > 10]) framedata = "".join(framedata) framesize = len(framedata) if filename is None: filename = self.filename f = open(filename, "rb+") try: idata = f.read(10) try: id3, vmaj, vrev, flags, insize = struct.unpack(">3sBBB4s", idata) except struct.error: id3, insize = "", 0 insize = BitPaddedInt(insize) if id3 != "ID3": insize = -10 if insize >= framesize: outsize = insize else: outsize = (framesize + 1023) & ~0x3FF framedata += "\x00" * (outsize - framesize) framesize = BitPaddedInt.to_str(outsize, width=4) flags = 0 header = struct.pack(">3sBBB4s", "ID3", 4, 0, flags, framesize) data = header + framedata if insize < outsize: insert_bytes(f, outsize - insize, insize + 10) f.seek(0) try: f.seek(-128, 2) except IOError, err: if err.errno != EINVAL: raise f.seek(0, 2) # ensure read won't get "TAG" if f.read(3) == "TAG": f.seek(-128, 2) if v1 > 0: f.write(MakeID3v1(self)) else: f.truncate() elif v1 == 2: f.seek(0, 2) f.write(MakeID3v1(self))
def save(self, filename=None, v1=1, v2_version=4, v23_sep='/', padding=None): """Save changes to a file. Args: filename: Filename to save the tag to. If no filename is given, the one most recently loaded is used. v1 (ID3v1SaveOptions): if 0, ID3v1 tags will be removed. if 1, ID3v1 tags will be updated but not added. if 2, ID3v1 tags will be created and/or updated v2 (int): version of ID3v2 tags (3 or 4). v23_sep (str): the separator used to join multiple text values if v2_version == 3. Defaults to '/' but if it's None will be the ID3v2v2.4 null separator. padding (function): A function taking a PaddingInfo which should return the amount of padding to use. If None (default) will default to something reasonable. By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3 tags, you must call method update_to_v23 before saving the file. The lack of a way to update only an ID3v1 tag is intentional. Can raise id3.error. """ if filename is None: filename = self.filename try: f = open(filename, 'rb+') except IOError as err: from errno import ENOENT if err.errno != ENOENT: raise f = open(filename, 'ab') # create, then reopen f = open(filename, 'rb+') try: try: header = ID3Header(f) except ID3NoHeaderError: old_size = 0 else: old_size = header.size data = self._prepare_data( f, 0, old_size, v2_version, v23_sep, padding) new_size = len(data) if (old_size < new_size): insert_bytes(f, new_size - old_size, old_size) elif (old_size > new_size): delete_bytes(f, old_size - new_size, new_size) f.seek(0) f.write(data) self.__save_v1(f, v1) finally: f.close()
def save(self, filename=None, v1=1, v2=4):
    """Save changes to a file.

    If no filename is given, the one most recently loaded is used.

    Keyword arguments:
    v1 -- if 0, ID3v1 tags will be removed
          if 1, ID3v1 tags will be updated but not added
          if 2, ID3v1 tags will be created and/or updated
    v2 -- version of ID3v2 tags (3 or 4).

    By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3
    tags, you must call method update_to_v23 before saving the file.

    The lack of a way to update only an ID3v1 tag is intentional.
    """
    # Sort frames by 'importance'
    order = ["TIT2", "TPE1", "TRCK", "TALB", "TPOS", "TDRC", "TCON"]
    order = dict(zip(order, range(len(order))))
    last = len(order)
    frames = self.items()
    # NOTE(review): cmp/comparator sort is Python-2-only, matching the
    # rest of this implementation.
    frames.sort(lambda a, b: cmp(order.get(a[0][:4], last),
                                 order.get(b[0][:4], last)))

    framedata = [self.__save_frame(frame, v2) for (key, frame) in frames]
    # Keep unknown frames; anything of 10 bytes or less is just an
    # (empty) frame header.
    framedata.extend([data for data in self.unknown_frames
                      if len(data) > 10])
    if not framedata:
        # Nothing to write: remove any existing tag instead, ignoring
        # a missing file.
        try:
            self.delete(filename)
        except EnvironmentError as err:
            from errno import ENOENT
            if err.errno != ENOENT:
                raise
        return

    framedata = ''.join(framedata)
    framesize = len(framedata)

    if filename is None:
        filename = self.filename
    try:
        f = open(filename, 'rb+')
    except IOError as err:
        from errno import ENOENT
        if err.errno != ENOENT:
            raise
        f = open(filename, 'ab')  # create, then reopen
        f = open(filename, 'rb+')
    try:
        idata = f.read(10)
        try:
            id3, vmaj, vrev, flags, insize = unpack('>3sBBB4s', idata)
        except struct.error:
            id3, insize = '', 0
        insize = BitPaddedInt(insize)
        if id3 != 'ID3':
            # No existing tag: -10 cancels out the header size added to
            # the insert offset below.
            insize = -10

        if insize >= framesize:
            # Existing tag is big enough; reuse its size as padding.
            outsize = insize
        else:
            # Round up to the next kilobyte boundary to leave padding.
            outsize = (framesize + 1023) & ~0x3FF
        framedata += '\x00' * (outsize - framesize)

        framesize = BitPaddedInt.to_str(outsize, width=4)
        flags = 0
        header = pack('>3sBBB4s', 'ID3', v2, 0, flags, framesize)
        data = header + framedata

        if (insize < outsize):
            insert_bytes(f, outsize - insize, insize + 10)
        f.seek(0)
        f.write(data)

        # Handle the trailing ID3v1 tag (last 128 bytes), per v1 policy.
        try:
            f.seek(-128, 2)
        except IOError as err:
            from errno import EINVAL
            if err.errno != EINVAL:
                raise
            f.seek(0, 2)  # ensure read won't get "TAG"

        if f.read(3) == "TAG":
            f.seek(-128, 2)
            if v1 > 0:
                f.write(MakeID3v1(self))
            else:
                f.truncate()
        elif v1 == 2:
            f.seek(0, 2)
            f.write(MakeID3v1(self))
    finally:
        f.close()
def test_smaller_than_file_at_end(self): with self.file(b'abcdefghij') as o: insert_bytes(o, 3, 10) self.assertEqual(b'abcdefghij\x00\x00\x00', self.read(o))
def test_smaller_than_file_at_end(self): o = self.file(b'abcdefghij') insert_bytes(o, 3, 10) self.assertEquals(b'abcdefghij\x00\x00\x00', self.read(o))
def test_insert_before_one(self): o = self.file(b'a') insert_bytes(o, 8, 0) self.assertEquals(b'a' + b'\x00' * 7 + b'a', self.read(o))
def test_insert_into_empty(self): with self.file(b'') as o: insert_bytes(o, 8, 0) self.assertEqual(b'\x00' * 8, self.read(o))
def test_smaller_than_file_middle(self): o = self.file(b'abcdefghij') insert_bytes(o, 4, 4) self.assertEquals(b'abcdefghefghij', self.read(o))
def test_insert_before_one(self): with self.file(b'a') as o: insert_bytes(o, 8, 0) self.assertEqual(b'a' + b'\x00' * 7 + b'a', self.read(o))
def test_smaller_than_file_across_end(self): o = self.file(b'abcdefghij') insert_bytes(o, 4, 8) self.assertEquals(b'abcdefghij\x00\x00ij', self.read(o))
def test_insert_after_one(self): with self.file(b'a') as o: insert_bytes(o, 8, 1) self.assertEqual(b'a' + b'\x00' * 8, self.read(o))
def test_smaller_than_file_at_beginning(self): o = self.file(b'abcdefghij') insert_bytes(o, 3, 0) self.assertEquals(b'abcabcdefghij', self.read(o))
def test_smaller_than_file_middle(self): with self.file(b'abcdefghij') as o: insert_bytes(o, 4, 4) self.assertEqual(b'abcdefghefghij', self.read(o))
def test_insert_into_empty(self): o = self.file(b'') insert_bytes(o, 8, 0) self.assertEquals(b'\x00' * 8, self.read(o))
def test_smaller_than_file_to_end(self): with self.file(b'abcdefghij') as o: insert_bytes(o, 4, 6) self.assertEqual(b'abcdefghijghij', self.read(o))