def save(self, filename=None, deleteid3=False):
    """Save metadata blocks to a file.

    If no filename is given, the one most recently loaded is used.

    Keyword arguments:
    filename -- file to write to (defaults to the loaded file)
    deleteid3 -- if true, also strip any ID3v2 tag preceding the FLAC
        stream and any trailing ID3v1 ("TAG") block
    """
    if filename is None:
        filename = self.filename
    f = open(filename, 'rb+')
    try:
        # Ensure we've got padding at the end, and only at the end.
        # If adding makes it too large, we'll scale it down later.
        self.metadata_blocks.append(Padding(b'\x00' * 1020))
        MetadataBlock.group_padding(self.metadata_blocks)

        # header is the offset just past the "fLaC" marker: 4 bytes
        # plus any leading ID3v2 data (see the seek(header - 4) below).
        header = self.__check_header(f)
        available = self.__find_audio_offset(f) - header  # "fLaC" and maybe ID3
        data = MetadataBlock.writeblocks(self.metadata_blocks)

        # Delete ID3v2: reclaim everything before the 4-byte marker.
        if deleteid3 and header > 4:
            available += header - 4
            header = 4

        if len(data) > available:
            # If we have too much data, see if we can reduce padding.
            padding = self.metadata_blocks[-1]
            newlength = padding.length - (len(data) - available)
            if newlength > 0:
                padding.length = newlength
                data = MetadataBlock.writeblocks(self.metadata_blocks)
                assert len(data) == available
        elif len(data) < available:
            # If we have too little data, increase padding.
            self.metadata_blocks[-1].length += (available - len(data))
            data = MetadataBlock.writeblocks(self.metadata_blocks)
            assert len(data) == available

        if len(data) != available:
            # We couldn't reduce the padding enough: grow the file so
            # the new metadata fits before the audio data.
            diff = (len(data) - available)
            insert_bytes(f, diff, header)

        # Rewrite the marker and all metadata blocks in place.
        f.seek(header - 4)
        f.write(b"fLaC" + data)

        # Delete ID3v1 (a 128-byte "TAG" block at the end of the file).
        if deleteid3:
            try:
                f.seek(-128, 2)
            except IOError:
                # File shorter than 128 bytes -- no v1 tag possible.
                pass
            else:
                if f.read(3) == b"TAG":
                    f.seek(-128, 2)
                    f.truncate()
    finally:
        f.close()
def test_insert_6106_79_51760(self):
    # This appears to be due to ANSI C limitations in read/write on rb+
    # files. The problematic behavior only showed up in our mmap fallback
    # code for transfers of this or similar sizes.
    payload = "".join(map(str, range(12574))).encode('ascii')  # 51760 bytes
    fileobj = self.data_to_file(payload)
    insert_bytes(fileobj, 6106, 79)
    self.failUnless(payload[:6106 + 79] + payload[79:] == self.read(fileobj))
def replace(cls, fileobj, old_pages, new_pages): """Replace old_pages with new_pages within fileobj. old_pages must have come from reading fileobj originally. new_pages are assumed to have the 'same' data as old_pages, and so the serial and sequence numbers will be copied, as will the flags for the first and last pages. fileobj will be resized and pages renumbered as necessary. As such, it must be opened r+b or w+b. """ # Number the new pages starting from the first old page. first = old_pages[0].sequence for page, seq in zip(new_pages, range(first, first + len(new_pages))): page.sequence = seq page.serial = old_pages[0].serial new_pages[0].first = old_pages[0].first new_pages[0].last = old_pages[0].last new_pages[0].continued = old_pages[0].continued new_pages[-1].first = old_pages[-1].first new_pages[-1].last = old_pages[-1].last new_pages[-1].complete = old_pages[-1].complete if not new_pages[-1].complete and len(new_pages[-1].packets) == 1: new_pages[-1].position = -1 new_data = b"".join(cls.write(p) for p in new_pages) # Make room in the file for the new data. delta = len(new_data) fileobj.seek(old_pages[0].offset, 0) insert_bytes(fileobj, delta, old_pages[0].offset) fileobj.seek(old_pages[0].offset, 0) fileobj.write(new_data) new_data_end = old_pages[0].offset + delta # Go through the old pages and delete them. Since we shifted # the data down the file, we need to adjust their offsets. We # also need to go backwards, so we don't adjust the deltas of # the other pages. old_pages.reverse() for old_page in old_pages: adj_offset = old_page.offset + delta delete_bytes(fileobj, old_page.size, adj_offset) # Finally, if there's any discrepency in length, we need to # renumber the pages for the logical stream. if len(old_pages) != len(new_pages): fileobj.seek(new_data_end, 0) serial = new_pages[-1].serial sequence = new_pages[-1].sequence + 1 cls.renumber(fileobj, serial, sequence)
def __save_new(self, fileobj, atoms, ilst):
    """Insert a brand-new meta/ilst atom tree into the file.

    Renders a 'meta' atom (with an 'hdlr' header and trailing padding)
    around the given rendered 'ilst' data and splices it into
    moov.udta, creating 'udta' if it does not exist.
    """
    hdlr = Atom.render(b"hdlr", b"\x00" * 8 + b"mdirappl" + b"\x00" * 9)
    meta = Atom.render(
        b"meta", b"\x00\x00\x00\x00" + hdlr + ilst + self.__pad_ilst(ilst))
    try:
        path = atoms.path(b"moov", b"udta")
    except KeyError:
        # moov.udta not found -- create one
        path = atoms.path(b"moov")
        meta = Atom.render(b"udta", meta)
    # + 8 skips past the parent atom's header so the new child lands
    # inside it (assumes an 8-byte size+name header -- TODO confirm).
    offset = path[-1].offset + 8
    insert_bytes(fileobj, len(meta), offset)
    fileobj.seek(offset)
    fileobj.write(meta)
    # Grow every ancestor's size field, then fix absolute offsets
    # elsewhere in the file that the insertion shifted.
    self.__update_parents(fileobj, path, len(meta))
    self.__update_offsets(fileobj, atoms, len(meta), offset)
def save(self, filename=None, v2_version=4, v23_sep='/'):
    """Save ID3v2 data to the AIFF file.

    Keyword arguments:
    filename -- file to write to (defaults to the loaded file)
    v2_version -- ID3v2 version to write (3 or 4)
    v23_sep -- separator used to join multiple text values when
        saving as ID3v2.3
    """
    framedata = self._prepare_framedata(v2_version, v23_sep)
    framesize = len(framedata)

    if filename is None:
        filename = self.filename

    # Unlike the parent ID3.save method, we won't save to a blank file
    # since we would have to construct a empty AIFF file
    fileobj = open(filename, 'rb+')
    iff_file = IFFFile(fileobj)
    try:
        if u'ID3' not in iff_file:
            iff_file.insert_chunk(u'ID3')

        chunk = iff_file[u'ID3']
        fileobj.seek(chunk.data_offset)

        # _prepare_id3_header reads the existing 10-byte tag header
        # and returns (rendered header, padded tag size, _).
        header = fileobj.read(10)
        header = self._prepare_id3_header(header, framesize, v2_version)
        header, new_size, _ = header

        data = header + framedata + (b'\x00' * (new_size - framesize))

        # Include ID3 header size in 'new_size' calculation
        new_size += 10

        # Expand the chunk if necessary, including pad byte
        # (IFF chunks are padded to an even length, hence new_size % 2).
        if new_size > chunk.size:
            insert_at = chunk.offset + chunk.size
            insert_size = new_size - chunk.size + new_size % 2
            insert_bytes(fileobj, insert_size, insert_at)
            chunk.resize(new_size)

        fileobj.seek(chunk.data_offset)
        fileobj.write(data)
    finally:
        fileobj.close()
def __save_existing(self, fileobj, atoms, path, data):
    """Replace the existing ilst atom (plus adjacent 'free' padding)
    with the newly rendered ilst data, resizing the file as needed.

    path is the atom path down to (and including) the old ilst; data
    is the rendered replacement. Parent sizes and absolute offsets are
    fixed up afterwards.
    """
    # Replace the old ilst atom.
    ilst = path.pop()
    offset = ilst.offset
    length = ilst.length

    # Check for padding "free" atoms adjacent to ilst; they can be
    # consumed as part of the region we overwrite.
    meta = path[-1]
    index = meta.children.index(ilst)
    # BUG FIX: the original used try/except IndexError around
    # meta.children[index - 1], but when index == 0 Python's negative
    # indexing wraps to the *last* child instead of raising, so the
    # guard never fired and an unrelated trailing "free" atom could be
    # misidentified as preceding padding. Guard explicitly instead.
    if index > 0:
        prev = meta.children[index - 1]
        if prev.name == b"free":
            offset = prev.offset
            length += prev.length
    try:
        next = meta.children[index + 1]
        if next.name == b"free":
            length += next.length
    except IndexError:
        pass

    delta = len(data) - length
    if delta > 0 or (delta < 0 and delta > -8):
        # Data grew, or shrank by too little to fit a padding atom:
        # pad the rendered data and grow the file.
        data += self.__pad_ilst(data)
        delta = len(data) - length
        insert_bytes(fileobj, delta, offset)
    elif delta < 0:
        # Data shrank: fill the slack with padding (-delta - 8 leaves
        # room for the padding atom's own header -- TODO confirm).
        data += self.__pad_ilst(data, -delta - 8)
        delta = 0

    fileobj.seek(offset)
    fileobj.write(data)
    self.__update_parents(fileobj, path, delta)
    self.__update_offsets(fileobj, atoms, delta, offset)
def test_many_changes(self, num_runs=5, num_changes=300,
                      min_change_size=500, max_change_size=1000,
                      min_buffer_size=1, max_buffer_size=2000):
    # The ranges must nest sensibly or the test exercises nothing.
    self.failUnless(
        min_buffer_size < min_change_size and
        max_buffer_size > max_change_size and
        min_change_size < max_change_size and
        min_buffer_size < max_buffer_size,
        "Given testing parameters make this test useless")
    for _run in range(num_runs):
        data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ" * 1024
        fobj = self.data_to_file(data)
        filesize = len(data)

        # Build a random list of (offset, size) insertions up front.
        changes = []
        for _ in range(num_changes):
            size = random.randrange(min_change_size, max_change_size)
            offset = random.randrange(0, filesize)
            filesize += size
            changes.append((offset, size))

        # Apply every insertion with a random buffer size, then check
        # the content changed and the file grew to the expected size.
        for offset, size in changes:
            buffer_size = random.randrange(min_buffer_size, max_buffer_size)
            insert_bytes(fobj, size, offset, BUFFER_SIZE=buffer_size)
        fobj.seek(0)
        self.failIfEqual(fobj.read(len(data)), data)
        fobj.seek(0, 2)
        self.failUnlessEqual(fobj.tell(), filesize)

        # Undo the insertions in reverse order; the original bytes
        # must come back exactly.
        changes.reverse()
        for offset, size in changes:
            buffer_size = random.randrange(min_buffer_size, max_buffer_size)
            delete_bytes(fobj, size, offset, BUFFER_SIZE=buffer_size)
        fobj.seek(0)
        self.failUnless(fobj.read() == data)
def __save_existing(self, fileobj, atoms, path, data):
    """Replace the existing ilst atom (plus adjacent 'free' padding)
    with the newly rendered ilst data, resizing the file as needed.

    path is the atom path down to (and including) the old ilst; data
    is the rendered replacement. Parent sizes and absolute offsets are
    fixed up afterwards.
    """
    # Replace the old ilst atom.
    ilst = path.pop()
    offset = ilst.offset
    length = ilst.length

    # Check for padding "free" atoms adjacent to ilst; they can be
    # consumed as part of the region we overwrite.
    meta = path[-1]
    index = meta.children.index(ilst)
    # BUG FIX: the original used try/except IndexError around
    # meta.children[index-1], but when index == 0 Python's negative
    # indexing wraps to the *last* child instead of raising, so the
    # guard never fired and an unrelated trailing "free" atom could be
    # misidentified as preceding padding. Guard explicitly instead.
    if index > 0:
        prev = meta.children[index - 1]
        if prev.name == b"free":
            offset = prev.offset
            length += prev.length
    try:
        next = meta.children[index + 1]
        if next.name == b"free":
            length += next.length
    except IndexError:
        pass

    delta = len(data) - length
    if delta > 0 or (delta < 0 and delta > -8):
        # Data grew, or shrank by too little to fit a padding atom:
        # pad the rendered data and grow the file.
        data += self.__pad_ilst(data)
        delta = len(data) - length
        insert_bytes(fileobj, delta, offset)
    elif delta < 0:
        # Data shrank: fill the slack with padding (-delta - 8 leaves
        # room for the padding atom's own header -- TODO confirm).
        data += self.__pad_ilst(data, -delta - 8)
        delta = 0

    fileobj.seek(offset)
    fileobj.write(data)
    self.__update_parents(fileobj, path, delta)
    self.__update_offsets(fileobj, atoms, delta, offset)
def save(self, filename=None, v1=1, v2_version=4, v23_sep='/'):
    """Save changes to a file.

    If no filename is given, the one most recently loaded is used.

    Keyword arguments:
    v1 -- if 0, ID3v1 tags will be removed
          if 1, ID3v1 tags will be updated but not added
          if 2, ID3v1 tags will be created and/or updated
    v2_version -- version of ID3v2 tags (3 or 4).

    By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3
    tags, you must call method update_to_v23 before saving the file.

    v23_sep -- the separator used to join multiple text values
               if v2_version == 3. Defaults to '/' but if it's None
               will be the ID3v2v2.4 null separator.

    The lack of a way to update only an ID3v1 tag is intentional.
    """
    if v2_version == 3:
        version = self._V23
    elif v2_version == 4:
        version = self._V24
    else:
        raise ValueError("Only 3 or 4 allowed for v2_version")

    # Sort frames by 'importance'
    order = ["TIT2", "TPE1", "TRCK", "TALB", "TPOS", "TDRC", "TCON"]
    order = {b: a for a, b in enumerate(order)}
    last = len(order)
    frames = sorted(self.items(),
                    key=lambda a: order.get(a[0][:4], last))

    framedata = [self.__save_frame(frame, version=version, v23_sep=v23_sep)
                 for (key, frame) in frames]

    # only write unknown frames if they were loaded from the version
    # we are saving with or upgraded to it
    if self.__unknown_version == version:
        framedata.extend(data for data in self.unknown_frames
                         if len(data) > 10)

    if not framedata:
        # Nothing to write: remove any existing tag instead.
        try:
            self.delete(filename)
        except EnvironmentError as err:
            from errno import ENOENT
            if err.errno != ENOENT:
                raise
        return

    framedata = b''.join(framedata)
    framesize = len(framedata)

    if filename is None:
        filename = self.filename
    try:
        f = open(filename, 'rb+')
    except IOError as err:
        from errno import ENOENT
        if err.errno != ENOENT:
            raise
        # NOTE(review): the 'ab' handle is rebound without being
        # closed explicitly; it is only used to create the file.
        f = open(filename, 'ab')  # create, then reopen
        f = open(filename, 'rb+')
    try:
        # Parse any existing ID3v2 header to learn the old tag size.
        idata = f.read(10)
        try:
            id3, vmaj, vrev, flags, insize = unpack('>3sBBB4s', idata)
        except struct.error:
            id3, insize = b'', 0
        insize = BitPaddedInt(insize)
        if id3 != b'ID3':
            # No existing tag; -10 cancels out the 10-byte header in
            # the size arithmetic below.
            insize = -10

        if insize >= framesize:
            # Existing tag is big enough; reuse its size (the slack
            # becomes padding).
            outsize = insize
        else:
            # Round the new tag size up to the next 1 KiB boundary.
            outsize = (framesize + 1023) & ~0x3FF

        framedata += b'\x00' * (outsize - framesize)
        framesize = BitPaddedInt.to_bytes(outsize, width=4)
        flags = 0
        header = pack('>3sBBB4s', b'ID3', v2_version, 0, flags, framesize)
        data = header + framedata

        if (insize < outsize):
            insert_bytes(f, outsize-insize, insize+10)
        f.seek(0)
        f.write(data)

        # Handle the trailing ID3v1 tag according to the v1 argument.
        try:
            f.seek(-128, 2)
        except IOError as err:
            # If the file is too small, that's OK - it just means
            # we're certain it doesn't have a v1 tag.
            from errno import EINVAL
            if err.errno != EINVAL:
                # If we failed to see for some other reason, bail out.
                raise
            # Since we're sure this isn't a v1 tag, don't read it.
            f.seek(0, 2)
        data = f.read(128)
        try:
            idx = data.index(b"TAG")
        except ValueError:
            offset = 0
            has_v1 = False
        else:
            offset = idx - len(data)
            has_v1 = True

        f.seek(offset, 2)
        if v1 == 1 and has_v1 or v1 == 2:
            f.write(MakeID3v1(self))
        else:
            f.truncate()
    finally:
        f.close()
def test_smaller_than_file_at_beginning(self):
    # Inserting 3 bytes at offset 0 duplicates the leading bytes.
    fileobj = self.data_to_file(b'abcdefghij')
    insert_bytes(fileobj, 3, 0)
    expected = b'abcabcdefghij'
    self.assertEquals(expected, self.read(fileobj))
def test_smaller_than_file_at_end(self):
    # Inserting at EOF extends the file with NUL bytes.
    fileobj = self.data_to_file(b'abcdefghij')
    insert_bytes(fileobj, 3, 10)
    expected = b'abcdefghij\x00\x00\x00'
    self.assertEquals(expected, self.read(fileobj))
def test_smaller_than_file_to_end(self):
    # Inserting 4 bytes at offset 6 duplicates the last 4 bytes.
    fileobj = self.data_to_file(b'abcdefghij')
    insert_bytes(fileobj, 4, 6)
    expected = b'abcdefghijghij'
    self.assertEquals(expected, self.read(fileobj))
def test_insert_after_one(self):
    # Inserting past the single byte appends 8 NUL bytes.
    fileobj = self.data_to_file(b'a')
    insert_bytes(fileobj, 8, 1)
    expected = b'a' + b'\x00' * 8
    self.assertEquals(expected, self.read(fileobj))
def test_insert_before_one(self):
    # Inserting before the single byte shifts it to the end; the
    # inserted region is the old byte followed by NUL fill.
    fileobj = self.data_to_file(b'a')
    insert_bytes(fileobj, 8, 0)
    expected = b'a' + b'\x00' * 7 + b'a'
    self.assertEquals(expected, self.read(fileobj))
def test_insert_into_empty(self):
    # Inserting into an empty file yields only NUL bytes.
    fileobj = self.data_to_file(b'')
    insert_bytes(fileobj, 8, 0)
    expected = b'\x00' * 8
    self.assertEquals(expected, self.read(fileobj))
def save(self, filename=None, deleteid3=False):
    """Save metadata blocks to a file.

    If no filename is given, the one most recently loaded is used.

    Keyword arguments:
    filename -- file to write to (defaults to the loaded file)
    deleteid3 -- if true, also strip any ID3v2 tag preceding the FLAC
        stream and any trailing ID3v1 ("TAG") block
    """
    if filename is None:
        filename = self.filename
    f = open(filename, 'rb+')
    try:
        # Ensure we've got padding at the end, and only at the end.
        # If adding makes it too large, we'll scale it down later.
        self.metadata_blocks.append(Padding(b'\x00' * 1020))
        MetadataBlock.group_padding(self.metadata_blocks)

        # header is the offset just past the "fLaC" marker: 4 bytes
        # plus any leading ID3v2 data (see the seek(header - 4) below).
        header = self.__check_header(f)
        # "fLaC" and maybe ID3
        available = self.__find_audio_offset(f) - header
        data = MetadataBlock.writeblocks(self.metadata_blocks)

        # Delete ID3v2: reclaim everything before the 4-byte marker.
        if deleteid3 and header > 4:
            available += header - 4
            header = 4

        if len(data) > available:
            # If we have too much data, see if we can reduce padding.
            padding = self.metadata_blocks[-1]
            newlength = padding.length - (len(data) - available)
            if newlength > 0:
                padding.length = newlength
                data = MetadataBlock.writeblocks(self.metadata_blocks)
                assert len(data) == available
        elif len(data) < available:
            # If we have too little data, increase padding.
            self.metadata_blocks[-1].length += (available - len(data))
            data = MetadataBlock.writeblocks(self.metadata_blocks)
            assert len(data) == available

        if len(data) != available:
            # We couldn't reduce the padding enough: grow the file so
            # the new metadata fits before the audio data.
            diff = (len(data) - available)
            insert_bytes(f, diff, header)

        # Rewrite the marker and all metadata blocks in place.
        f.seek(header - 4)
        f.write(b"fLaC" + data)

        # Delete ID3v1 (a 128-byte "TAG" block at the end of the file).
        if deleteid3:
            try:
                f.seek(-128, 2)
            except IOError:
                # File shorter than 128 bytes -- no v1 tag possible.
                pass
            else:
                if f.read(3) == b"TAG":
                    f.seek(-128, 2)
                    f.truncate()
    finally:
        f.close()
def save(self):
    """Save tag changes back to the loaded ASF file.

    Distributes attributes across the Extended Content Description,
    Metadata, and Metadata Library objects, re-renders the full header,
    and resizes the file in place to fit it.
    """
    # Move attributes to the right objects
    self.to_extended_content_description = {}
    self.to_metadata = {}
    self.to_metadata_library = []
    for name, value in self.tags._internal:
        if name in _standard_attribute_names:
            continue
        # Oversized values or GUIDs can only live in the library object.
        library_only = (value.data_size() > 0xFFFF or value.TYPE == GUID)
        if (value.language is None and value.stream is None and
                name not in self.to_extended_content_description and
                not library_only):
            self.to_extended_content_description[name] = value
        elif (value.language is None and value.stream is not None and
                name not in self.to_metadata and not library_only):
            self.to_metadata[name] = value
        else:
            self.to_metadata_library.append((name, value))

    # Add missing objects
    if not self.content_description_obj:
        self.content_description_obj = \
            ContentDescriptionObject()
        self.objects.append(self.content_description_obj)
    if not self.extended_content_description_obj:
        self.extended_content_description_obj = \
            ExtendedContentDescriptionObject()
        self.objects.append(self.extended_content_description_obj)
    if not self.header_extension_obj:
        self.header_extension_obj = \
            HeaderExtensionObject()
        self.objects.append(self.header_extension_obj)
    if not self.metadata_obj:
        self.metadata_obj = \
            MetadataObject()
        self.header_extension_obj.objects.append(self.metadata_obj)
    if not self.metadata_library_obj:
        self.metadata_library_obj = \
            MetadataLibraryObject()
        self.header_extension_obj.objects.append(self.metadata_library_obj)

    # Render the header: GUID, 64-bit size + 32-bit object count,
    # then the rendered child objects.
    data = b"".join([obj.render(self) for obj in self.objects])
    data = (HeaderObject.GUID +
            struct.pack("<QL", len(data) + 30, len(self.objects)) +
            b"\x01\x02" + data)

    fileobj = open(self.filename, "rb+")
    try:
        size = len(data)
        # Grow or shrink the region occupied by the old header
        # (self.size) so the new one fits exactly before overwriting.
        if size > self.size:
            insert_bytes(fileobj, size - self.size, self.size)
        if size < self.size:
            delete_bytes(fileobj, self.size - size, 0)
        fileobj.seek(0)
        fileobj.write(data)
    finally:
        fileobj.close()

    self.size = size
    self.num_objects = len(self.objects)
def save(self, filename=None, v1=1, v2_version=4, v23_sep='/'):
    """Save changes to a file.

    If no filename is given, the one most recently loaded is used.

    Keyword arguments:
    v1 -- if 0, ID3v1 tags will be removed
          if 1, ID3v1 tags will be updated but not added
          if 2, ID3v1 tags will be created and/or updated
    v2_version -- version of ID3v2 tags (3 or 4).

    By default Mutagen saves ID3v2.4 tags. If you want to save ID3v2.3
    tags, you must call method update_to_v23 before saving the file.

    v23_sep -- the separator used to join multiple text values
               if v2_version == 3. Defaults to '/' but if it's None
               will be the ID3v2v2.4 null separator.

    The lack of a way to update only an ID3v1 tag is intentional.
    """
    if v2_version == 3:
        version = self._V23
    elif v2_version == 4:
        version = self._V24
    else:
        raise ValueError("Only 3 or 4 allowed for v2_version")

    # Sort frames by 'importance'
    order = ["TIT2", "TPE1", "TRCK", "TALB", "TPOS", "TDRC", "TCON"]
    order = {b: a for a, b in enumerate(order)}
    last = len(order)
    frames = sorted(self.items(),
                    key=lambda a: order.get(a[0][:4], last))

    framedata = [
        self.__save_frame(frame, version=version, v23_sep=v23_sep)
        for (key, frame) in frames
    ]

    # only write unknown frames if they were loaded from the version
    # we are saving with or upgraded to it
    if self.__unknown_version == version:
        framedata.extend(data for data in self.unknown_frames
                         if len(data) > 10)

    if not framedata:
        # Nothing to write: remove any existing tag instead.
        try:
            self.delete(filename)
        except EnvironmentError as err:
            from errno import ENOENT
            if err.errno != ENOENT:
                raise
        return

    framedata = b''.join(framedata)
    framesize = len(framedata)

    if filename is None:
        filename = self.filename
    try:
        f = open(filename, 'rb+')
    except IOError as err:
        from errno import ENOENT
        if err.errno != ENOENT:
            raise
        # NOTE(review): the 'ab' handle is rebound without being
        # closed explicitly; it is only used to create the file.
        f = open(filename, 'ab')  # create, then reopen
        f = open(filename, 'rb+')
    try:
        # Parse any existing ID3v2 header to learn the old tag size.
        idata = f.read(10)
        try:
            id3, vmaj, vrev, flags, insize = unpack('>3sBBB4s', idata)
        except struct.error:
            id3, insize = b'', 0
        insize = BitPaddedInt(insize)
        if id3 != b'ID3':
            # No existing tag; -10 cancels out the 10-byte header in
            # the size arithmetic below.
            insize = -10

        if insize >= framesize:
            # Existing tag is big enough; reuse its size (the slack
            # becomes padding).
            outsize = insize
        else:
            # Round the new tag size up to the next 1 KiB boundary.
            outsize = (framesize + 1023) & ~0x3FF

        framedata += b'\x00' * (outsize - framesize)
        framesize = BitPaddedInt.to_bytes(outsize, width=4)
        flags = 0
        header = pack('>3sBBB4s', b'ID3', v2_version, 0, flags, framesize)
        data = header + framedata

        if (insize < outsize):
            insert_bytes(f, outsize - insize, insize + 10)
        f.seek(0)
        f.write(data)

        # Handle the trailing ID3v1 tag according to the v1 argument.
        try:
            f.seek(-128, 2)
        except IOError as err:
            # If the file is too small, that's OK - it just means
            # we're certain it doesn't have a v1 tag.
            from errno import EINVAL
            if err.errno != EINVAL:
                # If we failed to see for some other reason, bail out.
                raise
            # Since we're sure this isn't a v1 tag, don't read it.
            f.seek(0, 2)
        data = f.read(128)
        try:
            idx = data.index(b"TAG")
        except ValueError:
            offset = 0
            has_v1 = False
        else:
            offset = idx - len(data)
            has_v1 = True

        f.seek(offset, 2)
        if v1 == 1 and has_v1 or v1 == 2:
            f.write(MakeID3v1(self))
        else:
            f.truncate()
    finally:
        f.close()
def save(self, filename=None, v1=1, v2_version=4, v23_sep='/'):
    """Save changes to a file.

    If no filename is given, the one most recently loaded is used.

    Keyword arguments:
    v1 -- if 0, ID3v1 tags will be removed
          if 1, ID3v1 tags will be updated but not added
          if 2, ID3v1 tags will be created and/or updated
    v2_version -- version of ID3v2 tags (3 or 4).

    By default MutagenX saves ID3v2.4 tags. If you want to save ID3v2.3
    tags, you must call method update_to_v23 before saving the file.

    v23_sep -- the separator used to join multiple text values
               if v2_version == 3. Defaults to '/' but if it's None
               will be the ID3v2v2.4 null separator.

    The lack of a way to update only an ID3v1 tag is intentional.
    """
    framedata = self._prepare_framedata(v2_version, v23_sep)
    framesize = len(framedata)

    if not framedata:
        # Nothing to write: remove any existing tag instead.
        try:
            self.delete(filename)
        except EnvironmentError as err:
            from errno import ENOENT
            if err.errno != ENOENT:
                raise
        return

    if filename is None:
        filename = self.filename
    try:
        f = open(filename, 'rb+')
    except IOError as err:
        from errno import ENOENT
        if err.errno != ENOENT:
            raise
        # NOTE(review): the 'ab' handle is rebound without being
        # closed explicitly; it is only used to create the file.
        f = open(filename, 'ab')  # create, then reopen
        f = open(filename, 'rb+')
    try:
        # _prepare_id3_header parses the existing 10-byte tag header
        # and returns (rendered header, new tag size, old tag size).
        idata = f.read(10)
        header = self._prepare_id3_header(idata, framesize, v2_version)
        header, outsize, insize = header

        data = header + framedata + (b'\x00' * (outsize - framesize))

        # If the new tag is bigger than the old one, make room before
        # overwriting (insize + 10 accounts for the old tag header).
        if (insize < outsize):
            insert_bytes(f, outsize-insize, insize+10)
        f.seek(0)
        f.write(data)

        # Handle the trailing ID3v1 tag according to the v1 argument.
        try:
            f.seek(-128, 2)
        except IOError as err:
            # If the file is too small, that's OK - it just means
            # we're certain it doesn't have a v1 tag.
            from errno import EINVAL
            if err.errno != EINVAL:
                # If we failed to see for some other reason, bail out.
                raise
            # Since we're sure this isn't a v1 tag, don't read it.
            f.seek(0, 2)
        data = f.read(128)
        try:
            idx = data.index(b"TAG")
        except ValueError:
            offset = 0
            has_v1 = False
        else:
            offset = idx - len(data)
            has_v1 = True

        f.seek(offset, 2)
        if v1 == 1 and has_v1 or v1 == 2:
            f.write(MakeID3v1(self))
        else:
            f.truncate()
    finally:
        f.close()