def delete(filename, delete_v1=True, delete_v2=True):
    """Remove tags from a file.

    Keyword arguments:

    * delete_v1 -- delete any ID3v1 tag
    * delete_v2 -- delete any ID3v2 tag
    """

    # Use a context manager so the file is closed even if seeking,
    # truncation, or tag removal raises (the original leaked the handle).
    with open(filename, 'rb+') as f:
        if delete_v1:
            # An ID3v1 tag is a fixed 128-byte record at the end of the
            # file, starting with the magic b'TAG'.
            try:
                f.seek(-128, 2)
            except IOError:
                # File shorter than 128 bytes -- no v1 tag possible.
                pass
            else:
                if f.read(3) == b'TAG':
                    f.seek(-128, 2)
                    f.truncate()

        # technically an insize=0 tag is invalid, but we delete it anyway
        # (primarily because we used to write it)
        if delete_v2:
            f.seek(0, 0)
            idata = f.read(10)
            try:
                id3, vmaj, vrev, flags, insize = unpack('>3sBBB4s', idata)
            except struct.error:
                # Header too short / unreadable: mark as "no tag".
                id3, insize = b'', -1
            # The size field is a 28-bit synchsafe integer.
            insize = BitPaddedInt(insize)
            if id3 == b'ID3' and insize >= 0:
                # Remove the header (10 bytes) plus the tag body.
                delete_bytes(f, insize + 10, 0)
def test_delete_6106_79_51760(self):
    """Regression test for a size-specific delete_bytes failure.

    This appears to be due to ANSI C limitations in read/write on rb+
    files. The problematic behavior only showed up in our mmap fallback
    code for transfers of this or similar sizes.
    """
    data = b''.join(str(x).encode('ascii') for x in range(12574))  # 51760 bytes
    # File layout: data[:6185] + data[79:], i.e. bytes [79:6185) appear
    # twice.  Deleting 6106 bytes at offset 79 must restore `data`.
    o = self.data_to_file(data[:6106 + 79] + data[79:])
    delete_bytes(o, 6106, 79)
    # assertEqual replaces the deprecated failUnless alias (removed in
    # Python 3.12) and reports a diff on failure.
    self.assertEqual(data, self.read(o))
def replace(cls, fileobj, old_pages, new_pages): """Replace old_pages with new_pages within fileobj. old_pages must have come from reading fileobj originally. new_pages are assumed to have the 'same' data as old_pages, and so the serial and sequence numbers will be copied, as will the flags for the first and last pages. fileobj will be resized and pages renumbered as necessary. As such, it must be opened r+b or w+b. """ # Number the new pages starting from the first old page. first = old_pages[0].sequence for page, seq in zip(new_pages, range(first, first + len(new_pages))): page.sequence = seq page.serial = old_pages[0].serial new_pages[0].first = old_pages[0].first new_pages[0].last = old_pages[0].last new_pages[0].continued = old_pages[0].continued new_pages[-1].first = old_pages[-1].first new_pages[-1].last = old_pages[-1].last new_pages[-1].complete = old_pages[-1].complete if not new_pages[-1].complete and len(new_pages[-1].packets) == 1: new_pages[-1].position = -1 new_data = b"".join(cls.write(p) for p in new_pages) # Make room in the file for the new data. delta = len(new_data) fileobj.seek(old_pages[0].offset, 0) insert_bytes(fileobj, delta, old_pages[0].offset) fileobj.seek(old_pages[0].offset, 0) fileobj.write(new_data) new_data_end = old_pages[0].offset + delta # Go through the old pages and delete them. Since we shifted # the data down the file, we need to adjust their offsets. We # also need to go backwards, so we don't adjust the deltas of # the other pages. old_pages.reverse() for old_page in old_pages: adj_offset = old_page.offset + delta delete_bytes(fileobj, old_page.size, adj_offset) # Finally, if there's any discrepency in length, we need to # renumber the pages for the logical stream. if len(old_pages) != len(new_pages): fileobj.seek(new_data_end, 0) serial = new_pages[-1].serial sequence = new_pages[-1].sequence + 1 cls.renumber(fileobj, serial, sequence)
def delete(self, filename=None):
    """Remove tags from a file."""
    target = filename or self.filename
    # The context manager closes the file even if tag removal raises,
    # matching the original try/finally.
    with open(target, "r+b") as fileobj:
        parsed = _APEv2Data(fileobj)
        if parsed.start is not None and parsed.size is not None:
            delete_bytes(fileobj, parsed.end - parsed.start, parsed.start)
    self.clear()
def save(self, filename=None):
    """Save changes to a file.

    If no filename is given, the one most recently loaded is used.

    Tags are always written at the end of the file, and include a
    header and a footer.
    """

    filename = filename or self.filename
    try:
        fileobj = open(filename, "r+b")
    except IOError:
        # File doesn't exist (or isn't writable in place): create it.
        fileobj = open(filename, "w+b")
    # try/finally fixes a leak in the original: the handle was never
    # closed if anything between open() and the last write raised.
    try:
        data = _APEv2Data(fileobj)

        if data.is_at_start:
            # Strip an existing tag at the start of the file.
            delete_bytes(fileobj, data.end - data.start, data.start)
        elif data.start is not None:
            fileobj.seek(data.start)
            # Delete an ID3v1 tag if present, too.
            fileobj.truncate()
        fileobj.seek(0, 2)

        # "APE tags items should be sorted ascending by size... This is
        # not a MUST, but STRONGLY recommended. Actually the items should
        # be sorted by importance/byte, but this is not feasible."
        tags = sorted((v._internal(k) for k, v in self.items()), key=len)
        num_tags = len(tags)
        tags = b"".join(tags)

        header = (b"APETAGEX" +
                  # version, tag size, item count, flags
                  struct.pack("<4I", 2000, len(tags) + 32, num_tags,
                              HAS_HEADER | IS_HEADER) +
                  b"\0" * 8)
        fileobj.write(header)

        fileobj.write(tags)

        footer = (b"APETAGEX" +
                  # version, tag size, item count, flags
                  struct.pack("<4I", 2000, len(tags) + 32, num_tags,
                              HAS_HEADER) +
                  b"\0" * 8)
        fileobj.write(footer)
    finally:
        fileobj.close()
def test_many_changes(self, num_runs=5, num_changes=300,
                      min_change_size=500, max_change_size=1000,
                      min_buffer_size=1, max_buffer_size=2000):
    """Stress-test insert_bytes/delete_bytes with many random edits.

    Applies `num_changes` random insertions (with random buffer sizes),
    then undoes them in reverse order and checks the file round-trips.
    Deprecated failUnless/failIfEqual/failUnlessEqual aliases (removed
    in Python 3.12) are replaced with their modern equivalents.
    """
    self.assertTrue(min_buffer_size < min_change_size and
                    max_buffer_size > max_change_size and
                    min_change_size < max_change_size and
                    min_buffer_size < max_buffer_size,
                    "Given testing parameters make this test useless")
    for j in range(num_runs):
        data = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ" * 1024
        fobj = self.data_to_file(data)
        filesize = len(data)
        # Generate the list of changes to apply
        changes = []
        for i in range(num_changes):
            change_size = random.randrange(min_change_size,
                                           max_change_size)
            change_offset = random.randrange(0, filesize)
            filesize += change_size
            changes.append((change_offset, change_size))

        # Apply the changes, and make sure they all took.
        for offset, size in changes:
            buffer_size = random.randrange(min_buffer_size,
                                           max_buffer_size)
            insert_bytes(fobj, size, offset, BUFFER_SIZE=buffer_size)
        fobj.seek(0)
        self.assertNotEqual(fobj.read(len(data)), data)
        fobj.seek(0, 2)
        self.assertEqual(fobj.tell(), filesize)

        # Then, undo them.
        changes.reverse()
        for offset, size in changes:
            buffer_size = random.randrange(min_buffer_size,
                                           max_buffer_size)
            delete_bytes(fobj, size, offset, BUFFER_SIZE=buffer_size)
        fobj.seek(0)
        self.assertTrue(fobj.read() == data)
def test_delete_middle(self):
    """Deleting from the middle keeps both surrounding spans."""
    o = self.data_to_file(b'abcdefg')
    delete_bytes(o, 3, 2)
    # assertEqual replaces the deprecated assertEquals alias
    # (removed in Python 3.12).
    self.assertEqual(b'abfg', self.read(o))
def test_delete_second_of_two(self):
    """Deleting the last byte leaves only the first."""
    o = self.data_to_file(b'ab')
    delete_bytes(o, 1, 1)
    # assertEqual replaces the deprecated assertEquals alias
    # (removed in Python 3.12).
    self.assertEqual(b'a', self.read(o))
def test_delete_first_of_two(self):
    """Deleting the first byte leaves only the second."""
    o = self.data_to_file(b'ab')
    delete_bytes(o, 1, 0)
    # assertEqual replaces the deprecated assertEquals alias
    # (removed in Python 3.12).
    self.assertEqual(b'b', self.read(o))
def test_delete_one(self):
    """Deleting the only byte yields an empty file."""
    o = self.data_to_file(b'a')
    delete_bytes(o, 1, 0)
    # assertEqual replaces the deprecated assertEquals alias
    # (removed in Python 3.12).
    self.assertEqual(b'', self.read(o))
def save(self):
    """Render the ASF header objects and write them back to the file.

    Rebuilds the attribute-to-object mapping, ensures all required
    header sub-objects exist, renders the full header, and resizes the
    file in place so the new header exactly replaces the old one.
    """
    # Move attributes to the right objects
    self.to_extended_content_description = {}
    self.to_metadata = {}
    self.to_metadata_library = []
    for name, value in self.tags._internal:
        # Standard attributes live in the content description object,
        # not in these maps.
        if name in _standard_attribute_names:
            continue
        # Values too large for a 16-bit size field, or GUID-typed values,
        # can only go in the metadata library object.
        library_only = (value.data_size() > 0xFFFF or value.TYPE == GUID)
        if (value.language is None and value.stream is None and
                name not in self.to_extended_content_description and
                not library_only):
            self.to_extended_content_description[name] = value
        elif (value.language is None and value.stream is not None and
                name not in self.to_metadata and not library_only):
            self.to_metadata[name] = value
        else:
            # Duplicates, per-language values, and library-only values
            # all fall through to the metadata library.
            self.to_metadata_library.append((name, value))

    # Add missing objects
    if not self.content_description_obj:
        self.content_description_obj = \
            ContentDescriptionObject()
        self.objects.append(self.content_description_obj)
    if not self.extended_content_description_obj:
        self.extended_content_description_obj = \
            ExtendedContentDescriptionObject()
        self.objects.append(self.extended_content_description_obj)
    if not self.header_extension_obj:
        self.header_extension_obj = \
            HeaderExtensionObject()
        self.objects.append(self.header_extension_obj)
    # Metadata and metadata-library objects nest inside the header
    # extension object rather than the top-level object list.
    if not self.metadata_obj:
        self.metadata_obj = \
            MetadataObject()
        self.header_extension_obj.objects.append(self.metadata_obj)
    if not self.metadata_library_obj:
        self.metadata_library_obj = \
            MetadataLibraryObject()
        self.header_extension_obj.objects.append(self.metadata_library_obj)

    # Render the header: GUID, 64-bit total size (body + 30-byte
    # header-object preamble), 32-bit object count, two fixed bytes,
    # then the rendered sub-objects.
    data = b"".join([obj.render(self) for obj in self.objects])
    data = (HeaderObject.GUID +
            struct.pack("<QL", len(data) + 30, len(self.objects)) +
            b"\x01\x02" + data)

    fileobj = open(self.filename, "rb+")
    try:
        size = len(data)
        # Grow or shrink the region occupied by the old header
        # (self.size bytes at the start of the file) to `size` bytes.
        if size > self.size:
            insert_bytes(fileobj, size - self.size, self.size)
        if size < self.size:
            # Deleting at offset 0 leaves exactly `size` stale header
            # bytes at the front, which the write below overwrites.
            delete_bytes(fileobj, self.size - size, 0)
        fileobj.seek(0)
        fileobj.write(data)
    finally:
        fileobj.close()

    # Remember the new header geometry for the next save.
    self.size = size
    self.num_objects = len(self.objects)
def delete(self):
    """Removes the chunk from the file"""
    # Drop this chunk's bytes, then shrink the parent chunk (if any)
    # so its recorded data size stays consistent with the file.
    delete_bytes(self.__fileobj, self.size, self.offset)
    parent = self.parent_chunk
    if parent is not None:
        parent.resize(parent.data_size - self.size)