def test_parted_no_partitions_in_sectors_parsing(self):
    """Smoke test: parted output in sector units for a disk with an unknown
    partition table (so no partition rows at all) must parse without raising."""
    # Simulated output of: parted -s /dev/sdc unit s print, on a blank disk
    parted_output = """Model: Mock up test
Disk /dev/sdc: 123s
Sector size (logical/physical): 512s/512s
Partition Table: unknown
Disk Flags: """
    result = Parted.parse_parted_output(parted_output)
    print("Output dict was: " + str(result))
def test_flag_detection(self):
    """Test Parted.get_partitions_containing_flag() with single and
    comma-separated flag lists, including mutation of the parsed dict."""
    # Modified input string to add "bios_grub" to flag section
    input_parted_gpt_string = """Model: Testing a certain flag
Disk /dev/sdc: 2147483648B
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags: 

Number  Start       End         Size        File system  Name  Flags
 1      1048576B    65011711B   63963136B   ext4
 2      65011712B   185597951B  120586240B  fat32              test,bios_grub
 3      185597952B  307232767B  121634816B  ntfs               test,msftdata,boot
"""
    parted_dict = Parted.parse_parted_output(input_parted_gpt_string)
    self.assertEqual(
        Parted.get_partitions_containing_flag(parted_dict, "bios_grub"), [2])
    self.assertEqual(
        Parted.get_partitions_containing_flag(parted_dict, "boot"), [3])
    self.assertEqual(
        Parted.get_partitions_containing_flag(parted_dict, "test"), [2, 3])
    # Overwrite partition 2's flags: bios_grub should now match nowhere.
    # BUG FIX: was assertFalse(result, []) -- the second argument of
    # assertFalse/assertTrue is the failure *message*, so the expected list
    # was never compared. Use assertEqual to actually assert the value.
    parted_dict['partitions'][2]['flags'] = "asdf"
    self.assertEqual(
        Parted.get_partitions_containing_flag(parted_dict, "bios_grub"), [])
    # Restore a flag list containing bios_grub on partition 2.
    # BUG FIX: was assertTrue(result, [3]); besides never comparing, the
    # intended value was wrong -- partition 2 (not 3) carries bios_grub here.
    parted_dict['partitions'][2]['flags'] = "msftdata,bios_grub"
    self.assertEqual(
        Parted.get_partitions_containing_flag(parted_dict, "bios_grub"), [2])
def test_parted_gpt_parsing(self):
    """Test Parted.parse_parted_output() on a GPT disk dump in byte units,
    covering missing filesystem/name/flag columns and exotic filesystems."""
    # Output of: parted -s /dev/sdc unit B print
    input_parted_gpt_string = """Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdc: 2147483648B
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags: 

Number  Start        End          Size         File system     Name  Flags
 1      1048576B     65011711B    63963136B    ext4
 2      65011712B    185597951B   120586240B   fat32                 msftdata
 3      185597952B   307232767B   121634816B   ntfs                  msftdata
 4      307232768B   340787199B   33554432B    xfs
 5      340787200B   540016639B   199229440B
 6      540016640B   588251135B   48234496B    hfs+
 7      588251136B   622854143B   34603008B    linux-swap(v1)        swap
 8      622854144B   2146435071B  1523580928B  ntfs                  msftdata
"""
    parted_dict = Parted.parse_parted_output(input_parted_gpt_string)
    print("Output dict was: " + str(parted_dict))
    # Disk-level fields
    self.assertEqual(parted_dict['model'], "ATA VBOX HARDDISK (scsi)")
    self.assertEqual(parted_dict['long_dev_node'], "/dev/sdc")
    self.assertEqual(parted_dict['capacity'], 2147483648)
    self.assertEqual(parted_dict['logical_sector_size'], 512)
    self.assertEqual(parted_dict['physical_sector_size'], 512)
    self.assertEqual(parted_dict['partition_table'], "gpt")
    self.assertEqual(parted_dict['flags'], "")
    self.assertEqual(len(parted_dict['partitions']), 8)
    # Partition 1: fully populated size fields, empty name/flags
    partition1 = parted_dict['partitions'][1]
    self.assertEqual(partition1['start'], 1048576)
    self.assertEqual(partition1['end'], 65011711)
    self.assertEqual(partition1['size'], 63963136)
    self.assertEqual(partition1['filesystem'], "ext4")
    self.assertEqual(partition1['name'], "")
    self.assertEqual(partition1['flags'], "")
    # BUG FIX: this local was misleadingly named partition7 even though it
    # holds the entry keyed 6 (the hfs+ partition), then was silently rebound.
    partition6 = parted_dict['partitions'][6]
    self.assertEqual(partition6['filesystem'], "hfs+")
    partition7 = parted_dict['partitions'][7]
    self.assertEqual(partition7['filesystem'], "linux-swap(v1)")
    self.assertEqual(partition7['flags'], "swap")
def test_parted_mbr_parsing(self):
    """Test Parted.parse_parted_output() on an msdos (MBR) disk dump in byte
    units, covering primary/extended/logical partition types."""
    # Output of: parted -s /dev/sdc unit B print
    input_parted_mbr_string = """Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdc: 2147483648B
Sector size (logical/physical): 512B/512B
Partition Table: msdos
Disk Flags: 

Number  Start        End          Size         Type      File system     Flags
 1      1048576B     91226111B    90177536B    primary   ext4
 2      91226112B    185597951B   94371840B    primary   ntfs
 3      185597952B   338690047B   153092096B   primary   fat32
 4      338690048B   2147483647B  1808793600B  extended
 5      339738624B   350224383B   10485760B    logical   linux-swap(v1)
 6      351272960B   419430399B   68157440B    logical
 7      420478976B   688914431B   268435456B   logical   btrfs
 8      689963008B   723517439B   33554432B    logical   xfs
 9      724566016B   2147483647B  1422917632B  logical   ntfs
"""
    parted_dict = Parted.parse_parted_output(input_parted_mbr_string)
    print("Output dict was: " + str(parted_dict))
    # Disk-level fields
    self.assertEqual(parted_dict['model'], "ATA VBOX HARDDISK (scsi)")
    self.assertEqual(parted_dict['long_dev_node'], "/dev/sdc")
    self.assertEqual(parted_dict['capacity'], 2147483648)
    # BUG FIX: removed an accidental duplicate of this assertion.
    self.assertEqual(parted_dict['logical_sector_size'], 512)
    self.assertEqual(parted_dict['physical_sector_size'], 512)
    self.assertEqual(parted_dict['partition_table'], "msdos")
    self.assertEqual(parted_dict['flags'], "")
    self.assertEqual(len(parted_dict['partitions']), 9)
    # MBR partition 'Type' column
    self.assertEqual(parted_dict['partitions'][3]['type'], "primary")
    self.assertEqual(parted_dict['partitions'][4]['type'], "extended")
    self.assertEqual(parted_dict['partitions'][5]['type'], "logical")
    # Last logical partition: all fields populated except flags
    partition9 = parted_dict['partitions'][9]
    self.assertEqual(partition9['start'], 724566016)
    self.assertEqual(partition9['end'], 2147483647)
    self.assertEqual(partition9['size'], 1422917632)
    self.assertEqual(partition9['type'], "logical")
    self.assertEqual(partition9['filesystem'], "ntfs")
    self.assertEqual(partition9['flags'], "")
def __init__(self, absolute_qemu_img_path, enduser_filename, timeout_seconds):
    """Probe a QEMU image file: query qemu-img metadata, then temporarily
    associate the image with an NBD device to extract its partition table
    (sfdisk) and filesystem information (parted).

    Failures are recorded in self.warning_dict rather than raised, so a
    partially-probed image object is still usable.

    Args:
        absolute_qemu_img_path: Path to the image file on disk.
        enduser_filename: Human-readable name shown to the user.
        timeout_seconds: Timeout budget stored for later operations.
    """
    self.timeout_seconds = timeout_seconds
    self.image_format = "QEMU_FORMAT"
    self.absolute_path = absolute_qemu_img_path
    self.enduser_filename = enduser_filename
    self.normalized_sfdisk_dict = {'absolute_path': None, 'sfdisk_dict': {'partitions': {}}, 'file_length': 0}
    self.user_notes = ""
    self.warning_dict = {}

    # Clonezilla format
    self.ebr_dict = {}
    self.short_device_node_partition_list = []
    self.short_device_node_disk_list = []
    self.lvm_vg_dev_dict = {}
    self.lvm_logical_volume_dict = {}
    self.dev_fs_dict = {}
    self.size_bytes = 0
    self.enduser_readable_size = ""
    self.is_needs_decryption = False
    self.parted_dict = {'partitions': {}}
    self.post_mbr_gap_absolute_path = {}

    statbuf = os.stat(self.absolute_path)
    self.last_modified_timestamp = format_datetime(datetime.fromtimestamp(statbuf.st_mtime))
    print("Last modified timestamp " + self.last_modified_timestamp)
    # Renamed from 'dir' to avoid shadowing the builtin.
    qemu_directory = Path(absolute_qemu_img_path).parent.as_posix()
    print("Qemu directory : " + qemu_directory)

    qemu_img_cmd_list = ["qemu-img", "info", absolute_qemu_img_path]
    process, flat_command_string, fail_description = Utility.run("qemu-img info", qemu_img_cmd_list, use_c_locale=True)
    if process.returncode != 0:
        self.warning_dict[flat_command_string] = process.stderr
        return
    self.qemu_img_dict = QemuImage.parse_qemu_img_info(process.stdout)
    self.enduser_readable_size = self.qemu_img_dict['disk size']

    # Expose the image as a block device so sfdisk/parted can inspect it.
    is_associated, failed_message = self.associate_nbd(QEMU_NBD_NBD_DEVICE)
    if not is_associated:
        self.warning_dict[flat_command_string] = "Could not associate: " + failed_message
        return

    self.normalized_sfdisk_dict = {'absolute_path': None, 'sfdisk_dict': {'partitions': {}}, 'file_length': 0}
    process, flat_command_string, failed_message = Utility.run("Get partition table", ["sfdisk", "--dump", QEMU_NBD_NBD_DEVICE], use_c_locale=True)
    if process.returncode != 0:
        self.warning_dict[flat_command_string] = "Could not extract partition table: " + process.stderr
        # Not returning here so can disconnect.
    else:
        sfdisk_string = process.stdout
        f = tempfile.NamedTemporaryFile(mode='w', delete=False)
        f.write(sfdisk_string)
        f.close()
        self.normalized_sfdisk_dict = Sfdisk.generate_normalized_sfdisk_dict(f.name, self)

    parted_process, flat_command_string, failed_message = Utility.run("Get filesystem information", ["parted", "--script", QEMU_NBD_NBD_DEVICE, "unit", "s", "print"], use_c_locale=True)
    # BUG FIX: previously tested process.returncode / process.stderr (the
    # stale sfdisk result), so a parted failure went undetected and a parted
    # success could be discarded when sfdisk had failed.
    if parted_process.returncode != 0:
        self.warning_dict[flat_command_string] = "Could not extract filesystem: " + parted_process.stderr
        # Not returning here so can disconnect.
    else:
        self.parted_dict = Parted.parse_parted_output(parted_process.stdout)

    # Always detach the NBD device, even after the failures above.
    is_success, failed_message = QemuImage.deassociate_nbd(QEMU_NBD_NBD_DEVICE)
    if not is_success:
        self.warning_dict[flat_command_string] = failed_message
        return
def test_combined_drive_state(self):
    """End-to-end test of CombinedDriveState.construct_combined_drive_state_dict().

    Feeds captured outputs of lsblk (JSON), blkid, os-prober, parted and
    sfdisk for a VirtualBox test machine with many disks (plain partitions,
    LVM, RAID1, LUKS, a filesystem directly on disk, and a CD-ROM), then
    pretty-prints the combined result and exercises get_first_partition()
    on the out-of-order msdos disk /dev/sdd.

    NOTE(review): only the final get_first_partition() call can raise on
    regression -- there are no assertions on the combined dict itself.
    """
    parted_dict_dict = {}
    sfdict_dict_dict = {}
    # Captured output of: lsblk -o KNAME,NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL --paths --bytes --json
    lsblk_json_output = """{
   "blockdevices": [
      {"kname":"/dev/loop0", "name":"/dev/loop0", "size":698761216, "type":"loop", "fstype":"squashfs", "mountpoint":"/rofs", "model":null},
      {"kname":"/dev/sda", "name":"/dev/sda", "size":34359738368, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sda1", "name":"/dev/sda1", "size":34357641216, "type":"part", "fstype":"ntfs", "mountpoint":"/mnt/backup", "model":null}
         ]
      },
      {"kname":"/dev/sdb", "name":"/dev/sdb", "size":1073741824, "type":"disk", "fstype":"LVM2_member", "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/dm-0", "name":"/dev/mapper/vgtest-lvtest", "size":1069547520, "type":"lvm", "fstype":"ext4", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdc", "name":"/dev/sdc", "size":1610612736, "type":"disk", "fstype":"ntfs", "mountpoint":null, "model":"VBOX_HARDDISK"},
      {"kname":"/dev/sdd", "name":"/dev/sdd", "size":2147483648, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdd1", "name":"/dev/sdd1", "size":3145728, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd2", "name":"/dev/sdd2", "size":44040192, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd4", "name":"/dev/sdd4", "size":1024, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdd5", "name":"/dev/sdd5", "size":12582912, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd6", "name":"/dev/sdd6", "size":4194304, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd7", "name":"/dev/sdd7", "size":28311552, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd8", "name":"/dev/sdd8", "size":4194304, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd9", "name":"/dev/sdd9", "size":20971520, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd10", "name":"/dev/sdd10", "size":83886080, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd11", "name":"/dev/sdd11", "size":72351744, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd12", "name":"/dev/sdd12", "size":18874368, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd13", "name":"/dev/sdd13", "size":29360128, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd14", "name":"/dev/sdd14", "size":45088768, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sde", "name":"/dev/sde", "size":2684354560, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sde1", "name":"/dev/sde1", "size":113246208, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sde2", "name":"/dev/sde2", "size":67108864, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sde3", "name":"/dev/sde3", "size":2277507072, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sde4", "name":"/dev/sde4", "size":224395264, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdf", "name":"/dev/sdf", "size":3221225472, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdf1", "name":"/dev/sdf1", "size":268435456, "type":"part", "fstype":"btrfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf2", "name":"/dev/sdf2", "size":147849216, "type":"part", "fstype":"ext2", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf3", "name":"/dev/sdf3", "size":1024, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdf5", "name":"/dev/sdf5", "size":52428800, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf6", "name":"/dev/sdf6", "size":34603008, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf7", "name":"/dev/sdf7", "size":73400320, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf8", "name":"/dev/sdf8", "size":47185920, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf9", "name":"/dev/sdf9", "size":55574528, "type":"part", "fstype":"reiser4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf10", "name":"/dev/sdf10", "size":35651584, "type":"part", "fstype":"reiserfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf11", "name":"/dev/sdf11", "size":36700160, "type":"part", "fstype":"swap", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf12", "name":"/dev/sdf12", "size":379584512, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf13", "name":"/dev/sdf13", "size":45088768, "type":"part", "fstype":"udf", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf14", "name":"/dev/sdf14", "size":68157440, "type":"part", "fstype":"xfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf15", "name":"/dev/sdf15", "size":50331648, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdf16", "name":"/dev/sdf16", "size":40894464, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdf17", "name":"/dev/sdf17", "size":11534336, "type":"part", "fstype":"minix", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf18", "name":"/dev/sdf18", "size":62914560, "type":"part", "fstype":"f2fs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf19", "name":"/dev/sdf19", "size":135266304, "type":"part", "fstype":"nilfs2", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf20", "name":"/dev/sdf20", "size":1656750080, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdg", "name":"/dev/sdg", "size":3758096384, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK"},
      {"kname":"/dev/sdh", "name":"/dev/sdh", "size":4294967296, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdh1", "name":"/dev/sdh1", "size":104857600, "type":"part", "fstype":"LVM2_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/dm-1", "name":"/dev/mapper/vgtest1-lvtest1", "size":54525952, "type":"lvm", "fstype":"vfat", "mountpoint":null, "model":null}
               ]
            },
            {"kname":"/dev/sdh2", "name":"/dev/sdh2", "size":104857600, "type":"part", "fstype":"LVM2_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/dm-3", "name":"/dev/mapper/vgtest2-lvtest2", "size":54525952, "type":"lvm", "fstype":"ntfs", "mountpoint":null, "model":null}
               ]
            },
            {"kname":"/dev/sdh3", "name":"/dev/sdh3", "size":104857600, "type":"part", "fstype":"LVM2_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/dm-2", "name":"/dev/mapper/vgtest3-lvtest3", "size":54525952, "type":"lvm", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdi", "name":"/dev/sdi", "size":8589934592, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdi1", "name":"/dev/sdi1", "size":536870912, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sdi2", "name":"/dev/sdi2", "size":255852544, "type":"part", "fstype":"ext2", "mountpoint":null, "model":null},
            {"kname":"/dev/sdi3", "name":"/dev/sdi3", "size":7795113984, "type":"part", "fstype":"crypto_LUKS", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdj", "name":"/dev/sdj", "size":53687091200, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdj1", "name":"/dev/sdj1", "size":554696704, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdj2", "name":"/dev/sdj2", "size":104857600, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sdj3", "name":"/dev/sdj3", "size":16777216, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdj4", "name":"/dev/sdj4", "size":53008662528, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdk", "name":"/dev/sdk", "size":1073741824, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdk1", "name":"/dev/sdk1", "size":1072693248, "type":"part", "fstype":"linux_raid_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/md127", "name":"/dev/md127", "size":1071644672, "type":"raid1", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdl", "name":"/dev/sdl", "size":1073741824, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdl1", "name":"/dev/sdl1", "size":1072693248, "type":"part", "fstype":"linux_raid_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/md127", "name":"/dev/md127", "size":1071644672, "type":"raid1", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdm", "name":"/dev/sdm", "size":1073741824, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdm1", "name":"/dev/sdm1", "size":1072693248, "type":"part", "fstype":"linux_raid_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/md0", "name":"/dev/md0", "size":1071644672, "type":"raid1", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdn", "name":"/dev/sdn", "size":1073741824, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdn1", "name":"/dev/sdn1", "size":1072693248, "type":"part", "fstype":"linux_raid_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/md0", "name":"/dev/md0", "size":1071644672, "type":"raid1", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdo", "name":"/dev/sdo", "size":1610612736, "type":"disk", "fstype":"ext4", "mountpoint":null, "model":"VBOX_HARDDISK"},
      {"kname":"/dev/sr0", "name":"/dev/sr0", "size":805623808, "type":"rom", "fstype":"iso9660", "mountpoint":"/cdrom", "model":"VBOX_CD-ROM"}
   ]
}"""
    lsblk_json_dict = json.loads(lsblk_json_output)
    # Captured output of: blkid (one device per line)
    input_blkid_string = """/dev/mapper/vgtest-lvtest: UUID="b9131c40-9742-416c-b019-8b11481a86ac" TYPE="ext4"
/dev/sda1: UUID="5C2F5F000C198BC5" TYPE="ntfs" PTTYPE="dos" PARTUUID="9aa1db09-6b7c-45a6-b392-c14518730297"
/dev/sdc: UUID="0818993868533997" TYPE="ntfs" PTTYPE="dos"
/dev/sdd1: UUID="925ef222-bc6d-4e46-a31d-af3443821ed6" TYPE="ext4" PARTUUID="1dbd2bfc-01"
/dev/sdd10: UUID="2708289c-c559-4d5c-bed1-98735f673bc0" TYPE="ext4" PARTUUID="1dbd2bfc-0a"
/dev/sdd11: UUID="c1dc5a1d-d22b-4a64-933f-5495a454eb05" TYPE="ext4" PARTUUID="1dbd2bfc-0b"
/dev/sdd12: UUID="47d1b78f-dd89-44d2-8af6-e7c01f1b635e" TYPE="ext4" PARTUUID="1dbd2bfc-0c"
/dev/sdd13: UUID="39fb1fdd-2cb8-449d-8d15-3eb7ab682677" TYPE="ext4" PARTUUID="1dbd2bfc-0d"
/dev/sdd14: UUID="f40bf79e-619e-422c-a546-ba78caa4ced7" TYPE="ext4" PARTUUID="1dbd2bfc-0e"
/dev/sdd2: UUID="206b6149-a675-4149-b368-8d7f158d9da2" TYPE="ext4" PARTUUID="1dbd2bfc-02"
/dev/sdd5: UUID="b8a96bc5-43c5-4180-a6c6-12466b544ced" TYPE="ext4" PARTUUID="1dbd2bfc-05"
/dev/sdd6: UUID="0897426b-24ee-47ed-8c85-e034205a43aa" TYPE="ext4" PARTUUID="1dbd2bfc-06"
/dev/sdd7: UUID="c4f8b878-1a4e-424b-93dd-17ea6d8e14d2" TYPE="ext4" PARTUUID="1dbd2bfc-07"
/dev/sdd8: UUID="e4fa2ad0-a206-447e-9c14-829d52c10407" TYPE="ext4" PARTUUID="1dbd2bfc-08"
/dev/sdd9: UUID="34648ad0-2fc1-47e1-8503-dc88b5152386" TYPE="ext4" PARTUUID="1dbd2bfc-09"
/dev/sde1: UUID="39DE69624116B96D" TYPE="ntfs" PTTYPE="dos" PARTUUID="4c6ec2cf-8de5-43c0-9757-41a596de5486"
/dev/sde2: UUID="B155-F891" TYPE="vfat" PARTUUID="be9f4179-560c-4bc9-8366-ae214f69a16e"
/dev/sde3: UUID="286C1DA536C63454" TYPE="ntfs" PTTYPE="dos" PARTUUID="363bdd4e-d6a1-42e6-a4da-616cd3e46952"
/dev/sde4: UUID="11d0533c-feba-4a07-ae30-7ff3720a051e" TYPE="ext4" PARTUUID="ced0279d-0096-4ebe-8425-4c6c9d46a4d2"
/dev/sdf1: UUID="c297cbb6-acb5-4d6a-9e34-af7558e120a0" UUID_SUB="695a452a-c43c-477c-8d5c-a21802e18091" TYPE="btrfs" PARTUUID="43c18652-01"
/dev/sdf12: UUID="023E47301DBC9964" TYPE="ntfs" PTTYPE="dos" PARTUUID="43c18652-0c"
/dev/sdf13: UUID="5f4b3897b65975a8" LABEL="LinuxUDF" TYPE="udf" PARTUUID="43c18652-0d"
/dev/sdf17: TYPE="minix" PARTUUID="43c18652-11"
/dev/sdf18: UUID="d73d2f4f-fba2-43ef-ad7a-c5fe877bf8a9" TYPE="f2fs" PARTUUID="43c18652-12"
/dev/sdf2: UUID="04944da2-d784-4c9f-b143-060d2776c4b1" TYPE="ext2" PARTUUID="43c18652-02"
/dev/sdf20: UUID="718E7F8E7F42D1B8" TYPE="ntfs" PTTYPE="dos" PARTUUID="43c18652-14"
/dev/sdf5: UUID="a2f8e4d6-ac83-4c57-a046-b7122c0398d5" TYPE="ext4" PARTUUID="43c18652-05"
/dev/sdf6: UUID="1297CD00121D448B" TYPE="ntfs" PTTYPE="dos" PARTUUID="43c18652-06"
/dev/sdf7: SEC_TYPE="msdos" UUID="CF57-1227" TYPE="vfat" PARTUUID="43c18652-07"
/dev/sdf8: UUID="CF6A-B2D0" TYPE="vfat" PARTUUID="43c18652-08"
/dev/sdi1: UUID="F5A2-3D31" TYPE="vfat" PARTUUID="b227e8b3-c8ea-448f-9657-53670575e6a8"
/dev/sdi2: UUID="80a2000c-c375-4a74-b6f9-2f1e1c7a8958" TYPE="ext2" PARTUUID="a3b889cb-31af-469d-a584-34edb323c62a"
/dev/sdj1: LABEL="Recovery" UUID="5C22168F22166E70" TYPE="ntfs" PARTLABEL="Basic data partition" PARTUUID="e3e94ae6-c2ab-495a-a955-a32140f56c2a"
/dev/sdj2: UUID="2C16-C81E" TYPE="vfat" PARTLABEL="EFI system partition" PARTUUID="7423670f-d0c3-4724-82f7-3185350a1bf7"
/dev/sdj4: UUID="DA40176E4017511D" TYPE="ntfs" PARTLABEL="Basic data partition" PARTUUID="1f1c6171-d10c-44c0-ba9b-e12995d7f4da"
/dev/sdk1: UUID="b4b3109e-816d-bcee-66c4-151e60fd8e23" UUID_SUB="cbb347a0-e478-39be-5dae-8b2955375ff6" LABEL="ubuntu:0" TYPE="linux_raid_member" PARTUUID="edb03a25-01"
/dev/sdl1: UUID="b4b3109e-816d-bcee-66c4-151e60fd8e23" UUID_SUB="17ee1620-2ccc-c266-1cd8-a567f6896d7a" LABEL="ubuntu:0" TYPE="linux_raid_member" PARTUUID="c6774609-01"
/dev/sr0: UUID="2020-09-04-10-27-14-00" LABEL="Rescuezilla" TYPE="iso9660" PTTYPE="PMBR"
/dev/loop0: TYPE="squashfs"
/dev/sdb: UUID="i20UTQ-OaX3-c6nB-CiBv-Gav1-hgVf-tEkO2W" TYPE="LVM2_member"
/dev/sdf9: UUID="0ab2eb0a-6c94-4f78-8206-7d725bbeb4e5" TYPE="reiser4" PARTUUID="43c18652-09"
/dev/sdf10: UUID="d230da2e-5359-43cb-827e-40c48a0a572b" TYPE="reiserfs" PARTUUID="43c18652-0a"
/dev/sdf11: UUID="217c8359-2ce6-4d07-bc44-4dcae84bc089" TYPE="swap" PARTUUID="43c18652-0b"
/dev/sdf14: UUID="3211b89b-b5ce-461d-8e6e-8acfaaa4bb28" TYPE="xfs" PARTUUID="43c18652-0e"
/dev/sdf19: UUID="957acceb-12f3-4574-ac1f-fb6e53f1a22f" TYPE="nilfs2" PARTUUID="43c18652-13"
/dev/sdh1: UUID="aNcRGF-4HyS-UoFu-aNpE-tXKI-nyeu-LiKaTm" TYPE="LVM2_member" PARTUUID="c64c40a3-4eec-4b86-820a-f8068ad3f686"
/dev/sdh2: UUID="cwuIbV-pb5s-9whu-eezX-WiZU-4wlY-gvuLZS" TYPE="LVM2_member" PARTUUID="45e285ea-caa3-4fc7-a7fe-9727d3198f09"
/dev/sdh3: UUID="jStAm5-tt1J-6uRa-HElo-EROm-DgjD-B5OJ3B" TYPE="LVM2_member" PARTUUID="4a014da5-75ca-45b2-9b62-7e3380c14570"
/dev/sdi3: UUID="17147edc-1e54-4300-b47a-01b138581512" TYPE="crypto_LUKS" PARTUUID="5f64fbd4-dcad-4467-8a8b-f3e009871661"
/dev/md127: UUID="0ca32d11-af2c-4512-9e68-bef318870149" TYPE="ext4"
/dev/mapper/vgtest1-lvtest1: SEC_TYPE="msdos" UUID="0559-959C" TYPE="vfat"
/dev/mapper/vgtest3-lvtest3: UUID="846a2cbd-b040-4afd-bb1c-8ecd6e15f0c2" TYPE="ext4"
/dev/mapper/vgtest2-lvtest2: UUID="588E895406ECC468" TYPE="ntfs" PTTYPE="dos"
/dev/sdf15: PARTUUID="43c18652-0f"
/dev/sdf16: PARTUUID="43c18652-10"
/dev/sdj3: PARTLABEL="Microsoft reserved partition" PARTUUID="229ef65d-3315-4824-945e-9719feda2f42"
/dev/sdm1: UUID="75515f3b-95ea-ef00-e327-c48e2784e416" UUID_SUB="52b46420-82ff-7e66-ff87-4195a846f804" LABEL="ubuntu:0" TYPE="linux_raid_member" PARTUUID="e02572d4-01"
/dev/sdn1: UUID="75515f3b-95ea-ef00-e327-c48e2784e416" UUID_SUB="5a61afd1-e3eb-d319-f6ca-a0135c0889de" LABEL="ubuntu:0" TYPE="linux_raid_member" PARTUUID="1e066523-01"
/dev/md0: UUID="42ba6b53-6752-4ca7-b5a7-95a5e766ce97" BLOCK_SIZE="4096" TYPE="ext4"
/dev/sdo: UUID="642af36d-7695-4376-a6f9-a35a15552e33" BLOCK_SIZE="4096" TYPE="ext4"
"""
    blkid_dict = Blkid.parse_blkid_output(input_blkid_string)
    # Captured output of: os-prober (detects a Windows EFI and a Debian install)
    os_prober_contents = """/dev/sdc2@/efi/Microsoft/Boot/bootmgfw.efi:Windows Boot Manager:Windows:efi
/dev/sdd1:Debian GNU/Linux 10 (buster):Debian:linux"""
    osprober_dict = OsProber.parse_os_prober_output(os_prober_contents)
    # parted dump for the GPT disk /dev/sde
    input_parted_gpt_string = """Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sde: 2684354560B
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags: 

Number  Start        End          Size         File system  Name  Flags
 1      1048576B     114294783B   113246208B   ntfs               msftdata
 2      114294784B   181403647B   67108864B    fat32              msftdata
 3      181403648B   2458910719B  2277507072B  ntfs               msftdata
 4      2458910720B  2683305983B  224395264B   ext4"""
    parted_dict_dict['/dev/sde'] = Parted.parse_parted_output(
        input_parted_gpt_string)
    # sfdisk dump for the same GPT disk /dev/sde
    input_sfdisk_gpt_string = """label: gpt
label-id: 5FA01E95-F3E8-4B92-845B-843609E4EF0D
device: /dev/sde
unit: sectors
first-lba: 34
last-lba: 5242846

/dev/sde1 : start= 2048, size= 221184, type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=4C6EC2CF-8DE5-43C0-9757-41A596DE5486
/dev/sde2 : start= 223232, size= 131072, type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=BE9F4179-560C-4BC9-8366-AE214F69A16E
/dev/sde3 : start= 354304, size= 4448256, type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=363BDD4E-D6A1-42E6-A4DA-616CD3E46952
/dev/sde4 : start= 4802560, size= 438272, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=CED0279D-0096-4EBE-8425-4C6C9D46A4D2"""
    sfdict_dict_dict['/dev/sde'] = Sfdisk.parse_sfdisk_dump_output(
        input_sfdisk_gpt_string)
    # parted dump for the msdos disk /dev/sdd -- note the partition rows are
    # deliberately NOT in number order (sorted by start offset instead).
    input_parted_mbr_string = """Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdd: 2147483648B
Sector size (logical/physical): 512B/512B
Partition Table: msdos
Disk Flags: 

Number  Start        End          Size         Type      File system  Flags
 4      1048576B     2100297727B  2099249152B  extended
14      2097152B     47185919B    45088768B    logical   ext4
13      48234496B    77594623B    29360128B    logical   ext4
 5      78643200B    91226111B    12582912B    logical   ext4
 9      92274688B    113246207B   20971520B    logical   ext4
 6      114294784B   118489087B   4194304B     logical   ext2
 7      119537664B   147849215B   28311552B    logical   ext4
 8      148897792B   153092095B   4194304B     logical   ext2
10      154140672B   238026751B   83886080B    logical   ext4
11      239075328B   311427071B   72351744B    logical   ext4
12      312475648B   331350015B   18874368B    logical   ext4
 1      2100297728B  2103443455B  3145728B     primary   ext2
 2      2103443456B  2147483647B  44040192B    primary   ext4"""
    parted_dict_dict['/dev/sdd'] = Parted.parse_parted_output(
        input_parted_mbr_string)
    # parted dumps for the two RAID-member disks /dev/sdm and /dev/sdn
    input_parted_sdm_string = """Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdm: 1073741824B
Sector size (logical/physical): 512B/512B
Partition Table: msdos
Disk Flags: 

Number  Start     End          Size         Type     File system  Flags
 1      1048576B  1073741823B  1072693248B  primary               raid
"""
    parted_dict_dict['/dev/sdm'] = Parted.parse_parted_output(
        input_parted_sdm_string)
    input_parted_sdn_string = """
Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdn: 1073741824B
Sector size (logical/physical): 512B/512B
Partition Table: msdos
Disk Flags: 

Number  Start     End          Size         Type     File system  Flags
 1      1048576B  1073741823B  1072693248B  primary               raid
"""
    parted_dict_dict['/dev/sdn'] = Parted.parse_parted_output(
        input_parted_sdn_string)
    # parted dump for /dev/sdo: an ext4 filesystem directly on the disk
    # (partition table "loop", single pseudo-partition spanning the disk)
    input_parted_fs_directly_on_disk_string = """
Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdo: 1610612736B
Sector size (logical/physical): 512B/512B
Partition Table: loop
Disk Flags: 

Number  Start  End          Size         File system  Flags
 1      0B     1610612735B  1610612736B  ext4
"""
    parted_dict_dict['/dev/sdo'] = Parted.parse_parted_output(
        input_parted_fs_directly_on_disk_string)
    # sfdisk dumps for the RAID-member disks (type=fd is Linux raid autodetect)
    input_sfdisk_sdm_string = """label: dos
label-id: 0xe02572d4
device: /dev/sdm
unit: sectors
sector-size: 512

/dev/sdm1 : start= 2048, size= 2095104, type=fd"""
    sfdict_dict_dict['/dev/sdm'] = Sfdisk.parse_sfdisk_dump_output(
        input_sfdisk_sdm_string)
    input_sfdisk_sdn_string = """label: dos
label-id: 0x1e066523
device: /dev/sdn
unit: sectors
sector-size: 512

/dev/sdn1 : start= 2048, size= 2095104, type=fd"""
    sfdict_dict_dict['/dev/sdn'] = Sfdisk.parse_sfdisk_dump_output(
        input_sfdisk_sdn_string)
    # sfdisk dump for /dev/sdd (type=5 is the extended partition container)
    input_sfdisk_mbr_string = """label: dos
label-id: 0x1dbd2bfc
device: /dev/sdd
unit: sectors

/dev/sdd1 : start= 4102144, size= 6144, type=83
/dev/sdd2 : start= 4108288, size= 86016, type=83
/dev/sdd4 : start= 2048, size= 4100096, type=5
/dev/sdd5 : start= 153600, size= 24576, type=83
/dev/sdd6 : start= 223232, size= 8192, type=83
/dev/sdd7 : start= 233472, size= 55296, type=83
/dev/sdd8 : start= 290816, size= 8192, type=83
/dev/sdd9 : start= 180224, size= 40960, type=83
/dev/sdd10 : start= 301056, size= 163840, type=83
/dev/sdd11 : start= 466944, size= 141312, type=83
/dev/sdd12 : start= 610304, size= 36864, type=83
/dev/sdd13 : start= 94208, size= 57344, type=83
/dev/sdd14 : start= 4096, size= 88064, type=83
"""
    sfdict_dict_dict['/dev/sdd'] = Sfdisk.parse_sfdisk_dump_output(
        input_sfdisk_mbr_string)
    pp = pprint.PrettyPrinter(indent=4)
    # Combine all of the above data sources into the unified drive state dict.
    combined_drive_state_dict = CombinedDriveState.construct_combined_drive_state_dict(
        lsblk_json_dict, blkid_dict, osprober_dict, parted_dict_dict,
        sfdict_dict_dict)
    pp.pprint(combined_drive_state_dict)
    # Exercise get_first_partition() on /dev/sdd, whose partitions were
    # supplied out of number order above.
    CombinedDriveState.get_first_partition(
        combined_drive_state_dict['/dev/sdd']['partitions'])
def _do_drive_query(self):
    """Query the system's drives/partitions/filesystems and populate self.drive_state.

    Mirrors Clonezilla's approach of combining several data sources (lsblk, blkid,
    os-prober, parted, sfdisk) into a single CombinedDriveState dict, then schedules
    the drive-selection table refresh on the GTK main loop via GLib.idle_add.

    Fixes in this revision:
      * parallel mode: the os-prober completion check compared against "osprober"
        while the process was registered under the key ('os_prober', ""), so the
        os-prober output was silently dropped.
      * parallel mode: parted/sfdisk Popen calls were missing stdout/stderr pipes,
        so proc.stdout was always None and their output was silently discarded.
      * typo fixes in two error messages.
    """
    env_C_locale = Utility.get_env_C_locale()
    drive_query_start_time = datetime.now()

    # Image Explorer and the temporary mount directory must be unmounted before
    # querying drives, otherwise stale mounts would pollute the combined state.
    GLib.idle_add(self.please_wait_popup.set_secondary_label_text,
                  _("Unmounting: {path}").format(path=IMAGE_EXPLORER_DIR))
    returncode, failed_message = ImageExplorerManager._do_unmount(
        IMAGE_EXPLORER_DIR)
    if not returncode:
        GLib.idle_add(
            self.error_message_callback, False,
            _("Unable to shutdown Image Explorer") + "\n\n" + failed_message)
        GLib.idle_add(self.please_wait_popup.destroy)
        return
    if self.is_stop_requested():
        GLib.idle_add(self.error_message_callback, False,
                      _("Operation cancelled by user."))
        return
    GLib.idle_add(
        self.please_wait_popup.set_secondary_label_text,
        _("Unmounting: {path}").format(path=RESCUEZILLA_MOUNT_TMP_DIR))
    returncode, failed_message = ImageExplorerManager._do_unmount(
        RESCUEZILLA_MOUNT_TMP_DIR)
    if not returncode:
        GLib.idle_add(
            self.error_message_callback, False,
            _("Unable to unmount {path}").format(
                path=RESCUEZILLA_MOUNT_TMP_DIR) + "\n\n" + failed_message)
        GLib.idle_add(self.please_wait_popup.destroy)
        return
    if self.is_stop_requested():
        GLib.idle_add(self.error_message_callback, False,
                      _("Operation cancelled by user."))
        return

    lsblk_cmd_list = [
        "lsblk", "-o",
        "KNAME,NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL,SERIAL", "--paths",
        "--bytes", "--json"
    ]
    blkid_cmd_list = ["blkid"]
    os_prober_cmd_list = ["os-prober"]

    lsblk_json_dict = {}
    blkid_dict = {}
    os_prober_dict = {}
    parted_dict_dict = collections.OrderedDict([])
    sfdisk_dict_dict = collections.OrderedDict([])

    # Clonezilla combines drive, partition and filesystem from multiple data sources (lsblk, blkid, parted etc)
    # Rescuezilla continues this approach to reach best possible Clonezilla compatibility.
    #
    # However this sequential querying is slow. A parallel approach should be in theory much faster (but might be
    # less reliable if internal commands are creating file locks etc.)
    #
    # In practice, the sequential approach was about 25% faster than a first-cut (polling-based) parallel approach.
    # Parallel mode currently disabled, but kept for further development/analysis.
    mode = "sequential-drive-query"
    if mode == "sequential-drive-query":
        print("Running drive query in sequential mode")
        # TODO: Run with Utility.interruptable_run() so that even long-lived commands can have a signal sent to it
        # to shutdown early.
        # Not checking return codes here because Clonezilla does not, and some of these commands are expected to
        # fail. The Utility.run() command prints the output to stdout.
        GLib.idle_add(self.please_wait_popup.set_secondary_label_text,
                      _("Running: {app}").format(app="lsblk"))
        process, flat_command_string, fail_description = Utility.run(
            "lsblk", lsblk_cmd_list, use_c_locale=True)
        lsblk_json_dict = json.loads(process.stdout)
        if self.is_stop_requested():
            GLib.idle_add(self.error_message_callback, False,
                          _("Operation cancelled by user."))
            return
        GLib.idle_add(self.please_wait_popup.set_secondary_label_text,
                      _("Running: {app}").format(app="blkid"))
        process, flat_command_string, fail_description = Utility.run(
            "blkid", blkid_cmd_list, use_c_locale=True)
        blkid_dict = Blkid.parse_blkid_output(process.stdout)
        if self.is_stop_requested():
            GLib.idle_add(self.error_message_callback, False,
                          _("Operation cancelled by user."))
            return
        GLib.idle_add(self.please_wait_popup.set_secondary_label_text,
                      _("Running: {app}").format(app="os-prober"))
        # NOTE(review): a previous comment claimed os-prober runs WITH the original
        # locale, but use_c_locale=True forces the C locale — confirm which is intended.
        process, flat_command_string, fail_description = Utility.run(
            "osprober", os_prober_cmd_list, use_c_locale=True)
        os_prober_dict = OsProber.parse_os_prober_output(process.stdout)
        if self.is_stop_requested():
            GLib.idle_add(self.error_message_callback, False,
                          _("Operation cancelled by user."))
            return
        for lsblk_dict in lsblk_json_dict['blockdevices']:
            partition_longdevname = lsblk_dict['name']
            print("Going to run parted and sfdisk on " +
                  partition_longdevname)
            try:
                GLib.idle_add(
                    self.please_wait_popup.set_secondary_label_text,
                    _("Running {app} on {device}").format(
                        app="parted", device=partition_longdevname))
                process, flat_command_string, fail_description = Utility.run(
                    "parted",
                    self._get_parted_cmd_list(partition_longdevname),
                    use_c_locale=True)
                # Devices without a partition table are expected to fail this way.
                if "unrecognized disk label" not in process.stderr:
                    parted_dict_dict[
                        partition_longdevname] = Parted.parse_parted_output(
                            process.stdout)
                else:
                    print("Parted says " + process.stderr)
                if self.is_stop_requested():
                    GLib.idle_add(self.error_message_callback, False,
                                  _("Operation cancelled by user."))
                    return
                GLib.idle_add(
                    self.please_wait_popup.set_secondary_label_text,
                    _("Running {app} on {device}").format(
                        app="sfdisk", device=partition_longdevname))
                process, flat_command_string, fail_description = Utility.run(
                    "sfdisk",
                    self._get_sfdisk_cmd_list(partition_longdevname),
                    use_c_locale=True)
                sfdisk_dict_dict[
                    partition_longdevname] = Sfdisk.parse_sfdisk_dump_output(
                        process.stdout)
                if self.is_stop_requested():
                    GLib.idle_add(self.error_message_callback, False,
                                  _("Operation cancelled by user."))
                    return
            except Exception:
                # Fixed typo: message previously read "Could run run parted on".
                print("Could not run parted on " + partition_longdevname)
    elif mode == "parallel-drive-query":
        print("Running drive query in parallel mode")
        # Launch drive query in parallel. Parallel Python subprocess.Popen() approach adapted from [1]
        # [1] https://stackoverflow.com/a/636601
        cmd_dict = {
            ('lsblk', ""):
            subprocess.Popen(lsblk_cmd_list,
                             env=env_C_locale,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             encoding="utf-8",
                             universal_newlines=True),
            ('blkid', ""):
            subprocess.Popen(blkid_cmd_list,
                             env=env_C_locale,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             encoding="utf-8",
                             universal_newlines=True),
            ('os_prober', ""):
            subprocess.Popen(os_prober_cmd_list,
                             env=env_C_locale,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             encoding="utf-8",
                             universal_newlines=True),
        }
        while cmd_dict:
            print("drive_query_process is length " + str(len(cmd_dict)) +
                  " with contents " + str(cmd_dict))
            for key in list(cmd_dict.keys()):
                proc = cmd_dict[key]
                retcode = proc.poll()
                if retcode is not None:
                    # Process finished.
                    cmd_dict.pop(key, None)
                    if key[0] == "lsblk" and retcode == 0:
                        # lsblk is complete, partition information can be used to launch the parted/sfdisk
                        lsblk_json_dict = json.loads(proc.stdout.read())
                        for lsblk_dict in lsblk_json_dict['blockdevices']:
                            partition_longdevname = lsblk_dict['name']
                            print("Launching parted and sfdisk on " +
                                  partition_longdevname)
                            try:
                                # Fix: stdout/stderr pipes were missing here, so
                                # proc.stdout was always None and the parted/sfdisk
                                # results below were silently discarded.
                                cmd_dict[("parted", partition_longdevname
                                          )] = subprocess.Popen(
                                              self._get_parted_cmd_list(
                                                  partition_longdevname),
                                              env=env_C_locale,
                                              stdout=subprocess.PIPE,
                                              stderr=subprocess.PIPE,
                                              encoding="utf-8",
                                              universal_newlines=True)
                                cmd_dict[("sfdisk", partition_longdevname
                                          )] = subprocess.Popen(
                                              self._get_sfdisk_cmd_list(
                                                  partition_longdevname),
                                              env=env_C_locale,
                                              stdout=subprocess.PIPE,
                                              stderr=subprocess.PIPE,
                                              encoding="utf-8",
                                              universal_newlines=True)
                            except Exception:
                                # Fixed typo: previously "Could launch sfdisk or parted on".
                                print("Could not launch sfdisk or parted on " +
                                      partition_longdevname)
                    elif key[0] == "blkid" and retcode == 0:
                        blkid_dict = Blkid.parse_blkid_output(
                            proc.stdout.read())
                    elif key[0] == "os_prober" and retcode == 0:
                        # Fix: comparison was against "osprober", which never matched
                        # the ('os_prober', "") key above, silently dropping output.
                        os_prober_dict = OsProber.parse_os_prober_output(
                            proc.stdout.read())
                    elif key[
                            0] == "sfdisk" and retcode == 0 and proc.stdout is not None:
                        sfdisk_dict_dict[
                            key[1]] = Sfdisk.parse_sfdisk_dump_output(
                                proc.stdout.read())
                    elif key[
                            0] == "parted" and retcode == 0 and proc.stdout is not None:
                        if proc.stderr is not None:
                            stderr = proc.stderr.read()
                            print("parted with key " + str(key) +
                                  " had stderr " + stderr)
                            if "unrecognized disk label" not in stderr:
                                parted_dict_dict[
                                    key[1]] = Parted.parse_parted_output(
                                        proc.stdout.read())
                    else:
                        print("COULD NOT PROCESS process launched with key " +
                              str(key) + " return code" + str(retcode))
                        if proc.stdout is not None:
                            print("stdout:" + proc.stdout.read())
                        if proc.stderr is not None:
                            print(" stderr:" + proc.stderr.read())
                else:
                    # No process is done, wait a bit and check again.
                    time.sleep(0.1)
                    continue
    else:
        raise Exception("Invalid drive query mode")

    self.drive_state = CombinedDriveState.construct_combined_drive_state_dict(
        lsblk_json_dict, blkid_dict, os_prober_dict, parted_dict_dict,
        sfdisk_dict_dict)
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(self.drive_state)
    drive_query_end_time = datetime.now()
    print("Drive query took: " +
          str((drive_query_end_time - drive_query_start_time)))
    GLib.idle_add(self.populate_drive_selection_table)
def __init__(self, absolute_clonezilla_img_path, enduser_filename):
    """Parse a Clonezilla image directory (possibly multi-disk) into in-memory metadata.

    Probes the directory containing `absolute_clonezilla_img_path` for the standard
    Clonezilla metadata files (parts, disk, dev-fs.list, blkid.list, LVM lists,
    *-pt.parted, *-pt.sf, *-mbr, *-ebr, swappt-*.info) and builds the lookup dicts
    used by the restore logic.

    Args:
        absolute_clonezilla_img_path: path to a file inside the Clonezilla image;
            its parent directory is treated as the image directory.
        enduser_filename: human-readable image name shown to the end user.

    Raises:
        FileNotFoundError: if the image's 'parts' file cannot be located (and the
            image is not detected as ecryptfs-encrypted).
        Exception: if a disk's capacity cannot be computed from its -pt.parted file.
    """
    self.absolute_path = absolute_clonezilla_img_path
    self.enduser_filename = enduser_filename
    # Maps short device node -> warning message surfaced to the user.
    self.warning_dict = {}
    statbuf = os.stat(self.absolute_path)
    self.last_modified_timestamp = format_datetime(
        datetime.fromtimestamp(statbuf.st_mtime))
    print("Last modified timestamp " + self.last_modified_timestamp)
    self.image_format = "CLONEZILLA_FORMAT"
    # NOTE(review): 'dir' shadows the builtin of the same name.
    dir = Path(absolute_clonezilla_img_path).parent.as_posix()
    print("Clonezilla directory : " + dir)
    self.short_device_node_partition_list = []
    self.short_device_node_disk_list = []
    self.lvm_vg_dev_dict = {}
    self.lvm_logical_volume_dict = {}
    self.dev_fs_dict = {}
    self.is_needs_decryption = False
    self.ecryptfs_info_dict = None
    ecryptfs_info_filepath = os.path.join(dir, "ecryptfs.info")
    if isfile(ecryptfs_info_filepath):
        try:
            # ecryptfs.info is plain text when the directory is encrypted and produces Input/Output error when decrypted
            Utility.read_file_into_string(ecryptfs_info_filepath)
            self.is_needs_decryption = True
        except:
            # Read failure (I/O error) implies the image is already decrypted.
            self.is_needs_decryption = False
    if self.is_needs_decryption:
        # Encrypted image: the disk/partition lists come from ecryptfs.info
        # rather than the 'disk'/'parts' files probed below.
        self.ecryptfs_info_dict = Ecryptfs.parse_ecryptfs_info(
            Utility.read_file_into_string(ecryptfs_info_filepath))
        self.short_device_node_disk_list = self.ecryptfs_info_dict['disk']
        self.short_device_node_partition_list = self.ecryptfs_info_dict[
            'parts']
    else:
        # The 'parts' file contains space separated short partition device nodes (eg 'sda1 sda2 sda7') corresponding
        # to the partitions that were selected by the user during the original backup.
        parts_filepath = os.path.join(dir, "parts")
        if isfile(parts_filepath):
            self.short_device_node_partition_list = Utility.read_space_delimited_file_into_list(
                parts_filepath)
        else:
            # Every Clonezilla image encountered so far has a 'parts' file, so the backup is considered invalid
            # if none is present.
            raise FileNotFoundError("Unable to locate " + parts_filepath +
                                    " or file is encrypted")
        # The 'disk' file can contain *multiple* space-separated short device nodes (eg 'sda sdb'), but most
        # users will only backup one drive at a time using Clonezilla.
        #
        # Clonezilla images created using 'saveparts' function (rather than 'savedisk') does NOT have this file.
        disk_filepath = os.path.join(dir, "disk")
        if isfile(disk_filepath):
            self.short_device_node_disk_list = Utility.read_space_delimited_file_into_list(
                disk_filepath)
        else:
            print("Unable to locate " + disk_filepath)
            # Clonezilla images created using 'saveparts' (rather than 'savedisks') don't have this file. However, if
            # 'saveparts' is used on partitions that multiple disks that each contain partition tables then it's vital
            # that the short device nodes information is extracted in order for the user to be able to restoring their
            # intended partition table.
            #
            # Fall back to deriving disk nodes from the *-pt.parted filenames.
            parted_absolute_path_list = glob.glob(
                os.path.join(dir, "*-pt.parted"))
            for parted_absolute_path in parted_absolute_path_list:
                self.short_device_node_disk_list.append(
                    re.sub('-pt.parted', '',
                           os.path.basename(parted_absolute_path)))
            if len(self.short_device_node_disk_list) == 0:
                # If the device list is still empty it must be due to using 'saveparts' on a drive without a
                # partition table. Append these device nodes onto the disk list for convenience.
                self.short_device_node_disk_list += self.short_device_node_partition_list
    # TODO: Re-evaluate the need to parse this file, as far as I can tell all the information can be extracted
    # from the partition information.
    # The 'dev-fs.list' file contains the association between device nodes and the filesystems (eg '/dev/sda2 ext4')
    dev_fs_list_filepath = os.path.join(dir, "dev-fs.list")
    if isfile(dev_fs_list_filepath):
        self.dev_fs_dict = ClonezillaImage.parse_dev_fs_list_output(
            Utility.read_file_into_string(dev_fs_list_filepath))
    else:
        # Not raising exception because older Clonezilla images don't have this file.
        print("Unable to locate " + dev_fs_list_filepath)
    # The 'blkid.list' file provides a snapshot of the partitions on the system at the time of backup. This data
    # is not particularly relevant during a restore operation, except potentially for eg, UUID.
    #
    # Danger: Do not mistake this structure for the current system's 'blkid' information.
    # TODO: Re-evaluate the need to parse this file. The risk of mistaken usage may outweigh its usefulness.
    self.blkid_dict = {}
    blkid_list_filepath = os.path.join(dir, "blkid.list")
    if isfile(blkid_list_filepath):
        self.blkid_dict = Blkid.parse_blkid_output(
            Utility.read_file_into_string(
                os.path.join(dir, blkid_list_filepath)))
    else:
        # Not raising exception because older Clonezilla images don't have this file.
        print("Unable to locate " + blkid_list_filepath)
    # The 'lvm_vg_dev.list' file contains the association between an LVM VG (Logical Volume Manager volume group)
    # with a name eg 'vgtest', the LVM PV (physical volume) with a UUID name, and the device node that the physical
    # volume resides on eg, /dev/sdb.
    lvm_vg_dev_list_filepath = os.path.join(dir, "lvm_vg_dev.list")
    if isfile(
            lvm_vg_dev_list_filepath) and not self.is_needs_decryption:
        self.lvm_vg_dev_dict = Lvm.parse_volume_group_device_list_string(
            Utility.read_file_into_string(lvm_vg_dev_list_filepath))
    else:
        print("No LVM volume group to device file detected in image")
    # The 'lvm_logv.list' file contains the association between device nodes and the filesystems (eg '/dev/sda2 ext4')
    lvm_logv_list_filepath = os.path.join(dir, "lvm_logv.list")
    if isfile(lvm_logv_list_filepath) and not self.is_needs_decryption:
        self.lvm_logical_volume_dict = Lvm.parse_logical_volume_device_list_string(
            Utility.read_file_into_string(lvm_logv_list_filepath))
    else:
        print("No LVM logical volume file detected in image")
    # Per-disk metadata dicts, each keyed by short disk device node (eg 'sda').
    self.parted_dict_dict = {}
    self.sfdisk_dict_dict = {}
    self.mbr_dict_dict = {}
    self.ebr_dict_dict = {}
    self.size_bytes = 0
    self.enduser_readable_size = "unknown"
    for short_disk_device_node in self.short_device_node_disk_list:
        self.size_bytes = 0
        # Clonezilla -pt.parted file lists size in sectors, rather than bytes (or end-user readable KB/MB/GB/TB as
        # Clonezilla's -pt.parted.compact file)
        parted_filepath = os.path.join(
            dir, short_disk_device_node + "-pt.parted")
        if isfile(parted_filepath) and not self.is_needs_decryption:
            self.parted_dict_dict[
                short_disk_device_node] = Parted.parse_parted_output(
                    Utility.read_file_into_string(parted_filepath))
            if 'capacity' in self.parted_dict_dict[short_disk_device_node] and 'logical_sector_size' in \
                    self.parted_dict_dict[short_disk_device_node]:
                # Capacity is in sectors; multiply by sector size for bytes.
                self.size_bytes = self.parted_dict_dict[short_disk_device_node]['capacity'] * \
                                  self.parted_dict_dict[short_disk_device_node]['logical_sector_size']
            else:
                raise Exception(
                    "Unable to calculate disk capacity using " +
                    parted_filepath + ": " +
                    str(self.parted_dict_dict[short_disk_device_node]))
        else:
            # Do not raise exception because parted partition table is not present when using 'saveparts'
            print("Unable to locate " + parted_filepath +
                  " or file is encrypted")
        if self.ecryptfs_info_dict is not None and 'size' in self.ecryptfs_info_dict.keys(
        ):
            self.enduser_readable_size = self.ecryptfs_info_dict[
                'size'].strip("_")
        else:
            # Convert size in bytes to KB/MB/GB/TB as relevant
            self.enduser_readable_size = size(int(self.size_bytes),
                                              system=alternative)
        sfdisk_filepath = os.path.join(
            dir, short_disk_device_node + "-pt.sf")
        if isfile(sfdisk_filepath) and not self.is_needs_decryption:
            sfdisk_string = Utility.read_file_into_string(
                sfdisk_filepath)
            self.sfdisk_dict_dict[short_disk_device_node] = {
                'absolute_path': sfdisk_filepath,
                'sfdisk_dict':
                Sfdisk.parse_sfdisk_dump_output(sfdisk_string),
                'sfdisk_file_length': len(sfdisk_string)
            }
        else:
            # Do not raise exception because sfdisk partition table is often missing using Clonezilla image format,
            # as `sfdisk --dump` fails for disks without a partition table.
            print("Unable to locate " + sfdisk_filepath +
                  " or file is encrypted")
        # There is a maximum of 1 MBR per drive (there can be many drives). Master Boot Record (MBR) is never
        # listed in 'parts' list.
        mbr_glob_list = glob.glob(
            os.path.join(dir, short_disk_device_node) + "*-mbr")
        for absolute_mbr_filepath in mbr_glob_list:
            short_mbr_device_node = basename(absolute_mbr_filepath).split(
                "-mbr")[0]
            self.mbr_dict_dict[short_disk_device_node] = {
                'short_device_node': short_mbr_device_node,
                'absolute_path': absolute_mbr_filepath
            }
        # There is a maximum of 1 EBR per drive (there can be many drives). Extended Boot Record (EBR) is never
        # listed in 'parts' list.
        ebr_glob_list = glob.glob(
            os.path.join(dir, short_disk_device_node) + "*-ebr")
        for absolute_ebr_filepath in ebr_glob_list:
            short_ebr_device_node = basename(absolute_ebr_filepath).split(
                "-ebr")[0]
            self.ebr_dict_dict[short_disk_device_node] = {
                'short_device_node': short_ebr_device_node,
                'absolute_path': absolute_ebr_filepath
            }
    self.image_format_dict_dict = collections.OrderedDict([])
    # Loops over the partitions listed in the 'parts' file
    for short_partition_device_node in self.short_device_node_partition_list:
        has_found_atleast_one_associated_image = False
        # For standard MBR and GPT partitions, the partition key listed in the 'parts' file has a directly
        # associated backup image, so check for this.
        image_format_dict = ClonezillaImage.scan_backup_image(
            dir, short_partition_device_node, self.is_needs_decryption)
        # If no match found check the LVM (Logical Volume Manager)
        if len(image_format_dict) == 0:
            # Loop over all the volume groups (if any)
            for vg_name in self.lvm_vg_dev_dict.keys():
                # TODO: Evaluate if there are Linux multipath device nodes that hold LVM Physical Volumes.
                # TODO: May need to adjust for multipath device node by replacing "/" with "-" for this node.
                pv_short_device_node = re.sub(
                    '/dev/', '',
                    self.lvm_vg_dev_dict[vg_name]['device_node'])
                # Check if there is an associated LVM Physical Volume (PV) present
                if short_partition_device_node == pv_short_device_node:
                    # Yes, the partition being analysed is associated with an LVM physical volume that contains
                    # an LVM Volume Group. Now determine all backup images associated to Logical Volumes that
                    # reside within this Volume Group.
                    for lv_path in self.lvm_logical_volume_dict.keys():
                        candidate_lv_path_prefix = "/dev/" + vg_name + "/"
                        # Eg, "/dev/cl/root".startswith("/dev/cl")
                        if lv_path.startswith(candidate_lv_path_prefix):
                            # Found a logical volume. Note: There may be more than one LV associated with an VG
                            # Set the scan prefix for the backup image to eg "cl-root"
                            logical_volume_scan_key = re.sub(
                                '/', '-', re.sub('/dev/', '', lv_path))
                            image_format_dict = ClonezillaImage.scan_backup_image(
                                dir, logical_volume_scan_key,
                                self.is_needs_decryption)
                            if len(image_format_dict) != 0:
                                image_format_dict[
                                    'is_lvm_logical_volume'] = True
                                image_format_dict[
                                    'volume_group_name'] = vg_name
                                image_format_dict['physical_volume_long_device_node'] = \
                                    self.lvm_vg_dev_dict[vg_name]['device_node']
                                image_format_dict[
                                    'logical_volume_long_device_node'] = lv_path
                                self.image_format_dict_dict[
                                    logical_volume_scan_key] = image_format_dict
                                has_found_atleast_one_associated_image = True
        else:
            has_found_atleast_one_associated_image = True
            self.image_format_dict_dict[
                short_partition_device_node] = image_format_dict
        if not has_found_atleast_one_associated_image:
            # No backup image found for this partition: record it as missing and
            # annotate a warning so the user is informed at restore time.
            self.image_format_dict_dict[short_partition_device_node] = {
                'type': "missing",
                'prefix': short_partition_device_node,
                'is_lvm_logical_volume': False
            }
            # TODO: Improve conversion between /dev/ nodes to short dev node.
            long_partition_key = "/dev/" + short_partition_device_node
            if long_partition_key in self.dev_fs_dict.keys():
                # Annotate have filesystem information from dev-fs.list file. This case is expected when during a
                # backup Clonezilla or Rescuezilla failed to successfully image the filesystem, but may have
                # succeeded for other filesystems.
                # NOTE(review): this stores the raw dev_fs_dict value; elsewhere in
                # this file the value is accessed as [...]['filesystem'] — confirm
                # whether ['filesystem'] should be extracted here too.
                fs = self.dev_fs_dict[long_partition_key]
                self.image_format_dict_dict[short_partition_device_node][
                    'filesystem'] = fs
                # TODO: Consider removing warning_dict as image_format_dict is sufficient.
                self.warning_dict[short_partition_device_node] = fs
            elif self.is_needs_decryption:
                self.warning_dict[short_partition_device_node] = _(
                    "Needs decryption")
            else:
                self.warning_dict[short_partition_device_node] = _(
                    "Unknown filesystem")
    # Unfortunately swap partitions are not listed in the 'parts' file. There does not appear to be any alternative
    # but scanning for the swap partitions and add them to the existing partitions, taking care to avoid duplicates
    # by rescanning what has already been scanned due to listing as an LVM logical volume.
    swap_partition_info_glob_list = glob.glob(
        os.path.join(dir, "swappt-*.info"))
    for swap_partition_info_glob in swap_partition_info_glob_list:
        key = Swappt.get_short_device_from_swappt_info_filename(
            swap_partition_info_glob)
        already_scanned = False
        for image_format_dict_key in self.image_format_dict_dict.keys():
            if key == self.image_format_dict_dict[image_format_dict_key][
                    "prefix"]:
                already_scanned = True
                break
        if not already_scanned and not self.is_needs_decryption:
            self.image_format_dict_dict[key] = Swappt.parse_swappt_info(
                Utility.read_file_into_string(swap_partition_info_glob))
            self.image_format_dict_dict[key]['type'] = "swap"
            self.image_format_dict_dict[key]['prefix'] = key
            self.image_format_dict_dict[key][
                'is_lvm_logical_volume'] = False
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(self.image_format_dict_dict)
def __init__(self, absolute_clonezilla_img_path, enduser_filename, dir,
             ecryptfs_info_dict, is_needs_decryption,
             short_disk_device_node, short_device_node_partition_list,
             is_display_multidisk, enduser_drive_number):
    """Parse one disk's worth of Clonezilla image metadata from an image directory.

    Per-disk variant of the Clonezilla image constructor: the caller has already
    determined the image directory, encryption status, and which disk/partitions
    belong to this instance.

    Args:
        absolute_clonezilla_img_path: path to a file inside the Clonezilla image.
        enduser_filename: human-readable image name; suffixed with a drive label
            when is_display_multidisk is True.
        dir: image directory to probe. NOTE(review): shadows the 'dir' builtin.
        ecryptfs_info_dict: parsed ecryptfs.info dict, or None.
        is_needs_decryption: True when the image directory is ecryptfs-encrypted.
        short_disk_device_node: short device node of this disk (eg 'sda').
        short_device_node_partition_list: short partition nodes (eg ['sda1', 'sda2']).
        is_display_multidisk: True when the image spans multiple disks.
        enduser_drive_number: 1-based drive index used in the multidisk label.

    Raises:
        Exception: if disk capacity cannot be computed from the -pt.parted file.
    """
    self.absolute_path = absolute_clonezilla_img_path
    self.ecryptfs_info_dict = ecryptfs_info_dict
    self.is_needs_decryption = is_needs_decryption
    self.short_disk_device_node = short_disk_device_node
    self.is_display_multidisk = is_display_multidisk
    self.enduser_drive_number = enduser_drive_number
    self.user_notes = ""
    # Maps short device node -> warning message surfaced to the user.
    self.warning_dict = {}
    # Optional free-form notes written by Rescuezilla at backup time.
    notes_filepath = os.path.join(dir, "rescuezilla.description.txt")
    if os.path.exists(notes_filepath):
        self.user_notes = Utility.read_file_into_string(notes_filepath)
    if is_display_multidisk:
        multidisk_desc = _("Drive {drive_number}".format(
            drive_number=str(self.enduser_drive_number)))
        self.enduser_filename = enduser_filename + " (" + multidisk_desc + ")"
    else:
        self.enduser_filename = enduser_filename
    statbuf = os.stat(self.absolute_path)
    self.last_modified_timestamp = format_datetime(
        datetime.fromtimestamp(statbuf.st_mtime))
    print("Last modified timestamp " + self.last_modified_timestamp)
    self.image_format = "CLONEZILLA_FORMAT"
    self.short_device_node_partition_list = short_device_node_partition_list
    self.short_device_node_disk_list = [short_disk_device_node]
    self.ebr_dict = {}
    self.lvm_vg_dev_dict = {}
    self.lvm_logical_volume_dict = {}
    self.dev_fs_dict = {}
    if not self.is_needs_decryption:
        # The 'dev-fs.list' file contains the association between device nodes and the filesystems
        # (eg '/dev/sda2 ext4'). The filesystems are a combination of several sources, so the values may differ from
        # `blkid` and `parted`. Given newer versions of Clonezilla create this file, it makes sense to process it.
        dev_fs_list_filepath = os.path.join(dir, "dev-fs.list")
        if isfile(dev_fs_list_filepath):
            self.dev_fs_dict = ClonezillaImage.parse_dev_fs_list_output(
                Utility.read_file_into_string(dev_fs_list_filepath))
        else:
            # Not raising exception because older Clonezilla images don't have this file.
            print("Unable to locate " + dev_fs_list_filepath)
    # The 'blkid.list' file provides a snapshot of the partitions on the system at the time of backup. This data
    # is not particularly relevant during a restore operation, except potentially for eg, UUID.
    #
    # Danger: Do not mistake this structure for the current system's 'blkid' information.
    # TODO: Re-evaluate the need to parse this file. The risk of mistaken usage may outweigh its usefulness.
    self.blkid_dict = {}
    blkid_list_filepath = os.path.join(dir, "blkid.list")
    if isfile(blkid_list_filepath):
        self.blkid_dict = Blkid.parse_blkid_output(
            Utility.read_file_into_string(
                os.path.join(dir, blkid_list_filepath)))
    else:
        # Not raising exception because older Clonezilla images don't have this file.
        print("Unable to locate " + blkid_list_filepath)
    # The 'lvm_vg_dev.list' file contains the association between an LVM VG (Logical Volume Manager volume group)
    # with a name eg 'vgtest', the LVM PV (physical volume) with a UUID name, and the device node that the physical
    # volume resides on eg, /dev/sdb.
    lvm_vg_dev_list_filepath = os.path.join(dir, "lvm_vg_dev.list")
    if isfile(
            lvm_vg_dev_list_filepath) and not self.is_needs_decryption:
        self.lvm_vg_dev_dict = Lvm.parse_volume_group_device_list_string(
            Utility.read_file_into_string(lvm_vg_dev_list_filepath))
    else:
        print("No LVM volume group to device file detected in image")
    # The 'lvm_logv.list' file contains the association between device nodes and the filesystems (eg '/dev/sda2 ext4')
    lvm_logv_list_filepath = os.path.join(dir, "lvm_logv.list")
    if isfile(lvm_logv_list_filepath) and not self.is_needs_decryption:
        self.lvm_logical_volume_dict = Lvm.parse_logical_volume_device_list_string(
            Utility.read_file_into_string(lvm_logv_list_filepath))
    else:
        print("No LVM logical volume file detected in image")
    self.parted_dict = {}
    self._mbr_absolute_path = {}
    self.post_mbr_gap_absolute_path = {}
    self.size_bytes = 0
    self.enduser_readable_size = "unknown"
    self.size_bytes = 0
    # Clonezilla -pt.parted file lists size in sectors, rather than bytes (or end-user readable KB/MB/GB/TB as
    # Clonezilla's -pt.parted.compact file)
    parted_filepath = os.path.join(dir, short_disk_device_node + "-pt.parted")
    if isfile(parted_filepath) and not self.is_needs_decryption:
        self.parted_dict = Parted.parse_parted_output(
            Utility.read_file_into_string(parted_filepath))
        if 'capacity' in self.parted_dict and 'logical_sector_size' in \
                self.parted_dict:
            # Capacity is in sectors; multiply by sector size for bytes.
            self.size_bytes = self.parted_dict['capacity'] * \
                              self.parted_dict['logical_sector_size']
        else:
            raise Exception("Unable to calculate disk capacity using " +
                            parted_filepath + ": " + str(self.parted_dict))
    else:
        # Do not raise exception because parted partition table is not present when using 'saveparts'
        print("Unable to locate " + parted_filepath +
              " or file is encrypted")
    if self.ecryptfs_info_dict is not None and 'size' in self.ecryptfs_info_dict.keys(
    ):
        self.enduser_readable_size = self.ecryptfs_info_dict['size'].strip(
            "_")
    self.normalized_sfdisk_dict = {
        'absolute_path': None,
        'sfdisk_dict': {
            'partitions': {}
        },
        'file_length': 0
    }
    if not is_needs_decryption:
        sfdisk_absolute_path = os.path.join(
            dir, short_disk_device_node + "-pt.sf")
        self.normalized_sfdisk_dict = Sfdisk.generate_normalized_sfdisk_dict(
            sfdisk_absolute_path, self)
    # There is a maximum of 1 MBR per drive (there can be many drives). Master Boot Record (MBR) is never
    # listed in 'parts' list.
    self._mbr_absolute_path = None
    mbr_glob_list = glob.glob(
        os.path.join(dir, short_disk_device_node) + "-mbr")
    for absolute_mbr_filepath in mbr_glob_list:
        self._mbr_absolute_path = absolute_mbr_filepath
    # There is a maximum of 1 post-MBR gap per drive (there can be many drives). The post MBR gap is never
    # listed in 'parts' list. Note the asterisk wildcard in the glob, to get the notes.txt file (see below)
    post_mbr_gap_glob_list = glob.glob(
        os.path.join(dir, short_disk_device_node) +
        "-hidden-data-after-mbr*")
    for absolute_post_mbr_gap_filepath in post_mbr_gap_glob_list:
        if absolute_post_mbr_gap_filepath.endswith(
                ".notes.txt") and not isfile(
                    os.path.join(dir, short_disk_device_node) +
                    "-hidden-data-after-mbr"):
            # When the post-MBR gap is not created by Clonezilla due to >1024 MB gap between MBR and first partition
            # there is a "notes.txt" file created which explains this. To maximize compatibility, in this
            # situation Rescuezilla v2.1+ creates a 1MB post-MBR gap backup *and* a notes.txt file.
            self.warning_dict[
                short_disk_device_node + "mbr"] = "Backup is missing the \"post-MBR gap\" backup, most likely due to Clonezilla detecting a >1024MB gap between the MBR partition table and the first partition. Any GRUB bootloaders present will not restore correctly. In order to boot after restoring this backup, Clonezilla happens to workaround this situation by automatically re-installing GRUB, but current version of Rescuezilla does not implement this (but will in a future version). Clonezilla is available from within the Rescuezilla live environment by running `clonezilla` in a Terminal. See the following link for more information: https://github.com/rescuezilla/rescuezilla/issues/146"
        else:
            self.post_mbr_gap_absolute_path = {
                'absolute_path': absolute_post_mbr_gap_filepath
            }
    # There is a maximum of 1 EBR per drive (there can be many drives). Extended Boot Record (EBR) is never
    # listed in 'parts' list. The asterisk is needed here because unlike the MBR case, the ebr file is eg,
    # sda4-ebr. In otherwords the EBR is associated with a partition not the base device node.
    ebr_glob_list = glob.glob(
        os.path.join(dir, short_disk_device_node) + "*-ebr")
    for absolute_ebr_filepath in ebr_glob_list:
        short_ebr_device_node = basename(absolute_ebr_filepath).split(
            "-ebr")[0]
        self.ebr_dict = {
            'short_device_node': short_ebr_device_node,
            'absolute_path': absolute_ebr_filepath
        }
    self.image_format_dict_dict = collections.OrderedDict([])
    # Loops over the partitions listed in the 'parts' file
    for short_partition_device_node in self.short_device_node_partition_list:
        has_found_atleast_one_associated_image = False
        # For standard MBR and GPT partitions, the partition key listed in the 'parts' file has a directly
        # associated backup image, so check for this.
        image_format_dict = ClonezillaImage.scan_backup_image(
            dir, short_partition_device_node, self.is_needs_decryption)
        # If no match found check the LVM (Logical Volume Manager)
        if len(image_format_dict) == 0:
            # Loop over all the volume groups (if any)
            for vg_name in self.lvm_vg_dev_dict.keys():
                # TODO: Evaluate if there are Linux multipath device nodes that hold LVM Physical Volumes.
                # TODO: May need to adjust for multipath device node by replacing "/" with "-" for this node.
                pv_short_device_node = re.sub(
                    '/dev/', '',
                    self.lvm_vg_dev_dict[vg_name]['device_node'])
                # Check if there is an associated LVM Physical Volume (PV) present
                if short_partition_device_node == pv_short_device_node:
                    # Yes, the partition being analysed is associated with an LVM physical volume that contains
                    # an LVM Volume Group. Now determine all backup images associated to Logical Volumes that
                    # reside within this Volume Group.
                    for lv_path in self.lvm_logical_volume_dict.keys():
                        candidate_lv_path_prefix = "/dev/" + vg_name + "/"
                        # Eg, "/dev/cl/root".startswith("/dev/cl")
                        if lv_path.startswith(candidate_lv_path_prefix):
                            # Found a logical volume. Note: There may be more than one LV associated with an VG
                            # Set the scan prefix for the backup image to eg "cl-root"
                            logical_volume_scan_key = re.sub(
                                '/', '-', re.sub('/dev/', '', lv_path))
                            image_format_dict = ClonezillaImage.scan_backup_image(
                                dir, logical_volume_scan_key,
                                self.is_needs_decryption)
                            if len(image_format_dict) != 0:
                                image_format_dict[
                                    'is_lvm_logical_volume'] = True
                                image_format_dict[
                                    'volume_group_name'] = vg_name
                                image_format_dict['physical_volume_long_device_node'] = \
                                    self.lvm_vg_dev_dict[vg_name]['device_node']
                                image_format_dict[
                                    'logical_volume_long_device_node'] = lv_path
                                self.image_format_dict_dict[
                                    logical_volume_scan_key] = image_format_dict
                                has_found_atleast_one_associated_image = True
        else:
            has_found_atleast_one_associated_image = True
            self.image_format_dict_dict[
                short_partition_device_node] = image_format_dict
        if not has_found_atleast_one_associated_image:
            # No backup image found for this partition: record it as missing and
            # annotate a warning so the user is informed at restore time.
            self.image_format_dict_dict[short_partition_device_node] = {
                'type': "missing",
                'prefix': short_partition_device_node,
                'is_lvm_logical_volume': False
            }
            # TODO: Improve conversion between /dev/ nodes to short dev node.
            long_partition_key = "/dev/" + short_partition_device_node
            if long_partition_key in self.dev_fs_dict.keys():
                # Annotate have filesystem information from dev-fs.list file. This case is expected when during a
                # backup Clonezilla or Rescuezilla failed to successfully image the filesystem, but may have
                # succeeded for other filesystems.
                fs = self.dev_fs_dict[long_partition_key]['filesystem']
                self.image_format_dict_dict[short_partition_device_node][
                    'filesystem'] = fs
                # TODO: Consider removing warning_dict as image_format_dict is sufficient.
                self.warning_dict[short_partition_device_node] = fs
            elif self.is_needs_decryption:
                self.warning_dict[short_partition_device_node] = _(
                    "Needs decryption")
            else:
                self.warning_dict[short_partition_device_node] = _(
                    "Unknown filesystem")
    # Unfortunately swap partitions are not listed in the 'parts' file. There does not appear to be any alternative
    # but scanning for the swap partitions and add them to the existing partitions, taking care to avoid duplicates
    # by rescanning what has already been scanned due to listing as an LVM logical volume.
    swap_partition_info_glob_list = glob.glob(
        os.path.join(dir, "swappt-*.info"))
    for swap_partition_info_glob in swap_partition_info_glob_list:
        key = Swappt.get_short_device_from_swappt_info_filename(
            swap_partition_info_glob)
        already_scanned = False
        for image_format_dict_key in self.image_format_dict_dict.keys():
            if key == self.image_format_dict_dict[image_format_dict_key][
                    "prefix"]:
                already_scanned = True
                break
        if not already_scanned and not self.is_needs_decryption:
            self.image_format_dict_dict[key] = Swappt.parse_swappt_info(
                Utility.read_file_into_string(swap_partition_info_glob))
            self.image_format_dict_dict[key]['type'] = "swap"
            self.image_format_dict_dict[key]['prefix'] = key
            self.image_format_dict_dict[key][
                'is_lvm_logical_volume'] = False
    total_size_estimate = 0
    # Now we have all the images, compute the partition size estimates, and save it to avoid recomputing.
    for image_format_dict_key in self.image_format_dict_dict.keys():
        # Try to find the short_disk_key for the image. This key is used to access the parted and sfdisk
        # partition table backups. It's not guaranteed there is a direct association between the backup image and
        # the partition table (for example, Logical Volume Manager logical volumes).
        associated_short_disk_key = ""
        for short_disk_key in self.short_device_node_disk_list:
            if image_format_dict_key.startswith(short_disk_key):
                associated_short_disk_key = short_disk_key
        estimated_size_bytes = self._compute_partition_size_byte_estimate(
            associated_short_disk_key, image_format_dict_key)
        self.image_format_dict_dict[image_format_dict_key][
            'estimated_size_bytes'] = estimated_size_bytes
        total_size_estimate += estimated_size_bytes
    if self.size_bytes == 0:
        # For md RAID devices, Clonezilla doesn't have a parted or sfdisk partition table containing the hard drive
        # size, so in that situation, summing the image sizes provides some kind of size estimate.
        self.size_bytes = total_size_estimate
    # Convert size in bytes to KB/MB/GB/TB as relevant
    self.enduser_readable_size = Utility.human_readable_filesize(
        int(self.size_bytes))
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(self.image_format_dict_dict)
def __init__(self, partition_long_device_node, absolute_path=None, enduser_filename=None):
    """Construct a metadata-only image for a device that has no backup image files.

    Queries sfdisk, parted, lsblk and blkid on the given device node to build the
    same dictionaries that a real on-disk image would provide (partition table,
    drive state, per-partition image format entries, size estimate).

    Args:
        partition_long_device_node: long device node, e.g. /dev/sdc.
        absolute_path: path used for stat/display; defaults to the device node.
        enduser_filename: display name; defaults to the device node.
    """
    self.image_format = "METADATA_ONLY_FORMAT"
    self.long_device_node = partition_long_device_node
    if absolute_path is None:
        self.absolute_path = partition_long_device_node
    else:
        self.absolute_path = absolute_path
    if enduser_filename is None:
        # BUGFIX: previously this branch re-assigned self.absolute_path, leaving
        # self.enduser_filename undefined when the caller used the default.
        self.enduser_filename = partition_long_device_node
    else:
        self.enduser_filename = enduser_filename
    self.normalized_sfdisk_dict = {'absolute_path': None, 'sfdisk_dict': {'partitions': {}}, 'file_length': 0}
    self.user_notes = ""
    self.warning_dict = {}

    # Clonezilla format
    self.ebr_dict = {}
    self.efi_nvram_dat_absolute_path = None
    self.short_device_node_partition_list = []
    self.short_device_node_disk_list = []
    self.lvm_vg_dev_dict = {}
    self.lvm_logical_volume_dict = {}
    self.sfdisk_chs_dict = None
    self.dev_fs_dict = {}
    self.size_bytes = 0
    self.enduser_readable_size = ""
    self.is_needs_decryption = False
    self.parted_dict = {'partitions': {}}
    self.post_mbr_gap_dict = {}
    self._mbr_absolute_path = None

    statbuf = os.stat(self.absolute_path)
    self.last_modified_timestamp = format_datetime(datetime.fromtimestamp(statbuf.st_mtime))
    print("Last modified timestamp " + self.last_modified_timestamp)

    process, flat_command_string, failed_message = Utility.run("Get partition table",
                                                               ["sfdisk", "--dump", partition_long_device_node],
                                                               use_c_locale=True)
    if process.returncode != 0:
        # Expect devices without a partition table to not be able to extract partition table
        print("Could not extract filesystem using sfdisk: " + process.stderr)
    else:
        # Normalization expects a file on disk, so write the dump to a temp file first.
        sfdisk_string = process.stdout
        f = tempfile.NamedTemporaryFile(mode='w', delete=False)
        f.write(sfdisk_string)
        f.close()
        self.normalized_sfdisk_dict = Sfdisk.generate_normalized_sfdisk_dict(f.name, self)
    if 'device' in self.normalized_sfdisk_dict['sfdisk_dict'].keys():
        self.short_device_node_disk_list = [self.normalized_sfdisk_dict['sfdisk_dict']['device']]

    # Get the parted partition table. For convenience, using the bytes unit, not sectors.
    parted_process, flat_command_string, failed_message = Utility.run("Get filesystem information",
                                                                      ["parted", "--script", partition_long_device_node,
                                                                       "unit", "b", "print"], use_c_locale=True)
    if parted_process.returncode != 0:
        # Expect devices without a partition table to not be able to extract partition table
        print("Could not extract filesystem using parted: " + parted_process.stderr)
    self.parted_dict = Parted.parse_parted_output(parted_process.stdout)
    if len(self.short_device_node_disk_list) == 0 and 'long_dev_node' in self.parted_dict.keys():
        self.short_device_node_disk_list = [self.parted_dict['long_dev_node']]

    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(self.parted_dict)

    lsblk_process, flat_command_string, failed_message = Utility.run("Querying device capacity",
                                                                     ["lsblk", "--getsize64",
                                                                      partition_long_device_node], use_c_locale=True)
    if lsblk_process.returncode != 0:
        # Expected for NBD device nodes
        print("Failed to get drive capacity from device node")

    # Create a CombinedDriveState structure for the MetadataOnlyImage, which may otherwise not be populated.
    lsblk_cmd_list = ["lsblk", "-o", "KNAME,NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL,SERIAL", "--paths", "--bytes",
                      "--json", self.long_device_node]
    process, flat_command_string, fail_description = Utility.run("lsblk", lsblk_cmd_list, use_c_locale=True)
    lsblk_json_dict = json.loads(process.stdout)

    # blkid is called in DriveQuery and without arugments it prints information about all *partitions* in the system
    # (eg, /dev/sda1, /dev/sda2), but not th base device. But with an argument, it only prints out the base device.
    # But globbing using an wildcard match prints out the base device *and* the partitions. Not ideal, but it works.
    partition_device_glob_list = glob.glob(self.long_device_node + "*")
    blkid_cmd_list = ["blkid"] + partition_device_glob_list
    process, flat_command_string, fail_description = Utility.run("blkid", blkid_cmd_list, use_c_locale=True)
    blkid_dict = Blkid.parse_blkid_output(process.stdout)

    # OS Prober takes too long to run
    os_prober_dict = {}
    self.drive_state = CombinedDriveState.construct_combined_drive_state_dict(lsblk_json_dict=lsblk_json_dict,
                                                                              blkid_dict=blkid_dict,
                                                                              osprober_dict=os_prober_dict,
                                                                              parted_dict_dict={self.long_device_node: self.parted_dict},
                                                                              sfdisk_dict_dict={self.long_device_node: self.normalized_sfdisk_dict})
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(self.drive_state)

    self.image_format_dict_dict = collections.OrderedDict([])
    total_size_estimate = 0
    drive_state_partitions_dict = self.drive_state[self.long_device_node]['partitions']
    for partition_long_device_node in drive_state_partitions_dict:
        if 'type' in drive_state_partitions_dict[partition_long_device_node].keys() \
                and drive_state_partitions_dict[partition_long_device_node]['type'] == "extended":
            # Skip extended partitions as they will be handled by the '-ebr' file
            continue
        self.image_format_dict_dict[partition_long_device_node] = {'type': "raw",
                                                                   'compression': "uncompressed",
                                                                   'is_lvm_logical_volume': False,
                                                                   'filesystem': drive_state_partitions_dict[partition_long_device_node]['filesystem']}

    # Estimate the disk size from sfdisk partition table backup
    last_partition_key, last_partition_final_byte = Sfdisk.get_highest_offset_partition(self.normalized_sfdisk_dict)
    self.size_bytes = last_partition_final_byte
    if self.size_bytes == 0:
        # Fall back on parted's capacity when the sfdisk estimate is unavailable.
        self.size_bytes = self.parted_dict['capacity']
    # Covert size in bytes to KB/MB/GB/TB as relevant
    self.enduser_readable_size = Utility.human_readable_filesize(int(self.size_bytes))
def do_backup(self):
    """Run the full Clonezilla-compatible backup of the selected drive.

    Writes the Clonezilla metadata files (clonezilla-img, blkdev.list, parts,
    disk, partition table backups, LVM metadata, dev-fs.list etc) into
    self.dest_dir, then backs up each partition with a
    partclone | gzip | split pipeline. Progress and completion are reported to
    the UI thread via GLib.idle_add; fatal errors call self.completed_backup
    with False and return early.
    """
    self.at_least_one_non_fatal_error = False
    self.requested_stop = False
    # Clear proc dictionary
    self.proc.clear()
    self.summary_message_lock = threading.Lock()
    self.summary_message = ""
    env = Utility.get_env_C_locale()

    print("mkdir " + self.dest_dir)
    os.mkdir(self.dest_dir)

    short_selected_device_node = re.sub('/dev/', '', self.selected_drive_key)
    enduser_date = datetime.today().strftime('%Y-%m-%d-%H%M')
    clonezilla_img_filepath = os.path.join(self.dest_dir, "clonezilla-img")
    with open(clonezilla_img_filepath, 'w') as filehandle:
        try:
            output = "This image was saved by Rescuezilla at " + enduser_date + "\nSaved by " + self.human_readable_version + "\nThe log during saving:\n----------------------------------------------------------\n\n"
            filehandle.write(output)
        except:
            tb = traceback.format_exc()
            traceback.print_exc()
            error_message = _(
                "Failed to write destination file. Please confirm it is valid to create the provided file path, and try again."
            ) + "\n\n" + tb
            GLib.idle_add(self.completed_backup, False, error_message)
            return

    self.logger = Logger(clonezilla_img_filepath)
    GLib.idle_add(self.update_backup_progress_bar, 0)

    process, flat_command_string, failed_message = Utility.run(
        "Saving blkdev.list",
        ["lsblk", "-oKNAME,NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL", self.selected_drive_key],
        use_c_locale=True,
        output_filepath=os.path.join(self.dest_dir, "blkdev.list"),
        logger=self.logger)
    if process.returncode != 0:
        with self.summary_message_lock:
            self.summary_message += failed_message
        GLib.idle_add(self.completed_backup, False, failed_message)
        return

    blkid_cmd_list = ["blkid"]
    sort_cmd_list = ["sort", "-V"]
    Utility.print_cli_friendly("blkid ", [blkid_cmd_list, sort_cmd_list])
    self.proc['blkid'] = subprocess.Popen(blkid_cmd_list, stdout=subprocess.PIPE, env=env, encoding='utf-8')

    process, flat_command_string, failed_message = Utility.run(
        "Saving blkid.list", ["blkid"],
        use_c_locale=True,
        output_filepath=os.path.join(self.dest_dir, "blkid.list"),
        logger=self.logger)
    if process.returncode != 0:
        with self.summary_message_lock:
            self.summary_message += failed_message
        GLib.idle_add(self.completed_backup, False, failed_message)
        return

    process, flat_command_string, failed_message = Utility.run(
        "Saving Info-lshw.txt", ["lshw"],
        use_c_locale=True,
        output_filepath=os.path.join(self.dest_dir, "Info-lshw.txt"),
        logger=self.logger)
    if process.returncode != 0:
        with self.summary_message_lock:
            self.summary_message += failed_message
        GLib.idle_add(self.completed_backup, False, failed_message)
        return

    info_dmi_txt_filepath = os.path.join(self.dest_dir, "Info-dmi.txt")
    with open(info_dmi_txt_filepath, 'w') as filehandle:
        filehandle.write("# This image was saved from this machine with DMI info at " + enduser_date + ":\n")
        filehandle.flush()
    process, flat_command_string, failed_message = Utility.run(
        "Saving Info-dmi.txt", ["dmidecode"],
        use_c_locale=True,
        output_filepath=info_dmi_txt_filepath,
        logger=self.logger)
    if process.returncode != 0:
        with self.summary_message_lock:
            self.summary_message += failed_message
        GLib.idle_add(self.completed_backup, False, failed_message)
        return

    info_lspci_filepath = os.path.join(self.dest_dir, "Info-lspci.txt")
    with open(info_lspci_filepath, 'w') as filehandle:
        # TODO: Improve datetime format string.
        filehandle.write("This image was saved from this machine with PCI info at " + enduser_date + "\n")
        filehandle.write("'lspci' results:\n")
        filehandle.flush()
    process, flat_command_string, failed_message = Utility.run(
        "Appending `lspci` output to Info-lspci.txt", ["lspci"],
        use_c_locale=True,
        output_filepath=info_lspci_filepath,
        logger=self.logger)
    if process.returncode != 0:
        with self.summary_message_lock:
            self.summary_message += failed_message
        GLib.idle_add(self.completed_backup, False, failed_message)
        return

    msg_delimiter_star_line = "*****************************************************."
    with open(info_lspci_filepath, 'a+') as filehandle:
        filehandle.write(msg_delimiter_star_line + "\n")
        filehandle.write("'lspci -n' results:\n")
        filehandle.flush()
    # Show PCI vendor and device codes as numbers instead of looking them up in the PCI ID list.
    process, flat_command_string, failed_message = Utility.run(
        "Appending `lspci -n` output to Info-lspci.txt", ["lspci", "-n"],
        use_c_locale=True,
        output_filepath=info_lspci_filepath,
        logger=self.logger)
    if process.returncode != 0:
        with self.summary_message_lock:
            self.summary_message += failed_message
        GLib.idle_add(self.completed_backup, False, failed_message)
        return

    info_smart_filepath = os.path.join(self.dest_dir, "Info-smart.txt")
    with open(info_smart_filepath, 'w') as filehandle:
        filehandle.write("This image was saved from this machine with hard drive S.M.A.R.T. info at " + enduser_date + "\n")
        filehandle.write(msg_delimiter_star_line + "\n")
        filehandle.write("For the drive: " + self.selected_drive_key + "\n")
        filehandle.flush()
    # VirtualBox doesn't support smart, so ignoring the exit code here.
    # FIXME: Improve this.
    process, flat_command_string, failed_message = Utility.run(
        "Saving Info-smart.txt", ["smartctl", "--all", self.selected_drive_key],
        use_c_locale=True,
        output_filepath=info_smart_filepath,
        logger=self.logger)

    filepath = os.path.join(self.dest_dir, "Info-packages.txt")
    # Save Debian package information
    if shutil.which("dpkg") is not None:
        rescuezilla_package_list = ["rescuezilla", "util-linux", "gdisk"]
        with open(filepath, 'w') as filehandle:
            filehandle.write("Image was saved by these Rescuezilla-related packages:\n ")
            for pkg in rescuezilla_package_list:
                dpkg_process = subprocess.run(['dpkg', "--status", pkg], capture_output=True, encoding="UTF-8")
                if dpkg_process.returncode != 0:
                    continue
                for line in dpkg_process.stdout.split("\n"):
                    if re.search("^Version: ", line):
                        version = line[len("Version: "):]
                        filehandle.write(pkg + "-" + version + " ")
            filehandle.write("\nSaved by " + self.human_readable_version + ".\n")

    # TODO: Clonezilla creates a file named "Info-saved-by-cmd.txt" file, to allow users to re-run the exact
    # command again without going through the wizard. The proposed Rescuezilla approach to this feature is
    # discussed here: https://github.com/rescuezilla/rescuezilla/issues/106
    filepath = os.path.join(self.dest_dir, "parts")
    with open(filepath, 'w') as filehandle:
        i = 0
        for partition_key in self.partitions_to_backup:
            short_partition_key = re.sub('/dev/', '', partition_key)
            to_backup_dict = self.partitions_to_backup[partition_key]
            is_swap = False
            if 'filesystem' in to_backup_dict.keys() and to_backup_dict['filesystem'] == "swap":
                is_swap = True
            if 'type' not in to_backup_dict.keys() or 'type' in to_backup_dict.keys() and 'extended' != to_backup_dict['type'] and not is_swap:
                # Clonezilla does not write the extended partition node into the parts file,
                # nor does it write swap partition node
                filehandle.write('%s' % short_partition_key)
                # Ensure no trailing space on final iteration (to match Clonezilla format exactly)
                if i + 1 != len(self.partitions_to_backup.keys()):
                    filehandle.write(' ')
            i += 1
        filehandle.write('\n')

    filepath = os.path.join(self.dest_dir, "disk")
    with open(filepath, 'w') as filehandle:
        filehandle.write('%s\n' % short_selected_device_node)

    compact_parted_filename = short_selected_device_node + "-pt.parted.compact"
    # Parted drive information with human-readable "compact" units: KB/MB/GB rather than sectors.
    process, flat_command_string, failed_message = Utility.run(
        "Saving " + compact_parted_filename,
        ["parted", "--script", self.selected_drive_key, "unit", "compact", "print"],
        use_c_locale=True,
        output_filepath=os.path.join(self.dest_dir, compact_parted_filename),
        logger=self.logger)
    if process.returncode != 0:
        with self.summary_message_lock:
            self.summary_message += failed_message
        GLib.idle_add(self.completed_backup, False, failed_message)
        return

    # Parted drive information with standard sector units. Clonezilla doesn't output easily parsable output using
    # the --machine flag, so for maximum Clonezilla compatibility neither does Rescuezilla.
    parted_filename = short_selected_device_node + "-pt.parted"
    parted_process, flat_command_string, failed_message = Utility.run(
        "Saving " + parted_filename,
        ["parted", "--script", self.selected_drive_key, "unit", "s", "print"],
        use_c_locale=True,
        output_filepath=os.path.join(self.dest_dir, parted_filename),
        logger=self.logger)
    # BUGFIX: check the parted process, not the stale 'process' from the previous command.
    if parted_process.returncode != 0:
        with self.summary_message_lock:
            self.summary_message += failed_message
        GLib.idle_add(self.completed_backup, False, failed_message)
        return

    parted_dict = Parted.parse_parted_output(parted_process.stdout)
    partition_table = parted_dict['partition_table']

    # Save MBR for both msdos and GPT disks
    if "gpt" == partition_table or "msdos" == partition_table:
        filepath = os.path.join(self.dest_dir, short_selected_device_node + "-mbr")
        process, flat_command_string, failed_message = Utility.run(
            "Saving " + filepath,
            ["dd", "if=" + self.selected_drive_key, "of=" + filepath, "bs=512", "count=1"],
            use_c_locale=False,
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

    if "gpt" == partition_table:
        first_gpt_filename = short_selected_device_node + "-gpt-1st"
        dd_process, flat_command_string, failed_message = Utility.run(
            "Saving " + first_gpt_filename,
            ["dd", "if=" + self.selected_drive_key, "of=" + os.path.join(self.dest_dir, first_gpt_filename),
             "bs=512", "count=34"],
            use_c_locale=False,
            logger=self.logger)
        # BUGFIX: check dd_process, not the stale 'process' from an earlier command.
        if dd_process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        # From Clonezilla's scripts/sbin/ocs-functions:
        # We need to get the total size of disk so that we can skip and dump the last block:
        # The output of 'parted -s /dev/sda unit s print' is like:
        # --------------------
        # Disk /dev/hda: 16777215s
        # Sector size (logical/physical): 512B/512B
        # Partition Table: gpt
        #
        # Number  Start     End        Size       File system  Name     Flags
        #  1      34s       409640s    409607s    fat32        primary  msftres
        #  2      409641s   4316406s   3906766s   ext2         primary
        #  3      4316407s  15625000s  11308594s  reiserfs     primary
        # --------------------
        # to_seek = "$((${src_disk_size_sec}-33+1))"
        to_skip = parted_dict['capacity'] - 32
        second_gpt_filename = short_selected_device_node + "-gpt-2nd"
        process, flat_command_string, failed_message = Utility.run(
            "Saving " + second_gpt_filename,
            ["dd", "if=" + self.selected_drive_key, "of=" + os.path.join(self.dest_dir, second_gpt_filename),
             "skip=" + str(to_skip), "bs=512", "count=33"],
            use_c_locale=False,
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        # LC_ALL=C sgdisk -b $target_dir_fullpath/$(to_filename ${ihd})-gpt.gdisk /dev/$ihd | tee --append ${OCS_LOGFILE}
        gdisk_filename = short_selected_device_node + "-gpt.gdisk"
        process, flat_command_string, failed_message = Utility.run(
            "Saving " + gdisk_filename,
            ["sgdisk", "--backup", os.path.join(self.dest_dir, gdisk_filename), self.selected_drive_key],
            use_c_locale=True,
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        sgdisk_filename = short_selected_device_node + "-gpt.sgdisk"
        process, flat_command_string, failed_message = Utility.run(
            "Saving " + sgdisk_filename,
            ["sgdisk", "--print", self.selected_drive_key],
            use_c_locale=True,
            output_filepath=os.path.join(self.dest_dir, sgdisk_filename),
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return
    elif "msdos" == partition_table:
        # image_save
        first_partition_key, first_partition_offset_bytes = CombinedDriveState.get_first_partition(
            self.partitions_to_backup)
        # Maximum hidden data to backup is 1024MB
        hidden_data_after_mbr_limit = 1024 * 1024 * 1024
        if first_partition_offset_bytes > hidden_data_after_mbr_limit:
            self.logger.write("Calculated very large hidden data after MBR size. Skipping")
        else:
            first_partition_offset_sectors = int(first_partition_offset_bytes / 512)
            hidden_mbr_data_filename = short_selected_device_node + "-hidden-data-after-mbr"
            # FIXME: Appears one sector too large.
            process, flat_command_string, failed_message = Utility.run(
                "Saving " + hidden_mbr_data_filename,
                ["dd", "if=" + self.selected_drive_key,
                 "of=" + os.path.join(self.dest_dir, hidden_mbr_data_filename),
                 "skip=1", "bs=512", "count=" + str(first_partition_offset_sectors)],
                use_c_locale=False,
                logger=self.logger)
            if process.returncode != 0:
                with self.summary_message_lock:
                    self.summary_message += failed_message
                GLib.idle_add(self.completed_backup, False, failed_message)
                return
    else:
        self.logger.write("Partition table is: " + partition_table)

    # Parted sees drives with direct filesystem applied as loop partition table.
    if partition_table is not None and partition_table != "loop":
        sfdisk_filename = short_selected_device_node + "-pt.sf"
        process, flat_command_string, failed_message = Utility.run(
            "Saving " + sfdisk_filename,
            ["sfdisk", "--dump", self.selected_drive_key],
            output_filepath=os.path.join(self.dest_dir, sfdisk_filename),
            use_c_locale=True,
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

    process, flat_command_string, failed_message = Utility.run(
        "Retreiving disk geometry with sfdisk ",
        ["sfdisk", "--show-geometry", self.selected_drive_key],
        use_c_locale=True,
        logger=self.logger)
    if process.returncode != 0:
        with self.summary_message_lock:
            self.summary_message += failed_message
        GLib.idle_add(self.completed_backup, False, failed_message)
        return
    geometry_dict = Sfdisk.parse_sfdisk_show_geometry(process.stdout)
    filepath = os.path.join(self.dest_dir, short_selected_device_node + "-chs.sf")
    with open(filepath, 'w') as filehandle:
        for key in geometry_dict.keys():
            output = key + "=" + str(geometry_dict[key])
            self.logger.write(output)
            filehandle.write('%s\n' % output)

    # Query all Physical Volumes (PV), Volume Group (VG) and Logical Volume (LV). See unit test for a worked example.
    # TODO: In the Rescuezilla application architecture, this LVM information is best extracted during the drive
    # TODO: query step, and then integrated into the "combined drive state" dictionary. Doing it during the backup
    # TODO: process matches how Clonezilla does it, which is sufficient for now.
    # FIXME: This section is duplicated in partitions_to_restore.py.
    # Start the Logical Volume Manager (LVM). Caller raises Exception on failure
    Lvm.start_lvm2(self.logger)
    relevant_vg_name_dict = {}
    vg_state_dict = Lvm.get_volume_group_state_dict(self.logger)
    for partition_key in list(self.partitions_to_backup.keys()):
        for report_dict in vg_state_dict['report']:
            for vg_dict in report_dict['vg']:
                if 'pv_name' in vg_dict.keys() and partition_key == vg_dict['pv_name']:
                    if 'vg_name' in vg_dict.keys():
                        vg_name = vg_dict['vg_name']
                    else:
                        GLib.idle_add(ErrorMessageModalPopup.display_nonfatal_warning_message, self.builder,
                                      "Could not find volume group name vg_name in " + str(vg_dict))
                        # TODO: Re-evaluate how exactly Clonezilla uses /NOT_FOUND and whether introducing it here
                        # TODO: could improve Rescuezilla/Clonezilla interoperability.
                        continue
                    if 'pv_uuid' in vg_dict.keys():
                        pv_uuid = vg_dict['pv_uuid']
                    else:
                        GLib.idle_add(ErrorMessageModalPopup.display_nonfatal_warning_message, self.builder,
                                      "Could not find physical volume UUID pv_uuid in " + str(vg_dict))
                        continue
                    relevant_vg_name_dict[vg_name] = partition_key
                    lvm_vg_dev_list_filepath = os.path.join(self.dest_dir, "lvm_vg_dev.list")
                    with open(lvm_vg_dev_list_filepath, 'a+') as filehandle:
                        filehandle.write(vg_name + " " + partition_key + " " + pv_uuid + "\n")

    lv_state_dict = Lvm.get_logical_volume_state_dict(self.logger)
    for report_dict in lv_state_dict['report']:
        for lv_dict in report_dict['lv']:
            # Only consider VGs that match the partitions to backup list
            if 'vg_name' in lv_dict.keys() and lv_dict['vg_name'] in relevant_vg_name_dict.keys():
                vg_name = lv_dict['vg_name']
                if 'lv_path' in lv_dict.keys():
                    lv_path = lv_dict['lv_path']
                else:
                    GLib.idle_add(ErrorMessageModalPopup.display_nonfatal_warning_message, self.builder,
                                  "Could not find lv_path name in " + str(lv_dict))
                    continue
                file_command_process, flat_command_string, failed_message = Utility.run(
                    "logical volume file info",
                    ["file", "--dereference", "--special-files", lv_path],
                    use_c_locale=True,
                    logger=self.logger)
                if file_command_process.returncode != 0:
                    with self.summary_message_lock:
                        self.summary_message += failed_message
                    GLib.idle_add(self.completed_backup, False, failed_message)
                    return
                output = file_command_process.stdout.split(" ", maxsplit=1)[1].strip()
                lvm_logv_list_filepath = os.path.join(self.dest_dir, "lvm_logv.list")
                # Append to file
                with open(lvm_logv_list_filepath, 'a+') as filehandle:
                    filehandle.write(lv_path + " " + output + "\n")
                if 'lv_dm_path' in lv_dict.keys():
                    # Device mapper path, eg /dev/mapper/vgtest-lvtest
                    lv_dm_path = lv_dict['lv_dm_path']
                else:
                    GLib.idle_add(self.completed_backup, False,
                                  "Could not find lv_dm_path name in " + str(lv_dict))
                    return
                if lv_dm_path in self.drive_state.keys() and 'partitions' in self.drive_state[lv_dm_path].keys():
                    # Remove the partition key associated with the volume group that contains this LVM logical volume
                    # eg, /dev/sdc1 with detected filesystem, and replace it with the logical volume filesystem.
                    # In other words, don't backup both the /dev/sdc1 device node AND the /dev/mapper node.
                    long_partition_key = relevant_vg_name_dict[lv_dict['vg_name']]
                    self.partitions_to_backup.pop(long_partition_key, None)
                    for logical_volume in self.drive_state[lv_dm_path]['partitions'].keys():
                        # Use the system drive state to get the exact filesystem for this /dev/mapper/ node,
                        # as derived from multiple sources (parted, lsblk etc) like how Clonezilla does it.
                        self.partitions_to_backup[lv_path] = self.drive_state[lv_dm_path]['partitions'][logical_volume]
                        self.partitions_to_backup[lv_path]['type'] = 'part'
                lvm_vgname_filepath = os.path.join(self.dest_dir, "lvm_" + vg_name + ".conf")
                # TODO: Evaluate the Clonezilla message from 2013 message that this command won't work on NFS
                # TODO: due to a vgcfgbackup file lock issue.
                vgcfgbackup_process, flat_command_string, failed_message = Utility.run(
                    "Saving LVM VG config " + lvm_vgname_filepath,
                    ["vgcfgbackup", "--file", lvm_vgname_filepath, vg_name],
                    use_c_locale=True,
                    logger=self.logger)
                if vgcfgbackup_process.returncode != 0:
                    with self.summary_message_lock:
                        self.summary_message += failed_message
                    GLib.idle_add(self.completed_backup, False, failed_message)
                    return

    filepath = os.path.join(self.dest_dir, "dev-fs.list")
    with open(filepath, 'w') as filehandle:
        filehandle.write('# <Device name>   <File system>\n')
        filehandle.write('# The file systems detected below are a combination of several sources. The values may differ from `blkid` and `parted`.\n')
        for partition_key in self.partitions_to_backup.keys():
            filesystem = self.partitions_to_backup[partition_key]['filesystem']
            filehandle.write('%s %s\n' % (partition_key, filesystem))

    partition_number = 0
    for partition_key in self.partitions_to_backup.keys():
        partition_number += 1
        total_progress_float = Utility.calculate_progress_ratio(0, partition_number,
                                                                len(self.partitions_to_backup.keys()))
        GLib.idle_add(self.update_backup_progress_bar, total_progress_float)

        is_unmounted, message = Utility.umount_warn_on_busy(partition_key)
        if not is_unmounted:
            self.logger.write(message)
            with self.summary_message_lock:
                self.summary_message += message + "\n"
            GLib.idle_add(self.completed_backup, False, message)
            # BUGFIX: every other fatal error path returns; without this return the
            # backup would continue on a partition that is still mounted.
            return

        short_device_node = re.sub('/dev/', '', partition_key)
        short_device_node = re.sub('/', '-', short_device_node)
        filesystem = self.partitions_to_backup[partition_key]['filesystem']

        if 'type' in self.partitions_to_backup[partition_key].keys() and 'extended' in \
                self.partitions_to_backup[partition_key]['type']:
            self.logger.write("Detected " + partition_key + " as extended partition. Backing up EBR")
            filepath = os.path.join(self.dest_dir, short_device_node + "-ebr")
            process, flat_command_string, failed_message = Utility.run(
                "Saving " + filepath,
                ["dd", "if=" + partition_key, "of=" + filepath, "bs=512", "count=1"],
                use_c_locale=False,
                logger=self.logger)
            if process.returncode != 0:
                with self.summary_message_lock:
                    self.summary_message += failed_message
                GLib.idle_add(self.completed_backup, False, failed_message)
                return

        if filesystem == 'swap':
            filepath = os.path.join(self.dest_dir, "swappt-" + short_device_node + ".info")
            with open(filepath, 'w') as filehandle:
                uuid = ""
                label = ""
                if 'uuid' in self.partitions_to_backup[partition_key].keys():
                    uuid = self.partitions_to_backup[partition_key]['uuid']
                if 'label' in self.partitions_to_backup[partition_key].keys():
                    label = self.partitions_to_backup[partition_key]['label']
                filehandle.write('UUID="%s"\n' % uuid)
                filehandle.write('LABEL="%s"\n' % label)
            with self.summary_message_lock:
                self.summary_message += _(
                    "Successful backup of swap partition {partition_name}"
                ).format(partition_name=partition_key) + "\n"
            continue

        # Clonezilla uses -q2 priority by default (partclone > partimage > dd).
        # PartImage does not appear to be maintained software, so for simplicity, Rescuezilla is using a
        # partclone > partclone.dd priority
        # [1] https://clonezilla.org/clonezilla-live/doc/01_Save_disk_image/advanced/09-advanced-param.php
        # Expand upon Clonezilla's ocs-get-comp-suffix() function
        compression_suffix = "gz"
        split_size = "4GB"
        # Partclone dd blocksize (16MB)
        partclone_dd_bs = "16777216"
        # TODO: Re-enable APFS support -- currently partclone Apple Filesystem is not used because it's too unstable [1]
        # [1] https://github.com/rescuezilla/rescuezilla/issues/65
        if shutil.which("partclone." + filesystem) is not None and filesystem != "apfs":
            partclone_cmd_list = ["partclone." + filesystem, "--logfile", "/var/log/partclone.log",
                                  "--clone", "--source", partition_key, "--output", "-"]
            filepath = os.path.join(self.dest_dir,
                                    short_device_node + "." + filesystem + "-ptcl-img." + compression_suffix + ".")
            split_cmd_list = ["split", "--suffix-length=2", "--bytes=" + split_size, "-", filepath]
        elif shutil.which("partclone.dd") is not None:
            partclone_cmd_list = ["partclone.dd", "--buffer_size=" + partclone_dd_bs, "--logfile",
                                  "/var/log/partclone.log", "--source", partition_key, "--output", "-"]
            filepath = os.path.join(self.dest_dir,
                                    short_device_node + ".dd-ptcl-img." + compression_suffix + ".")
            split_cmd_list = ["split", "--suffix-length=2", "--bytes=" + split_size, "-", filepath]
        else:
            GLib.idle_add(self.completed_backup, False, "Partclone not found.")
            return

        filesystem_backup_message = _(
            "Backup {partition_name} containing filesystem {filesystem} to {destination}"
        ).format(partition_name=partition_key, filesystem=filesystem, destination=filepath)
        GLib.idle_add(self.update_main_statusbar, filesystem_backup_message)
        self.logger.write(filesystem_backup_message)

        # partclone | gzip | split pipeline, wired together via stdin/stdout pipes.
        gzip_cmd_list = ["gzip", "--stdout"]
        self.proc['partclone_backup_' + partition_key] = subprocess.Popen(partclone_cmd_list,
                                                                          stdout=subprocess.PIPE,
                                                                          stderr=subprocess.PIPE,
                                                                          env=env,
                                                                          encoding='utf-8')
        self.proc['gzip_' + partition_key] = subprocess.Popen(gzip_cmd_list,
                                                              stdin=self.proc['partclone_backup_' + partition_key].stdout,
                                                              stdout=subprocess.PIPE,
                                                              env=env,
                                                              encoding='utf-8')
        self.proc['split_' + partition_key] = subprocess.Popen(split_cmd_list,
                                                               stdin=self.proc['gzip_' + partition_key].stdout,
                                                               stdout=subprocess.PIPE,
                                                               env=env,
                                                               encoding='utf-8')

        # Process partclone output. Partclone outputs an update every 3 seconds, so processing the data
        # on the current thread, for simplicity.
        # Poll process.stdout to show stdout live
        while True:
            if self.requested_stop:
                return
            output = self.proc['partclone_backup_' + partition_key].stderr.readline()
            if self.proc['partclone_backup_' + partition_key].poll() is not None:
                break
            if output:
                temp_dict = Partclone.parse_partclone_output(output)
                if 'completed' in temp_dict.keys():
                    total_progress_float = Utility.calculate_progress_ratio(
                        temp_dict['completed'] / 100.0, partition_number,
                        len(self.partitions_to_backup.keys()))
                    GLib.idle_add(self.update_backup_progress_bar, total_progress_float)
                if 'remaining' in temp_dict.keys():
                    GLib.idle_add(self.update_backup_progress_status,
                                  filesystem_backup_message + "\n\n" + output)
        rc = self.proc['partclone_backup_' + partition_key].poll()

        self.proc['partclone_backup_' + partition_key].stdout.close()  # Allow p1 to receive a SIGPIPE if p2 exits.
        self.proc['gzip_' + partition_key].stdout.close()  # Allow p2 to receive a SIGPIPE if p3 exits.
        output, err = self.proc['partclone_backup_' + partition_key].communicate()
        self.logger.write("Exit output " + str(output) + "stderr " + str(err))
        if self.proc['partclone_backup_' + partition_key].returncode != 0:
            partition_summary = _(
                "<b>Failed to backup partition</b> {partition_name}"
            ).format(partition_name=partition_key) + "\n"
            with self.summary_message_lock:
                self.summary_message += partition_summary
            self.at_least_one_non_fatal_error = True
            proc_stdout = self.proc['partclone_backup_' + partition_key].stdout
            proc_stderr = self.proc['partclone_backup_' + partition_key].stderr
            extra_info = "\nThe command used internally was:\n\n" + flat_command_string + "\n\n" + "The output of the command was: " + str(
                proc_stdout) + "\n\n" + str(proc_stderr)
            compression_stderr = self.proc['gzip_' + partition_key].stderr
            if compression_stderr is not None and compression_stderr != "":
                extra_info += "\n\n" + str(gzip_cmd_list) + " stderr: " + compression_stderr
            # TODO: Try to backup again, but using partclone.dd
            GLib.idle_add(ErrorMessageModalPopup.display_nonfatal_warning_message, self.builder,
                          partition_summary + extra_info)
        else:
            with self.summary_message_lock:
                self.summary_message += _(
                    "Successful backup of partition {partition_name}"
                ).format(partition_name=partition_key) + "\n"

        # GLib.idle_add(self.update_progress_bar, (i + 1) / len(self.restore_mapping_dict.keys()))
        if self.requested_stop:
            return

        progress_ratio = i / len(self.partitions_to_backup.keys())
        i += 1
        # Display 100% progress for user
        GLib.idle_add(self.update_backup_progress_bar, progress_ratio)
        sleep(1.0)

    """
            partclone_cmd_list = ["partclone", "--logfile", "/tmp/rescuezilla_logfile.txt", "--overwrite", "/dev/"]

    if [ "$fs_p" != "dd" ]; then
    cmd_partclone="partclone.${fs_p} $PARTCLONE_SAVE_OPT -L $partclone_img_info_tmp -c -s $source_dev --output - | $compress_prog_opt"
    else
    # Some parameters for partclone.dd are not required. Here "-c" is not provided by partclone.dd when saving.
    cmd_partclone="partclone.${fs_p} $PARTCLONE_SAVE_OPT --buffer_size ${partclone_dd_bs} -L $partclone_img_info_tmp -s $source_dev --output - | $compress_prog_opt"
    fi
    case "$VOL_LIMIT" in
    [1-9]*)
       # $tgt_dir/${tgt_file}.${fs_pre}-img. is prefix, the last "." is necessary
       # make the output file is like hda1.${fs_pre}-img.aa, hda1.${fs_pre}-img.ab.
       # We do not add -d to make it like hda1.${fs_pre}-img.00, hda1.${fs_pre}-img.01, since it will confuse people that it looks like created by partimage (hda1.${fs_pre}-img.000, hda1.${fs_pre}-img.001)
       cmd_partclone="${cmd_partclone} | split -a $split_suf_len -b ${VOL_LIMIT}MB - $tgt_dir/$(to_filename ${tgt_file}).${fs_pre}-img.${comp_suf}. 2> $split_error"
       ;;
    *)
       cmd_partclone="${cmd_partclone} > $tgt_dir/$(to_filename ${tgt_file}).${fs_pre}-img.${comp_suf} 2> $split_error"
       ;;
    esac
    echo "Run partclone: $cmd_partclone" | tee --append ${OCS_LOGFILE}
    LC_ALL=C eval "(${cmd_partclone} && exit \${PIPESTATUS[0]})"

    cmd_partimage = "partimage $DEFAULT_PARTIMAGE_SAVE_OPT $PARTIMAGE_SAVE_OPT -B gui=no save $source_dev stdout | $compress_prog_opt"
    #case
    #"$VOL_LIMIT" in
    #[1 - 9] *)
    # "$tgt_dir/${tgt_file}." is prefix, the last "." is necessary
    # make the output file is like hda1.aa, hda1.ab.
    # We do not add -d to make it like hda1.00, hda1.01, since it will confuse people that it looks like created by partimage (hda1.000, hda1.001)
    cmd_partimage = "${cmd_partimage} | split -a $split_suf_len -b ${VOL_LIMIT}MB - $tgt_dir/${tgt_file}."
    """

    # Do checksum
    # IMG_ID=$(LC_ALL=C sha512sum $img_dir/clonezilla-img | awk -F" " '{print $1}')" >> $img_dir/Info-img-id.txt
    GLib.idle_add(self.completed_backup, True, "")
def _do_drive_query(self):
    """Query the system's drives, partitions and filesystems and populate
    self.drive_state with the combined result.

    Combines the output of lsblk, blkid, os-prober, parted and sfdisk —
    mirroring Clonezilla's multi-tool approach for best compatibility — then
    schedules a GTK idle callback to refresh the drive-selection table.

    Side effects: sets self.drive_state; runs external commands; prints
    progress/diagnostics to stdout.
    """
    env_C_locale = Utility.get_env_C_locale()
    drive_query_start_time = datetime.now()
    lsblk_cmd_list = [
        "lsblk", "-o", "KNAME,NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL",
        "--paths", "--bytes", "--json"
    ]
    blkid_cmd_list = ["blkid"]
    os_prober_cmd_list = ["os-prober"]

    lsblk_json_dict = {}
    blkid_dict = {}
    os_prober_dict = {}
    parted_dict_dict = collections.OrderedDict([])
    sfdisk_dict_dict = collections.OrderedDict([])

    # Clonezilla combines drive, partition and filesystem from multiple data sources (lsblk, blkid, parted etc)
    # Rescuezilla continues this approach to reach best possible Clonezilla compatibility.
    #
    # However this sequential querying is slow. A parallel approach should be in theory much faster (but might be
    # less reliable if internal commands are creating file locks etc.)
    #
    # In practice, the sequential approach was about 25% faster than a first-cut (polling-based) parallel approach.
    # Parallel mode currently disabled, but kept for further development/analysis.
    mode = "sequential-drive-query"
    if mode == "sequential-drive-query":
        print("Running drive query in sequential mode")
        lsblk_stdout, lsblk_stderr, lsblk_return_code = Utility.run_external_command(
            lsblk_cmd_list, self.temp_callback, env_C_locale)
        lsblk_json_dict = json.loads(lsblk_stdout)
        blkid_stdout, blkid_stderr, blkid_return_code = Utility.run_external_command(
            blkid_cmd_list, self.temp_callback, env_C_locale)
        blkid_dict = Blkid.parse_blkid_output(blkid_stdout)
        # Use os-prober to get OS information (running WITH original locale information)
        os_prober_stdout, os_prober_stderr, os_prober_return_code = Utility.run_external_command(
            os_prober_cmd_list, self.temp_callback, os.environ.copy())
        os_prober_dict = OsProber.parse_os_prober_output(os_prober_stdout)
        for lsblk_dict in lsblk_json_dict['blockdevices']:
            partition_longdevname = lsblk_dict['name']
            print("Going to run parted and sfdisk on " + partition_longdevname)
            try:
                parted_stdout, parted_stderr, parted_return_code = Utility.run_external_command(
                    self._get_parted_cmd_list(partition_longdevname),
                    self.temp_callback, env_C_locale)
                if "unrecognized disk label" not in parted_stderr:
                    parted_dict_dict[
                        partition_longdevname] = Parted.parse_parted_output(
                            parted_stdout)
                else:
                    print("Parted says " + parted_stderr)
                sfdisk_stdout, sfdisk_stderr, sfdisk_return_code = Utility.run_external_command(
                    self._get_sfdisk_cmd_list(partition_longdevname),
                    self.temp_callback, env_C_locale)
                sfdisk_dict_dict[
                    partition_longdevname] = Sfdisk.parse_sfdisk_dump_output(
                        sfdisk_stdout)
            except Exception as e:
                # Best-effort: a failure on one device must not abort the
                # whole query.  (Fixed typo "Could run run"; include detail.)
                print("Could not run parted on " + partition_longdevname +
                      ": " + str(e))
    elif mode == "parallel-drive-query":
        print("Running drive query in parallel mode")
        # Launch drive query in parallel. Parallel Python subprocess.Popen() approach adapted from [1]
        # [1] https://stackoverflow.com/a/636601
        cmd_dict = {
            ('lsblk', ""): subprocess.Popen(lsblk_cmd_list,
                                            env=env_C_locale,
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            encoding="utf-8",
                                            universal_newlines=True),
            ('blkid', ""): subprocess.Popen(blkid_cmd_list,
                                            env=env_C_locale,
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.PIPE,
                                            encoding="utf-8",
                                            universal_newlines=True),
            ('os_prober', ""): subprocess.Popen(os_prober_cmd_list,
                                                env=env_C_locale,
                                                stdout=subprocess.PIPE,
                                                stderr=subprocess.PIPE,
                                                encoding="utf-8",
                                                universal_newlines=True),
        }
        while cmd_dict:
            print("drive_query_process is length " + str(len(cmd_dict)) +
                  " with contents " + str(cmd_dict))
            for key in list(cmd_dict.keys()):
                proc = cmd_dict[key]
                retcode = proc.poll()
                if retcode is not None:
                    # Process finished.
                    cmd_dict.pop(key, None)
                    if key[0] == "lsblk" and retcode == 0:
                        # lsblk is complete, partition information can be used to launch the parted/sfdisk
                        lsblk_json_dict = json.loads(proc.stdout.read())
                        for lsblk_dict in lsblk_json_dict['blockdevices']:
                            partition_longdevname = lsblk_dict['name']
                            print("Launching parted and sfdisk on " +
                                  partition_longdevname)
                            try:
                                # BUGFIX: stdout/stderr must be captured via
                                # PIPE, otherwise proc.stdout/proc.stderr are
                                # None and the parsing branches below can
                                # never fire.
                                cmd_dict[("parted", partition_longdevname
                                          )] = subprocess.Popen(
                                              self._get_parted_cmd_list(
                                                  partition_longdevname),
                                              env=env_C_locale,
                                              stdout=subprocess.PIPE,
                                              stderr=subprocess.PIPE,
                                              encoding="utf-8",
                                              universal_newlines=True)
                                cmd_dict[("sfdisk", partition_longdevname
                                          )] = subprocess.Popen(
                                              self._get_sfdisk_cmd_list(
                                                  partition_longdevname),
                                              env=env_C_locale,
                                              stdout=subprocess.PIPE,
                                              stderr=subprocess.PIPE,
                                              encoding="utf-8",
                                              universal_newlines=True)
                            except Exception as e:
                                print("Could not launch sfdisk or parted on "
                                      + partition_longdevname + ": " + str(e))
                    elif key[0] == "blkid" and retcode == 0:
                        blkid_dict = Blkid.parse_blkid_output(
                            proc.stdout.read())
                    # BUGFIX: the process is registered under the key
                    # "os_prober" (with underscore); the old comparison
                    # against "osprober" could never match, so os-prober
                    # output was silently dropped in parallel mode.
                    elif key[0] == "os_prober" and retcode == 0:
                        os_prober_dict = OsProber.parse_os_prober_output(
                            proc.stdout.read())
                    elif key[
                            0] == "sfdisk" and retcode == 0 and proc.stdout is not None:
                        sfdisk_dict_dict[
                            key[1]] = Sfdisk.parse_sfdisk_dump_output(
                                proc.stdout.read())
                    elif key[
                            0] == "parted" and retcode == 0 and proc.stdout is not None:
                        if proc.stderr is not None:
                            stderr = proc.stderr.read()
                            # BUGFIX: key is a tuple, so concatenating it
                            # directly with a str raised TypeError.
                            print("parted with key " + str(key) +
                                  " had stderr " + stderr)
                            if "unrecognized disk label" not in stderr:
                                parted_dict_dict[
                                    key[1]] = Parted.parse_parted_output(
                                        proc.stdout.read())
                    else:
                        print(
                            "COULD NOT PROCESS process launched with key " +
                            str(key) + " return code" + str(retcode))
                        if proc.stdout is not None:
                            print("stdout:" + proc.stdout.read())
                        if proc.stderr is not None:
                            print(" stderr:" + proc.stderr.read())
                else:
                    # No process is done, wait a bit and check again.
                    time.sleep(0.1)
                    continue
    else:
        raise Exception("Invalid drive query mode")
    self.drive_state = CombinedDriveState.construct_combined_drive_state_dict(
        lsblk_json_dict, blkid_dict, os_prober_dict, parted_dict_dict,
        sfdisk_dict_dict)
    pp = pprint.PrettyPrinter(indent=4)
    pp.pprint(self.drive_state)
    drive_query_end_time = datetime.now()
    print("Drive query took: " +
          str((drive_query_end_time - drive_query_start_time)))
    # Hand the UI refresh back to the GTK main loop thread.
    GLib.idle_add(self.populate_drive_selection_table)