def test_geometry_parsing(self):
        input_sfdisk_geometry = "/dev/sda: 12345 cylinders, 255 heads, 63 sectors/track"
        geometry_dict = Sfdisk.parse_sfdisk_show_geometry(input_sfdisk_geometry)

        expected_geometry_dict = {
            'cylinders': 12345,
            'heads': 255,
            'sectors': 63
        }
        self.assertDictEqual(expected_geometry_dict, geometry_dict)
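Sfdisk.parse_sfdisk_show_geometry itself is not shown on this page. A minimal sketch of how such a parser could work, assuming a single regex over the sfdisk --show-geometry output line (the function name and details below are illustrative, not Rescuezilla's actual implementation):

import re

def parse_sfdisk_show_geometry_sketch(line):
    # Expects a line like: "/dev/sda: 12345 cylinders, 255 heads, 63 sectors/track"
    geometry = {}
    match = re.search(r"(\d+)\s+cylinders,\s+(\d+)\s+heads,\s+(\d+)\s+sectors/track", line)
    if match:
        geometry['cylinders'] = int(match.group(1))
        geometry['heads'] = int(match.group(2))
        geometry['sectors'] = int(match.group(3))
    return geometry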
Example #2
    def query_drive_chs_geometry(long_device_node, image_sfdisk_geometry_dict,
                                 logger):
        edd_geometry_dict = None
        sfdisk_geometry_dict = None
        is_edd_geometry = False
        return_message = ""
        # First try getting the geometry from the EDD
        is_success, value = ChsUtilities._get_chs_geometry_from_edd(
            long_device_node, logger)
        if is_success:
            edd_geometry_dict = value
            print("edd geometry dict " + long_device_node + ": " +
                  str(edd_geometry_dict))
        else:
            # Save return message
            return_message = value

        # Clonezilla's -e (--load-geometry) option is not set by default. The option sets the
        # load_HD_CHS_from_img variable, which requests loading the CHS geometry from the -chs.sf file within the
        # image. This data is generally not relevant for the destination drive, which may have different geometry
        # from the original drive. TODO: Consider implementing this option (using image_sfdisk_geometry_dict)

        # When load_HD_CHS_from_img is not set, Clonezilla queries the destination drive's geometry using sfdisk.

        # The --show-geometry call takes just a fraction of a second to run, so unlike Clonezilla, always run it:
        # that places both values in the log file, which may be useful for debugging.
        process, flat_command_string, failed_message = Utility.run(
            "Retreiving disk geometry with sfdisk",
            ["sfdisk", "--show-geometry", long_device_node],
            use_c_locale=True,
            logger=logger)
        if process.returncode == 0:
            sfdisk_geometry_dict = Sfdisk.parse_sfdisk_show_geometry(
                process.stdout)
            print("sfdisk geometry dict " + long_device_node + ": " +
                  str(sfdisk_geometry_dict))
        else:
            return_message += failed_message

        geometry_dict = None
        if edd_geometry_dict is not None and len(edd_geometry_dict) != 0:
            is_edd_geometry = True
            geometry_dict = edd_geometry_dict
        elif sfdisk_geometry_dict is not None and len(
                sfdisk_geometry_dict) != 0:
            is_edd_geometry = False
            geometry_dict = sfdisk_geometry_dict

        return geometry_dict, is_edd_geometry, return_message
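ChsUtilities._get_chs_geometry_from_edd is not shown on this page. On Linux, BIOS EDD data is exposed under /sys/firmware/edd/ when the edd kernel module is loaded. A rough sketch of reading the legacy CHS attributes of one EDD entry (an assumption-based illustration only; the real helper must also match the device node to an EDD entry, for example via the mbr_signature attribute, and return an error message on failure rather than None):

import os

def read_edd_chs_sketch(edd_entry="int13_dev80"):
    # Reads BIOS EDD legacy geometry from sysfs, e.g. /sys/firmware/edd/int13_dev80/.
    # Note: these attributes are maxima, so a real implementation would likely add 1
    # to the cylinder and head values to obtain the geometry counts.
    base = os.path.join("/sys/firmware/edd", edd_entry)
    attribute_files = {
        'cylinders': "legacy_max_cylinder",
        'heads': "legacy_max_head",
        'sectors': "legacy_sectors_per_track",
    }
    geometry = {}
    for key, filename in attribute_files.items():
        path = os.path.join(base, filename)
        if not os.path.exists(path):
            return None
        with open(path) as f:
            geometry[key] = int(f.read().strip())
    return geometry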
    def test_sfdisk_parsing_many_fields(self):
        input_sfdisk_string = """label: gpt
label-id: A9A88DF9-EA91-4E83-8616-0C1BB3BA28BA
device: /dev/sda
unit: sectors
first-lba: 34
last-lba: 104857566
sector-size: 512

/dev/sda1 : start=        2048, size=     1083392, type=DE94BBA4-06D1-4D40-A16A-BFD50179D6AC, uuid=E3E94AE6-C2AB-495A-A955-A32140F56C2A, name="Basic data partition", attrs="RequiredPartition GUID:63"
/dev/sda2 : start=     1085440, size=      204800, type=C12A7328-F81F-11D2-BA4B-00A0C93EC93B, uuid=7423670F-D0C3-4724-82F7-3185350A1BF7, name="EFI system partition", attrs="GUID:63"
/dev/sda3 : start=     1290240, size=       32768, type=E3C9E316-0B5C-4DB8-817D-F92DF00215AE, uuid=229EF65D-3315-4824-945E-9719FEDA2F42, name="Microsoft reserved partition", attrs="GUID:63"
/dev/sda4 : start=     1323008, size=   103532544, type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=1F1C6171-D10C-44C0-BA9B-E12995D7F4DA, name="Basic data partition"""
        sfdisk_dict = Sfdisk.parse_sfdisk_dump_output(input_sfdisk_string)
        pprint.pprint(sfdisk_dict)
    def test_sfdisk_parsing(self):
        # Output of: sfdisk --dump /dev/sdb
        # (note: this is the machine-readable --dump output, not the human-readable --list output)
        input_sfdisk_string = """label: gpt
label-id: 20B6B2AC-F20E-48A7-83C5-6B684328AFAB
device: /dev/sdb
unit: sectors
first-lba: 34
last-lba: 16223214

/dev/sdb1 : start=        2048, size=     1849344, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=ABEC187C-BD81-4F2F-B8C0-0DB4892055E2
/dev/sdb2 : start=     1851392, size=     1351680, type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=9A0ECA81-6D1A-497E-8614-B70404C37918
/dev/sdb3 : start=     3203072, size=    13019136, type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=B65EDC17-2C20-46B0-BD65-C92167FFEEC2
"""
        sfdisk_dict = Sfdisk.parse_sfdisk_dump_output(input_sfdisk_string)
        expected_sfdisk_dict = {
            'label': 'gpt',
            'label-id': '20B6B2AC-F20E-48A7-83C5-6B684328AFAB',
            'device': '/dev/sdb',
            'first-lba': 34,
            'last-lba': 16223214,
            'partitions': {
                '/dev/sdb1': {
                    'start': 2048,
                    'size': 1849344,
                    'type': '0FC63DAF-8483-4772-8E79-3D69D8477DE4',
                    'uuid': 'ABEC187C-BD81-4F2F-B8C0-0DB4892055E2'
                },
                '/dev/sdb2': {
                    'start': 1851392,
                    'size': 1351680,
                    'type': 'EBD0A0A2-B9E5-4433-87C0-68B6B72699C7',
                    'uuid': '9A0ECA81-6D1A-497E-8614-B70404C37918'
                },
                '/dev/sdb3': {
                    'start': 3203072,
                    'size': 13019136,
                    'type': 'EBD0A0A2-B9E5-4433-87C0-68B6B72699C7',
                    'uuid': 'B65EDC17-2C20-46B0-BD65-C92167FFEEC2'
                }
            }
        }
        print("actual:" + str(sfdisk_dict))
    def test_combined_drive_state(self):
        parted_dict_dict = {}
        sfdict_dict_dict = {}

        lsblk_json_output = """{
   "blockdevices": [
      {"kname":"/dev/loop0", "name":"/dev/loop0", "size":698761216, "type":"loop", "fstype":"squashfs", "mountpoint":"/rofs", "model":null},
      {"kname":"/dev/sda", "name":"/dev/sda", "size":34359738368, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sda1", "name":"/dev/sda1", "size":34357641216, "type":"part", "fstype":"ntfs", "mountpoint":"/mnt/backup", "model":null}
         ]
      },
      {"kname":"/dev/sdb", "name":"/dev/sdb", "size":1073741824, "type":"disk", "fstype":"LVM2_member", "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/dm-0", "name":"/dev/mapper/vgtest-lvtest", "size":1069547520, "type":"lvm", "fstype":"ext4", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdc", "name":"/dev/sdc", "size":1610612736, "type":"disk", "fstype":"ntfs", "mountpoint":null, "model":"VBOX_HARDDISK"},
      {"kname":"/dev/sdd", "name":"/dev/sdd", "size":2147483648, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdd1", "name":"/dev/sdd1", "size":3145728, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd2", "name":"/dev/sdd2", "size":44040192, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd4", "name":"/dev/sdd4", "size":1024, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdd5", "name":"/dev/sdd5", "size":12582912, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd6", "name":"/dev/sdd6", "size":4194304, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd7", "name":"/dev/sdd7", "size":28311552, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd8", "name":"/dev/sdd8", "size":4194304, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd9", "name":"/dev/sdd9", "size":20971520, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd10", "name":"/dev/sdd10", "size":83886080, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd11", "name":"/dev/sdd11", "size":72351744, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd12", "name":"/dev/sdd12", "size":18874368, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd13", "name":"/dev/sdd13", "size":29360128, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd14", "name":"/dev/sdd14", "size":45088768, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sde", "name":"/dev/sde", "size":2684354560, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sde1", "name":"/dev/sde1", "size":113246208, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sde2", "name":"/dev/sde2", "size":67108864, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sde3", "name":"/dev/sde3", "size":2277507072, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sde4", "name":"/dev/sde4", "size":224395264, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdf", "name":"/dev/sdf", "size":3221225472, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdf1", "name":"/dev/sdf1", "size":268435456, "type":"part", "fstype":"btrfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf2", "name":"/dev/sdf2", "size":147849216, "type":"part", "fstype":"ext2", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf3", "name":"/dev/sdf3", "size":1024, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdf5", "name":"/dev/sdf5", "size":52428800, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf6", "name":"/dev/sdf6", "size":34603008, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf7", "name":"/dev/sdf7", "size":73400320, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf8", "name":"/dev/sdf8", "size":47185920, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf9", "name":"/dev/sdf9", "size":55574528, "type":"part", "fstype":"reiser4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf10", "name":"/dev/sdf10", "size":35651584, "type":"part", "fstype":"reiserfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf11", "name":"/dev/sdf11", "size":36700160, "type":"part", "fstype":"swap", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf12", "name":"/dev/sdf12", "size":379584512, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf13", "name":"/dev/sdf13", "size":45088768, "type":"part", "fstype":"udf", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf14", "name":"/dev/sdf14", "size":68157440, "type":"part", "fstype":"xfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf15", "name":"/dev/sdf15", "size":50331648, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdf16", "name":"/dev/sdf16", "size":40894464, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdf17", "name":"/dev/sdf17", "size":11534336, "type":"part", "fstype":"minix", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf18", "name":"/dev/sdf18", "size":62914560, "type":"part", "fstype":"f2fs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf19", "name":"/dev/sdf19", "size":135266304, "type":"part", "fstype":"nilfs2", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf20", "name":"/dev/sdf20", "size":1656750080, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdg", "name":"/dev/sdg", "size":3758096384, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK"},
      {"kname":"/dev/sdh", "name":"/dev/sdh", "size":4294967296, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdh1", "name":"/dev/sdh1", "size":104857600, "type":"part", "fstype":"LVM2_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/dm-1", "name":"/dev/mapper/vgtest1-lvtest1", "size":54525952, "type":"lvm", "fstype":"vfat", "mountpoint":null, "model":null}
               ]
            },
            {"kname":"/dev/sdh2", "name":"/dev/sdh2", "size":104857600, "type":"part", "fstype":"LVM2_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/dm-3", "name":"/dev/mapper/vgtest2-lvtest2", "size":54525952, "type":"lvm", "fstype":"ntfs", "mountpoint":null, "model":null}
               ]
            },
            {"kname":"/dev/sdh3", "name":"/dev/sdh3", "size":104857600, "type":"part", "fstype":"LVM2_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/dm-2", "name":"/dev/mapper/vgtest3-lvtest3", "size":54525952, "type":"lvm", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdi", "name":"/dev/sdi", "size":8589934592, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdi1", "name":"/dev/sdi1", "size":536870912, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sdi2", "name":"/dev/sdi2", "size":255852544, "type":"part", "fstype":"ext2", "mountpoint":null, "model":null},
            {"kname":"/dev/sdi3", "name":"/dev/sdi3", "size":7795113984, "type":"part", "fstype":"crypto_LUKS", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdj", "name":"/dev/sdj", "size":53687091200, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdj1", "name":"/dev/sdj1", "size":554696704, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdj2", "name":"/dev/sdj2", "size":104857600, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sdj3", "name":"/dev/sdj3", "size":16777216, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdj4", "name":"/dev/sdj4", "size":53008662528, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdk", "name":"/dev/sdk", "size":1073741824, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdk1", "name":"/dev/sdk1", "size":1072693248, "type":"part", "fstype":"linux_raid_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/md127", "name":"/dev/md127", "size":1071644672, "type":"raid1", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdl", "name":"/dev/sdl", "size":1073741824, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdl1", "name":"/dev/sdl1", "size":1072693248, "type":"part", "fstype":"linux_raid_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/md127", "name":"/dev/md127", "size":1071644672, "type":"raid1", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdm", "name":"/dev/sdm", "size":1073741824, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdm1", "name":"/dev/sdm1", "size":1072693248, "type":"part", "fstype":"linux_raid_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/md0", "name":"/dev/md0", "size":1071644672, "type":"raid1", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdn", "name":"/dev/sdn", "size":1073741824, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdn1", "name":"/dev/sdn1", "size":1072693248, "type":"part", "fstype":"linux_raid_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/md0", "name":"/dev/md0", "size":1071644672, "type":"raid1", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdo", "name":"/dev/sdo", "size":1610612736, "type":"disk", "fstype":"ext4", "mountpoint":null, "model":"VBOX_HARDDISK"},
      {"kname":"/dev/sr0", "name":"/dev/sr0", "size":805623808, "type":"rom", "fstype":"iso9660", "mountpoint":"/cdrom", "model":"VBOX_CD-ROM"}
   ]
}"""

        lsblk_json_dict = json.loads(lsblk_json_output)

        input_blkid_string = """/dev/mapper/vgtest-lvtest: UUID="b9131c40-9742-416c-b019-8b11481a86ac" TYPE="ext4"
/dev/sda1: UUID="5C2F5F000C198BC5" TYPE="ntfs" PTTYPE="dos" PARTUUID="9aa1db09-6b7c-45a6-b392-c14518730297"
/dev/sdc: UUID="0818993868533997" TYPE="ntfs" PTTYPE="dos"
/dev/sdd1: UUID="925ef222-bc6d-4e46-a31d-af3443821ed6" TYPE="ext4" PARTUUID="1dbd2bfc-01"
/dev/sdd10: UUID="2708289c-c559-4d5c-bed1-98735f673bc0" TYPE="ext4" PARTUUID="1dbd2bfc-0a"
/dev/sdd11: UUID="c1dc5a1d-d22b-4a64-933f-5495a454eb05" TYPE="ext4" PARTUUID="1dbd2bfc-0b"
/dev/sdd12: UUID="47d1b78f-dd89-44d2-8af6-e7c01f1b635e" TYPE="ext4" PARTUUID="1dbd2bfc-0c"
/dev/sdd13: UUID="39fb1fdd-2cb8-449d-8d15-3eb7ab682677" TYPE="ext4" PARTUUID="1dbd2bfc-0d"
/dev/sdd14: UUID="f40bf79e-619e-422c-a546-ba78caa4ced7" TYPE="ext4" PARTUUID="1dbd2bfc-0e"
/dev/sdd2: UUID="206b6149-a675-4149-b368-8d7f158d9da2" TYPE="ext4" PARTUUID="1dbd2bfc-02"
/dev/sdd5: UUID="b8a96bc5-43c5-4180-a6c6-12466b544ced" TYPE="ext4" PARTUUID="1dbd2bfc-05"
/dev/sdd6: UUID="0897426b-24ee-47ed-8c85-e034205a43aa" TYPE="ext4" PARTUUID="1dbd2bfc-06"
/dev/sdd7: UUID="c4f8b878-1a4e-424b-93dd-17ea6d8e14d2" TYPE="ext4" PARTUUID="1dbd2bfc-07"
/dev/sdd8: UUID="e4fa2ad0-a206-447e-9c14-829d52c10407" TYPE="ext4" PARTUUID="1dbd2bfc-08"
/dev/sdd9: UUID="34648ad0-2fc1-47e1-8503-dc88b5152386" TYPE="ext4" PARTUUID="1dbd2bfc-09"
/dev/sde1: UUID="39DE69624116B96D" TYPE="ntfs" PTTYPE="dos" PARTUUID="4c6ec2cf-8de5-43c0-9757-41a596de5486"
/dev/sde2: UUID="B155-F891" TYPE="vfat" PARTUUID="be9f4179-560c-4bc9-8366-ae214f69a16e"
/dev/sde3: UUID="286C1DA536C63454" TYPE="ntfs" PTTYPE="dos" PARTUUID="363bdd4e-d6a1-42e6-a4da-616cd3e46952"
/dev/sde4: UUID="11d0533c-feba-4a07-ae30-7ff3720a051e" TYPE="ext4" PARTUUID="ced0279d-0096-4ebe-8425-4c6c9d46a4d2"
/dev/sdf1: UUID="c297cbb6-acb5-4d6a-9e34-af7558e120a0" UUID_SUB="695a452a-c43c-477c-8d5c-a21802e18091" TYPE="btrfs" PARTUUID="43c18652-01"
/dev/sdf12: UUID="023E47301DBC9964" TYPE="ntfs" PTTYPE="dos" PARTUUID="43c18652-0c"
/dev/sdf13: UUID="5f4b3897b65975a8" LABEL="LinuxUDF" TYPE="udf" PARTUUID="43c18652-0d"
/dev/sdf17: TYPE="minix" PARTUUID="43c18652-11"
/dev/sdf18: UUID="d73d2f4f-fba2-43ef-ad7a-c5fe877bf8a9" TYPE="f2fs" PARTUUID="43c18652-12"
/dev/sdf2: UUID="04944da2-d784-4c9f-b143-060d2776c4b1" TYPE="ext2" PARTUUID="43c18652-02"
/dev/sdf20: UUID="718E7F8E7F42D1B8" TYPE="ntfs" PTTYPE="dos" PARTUUID="43c18652-14"
/dev/sdf5: UUID="a2f8e4d6-ac83-4c57-a046-b7122c0398d5" TYPE="ext4" PARTUUID="43c18652-05"
/dev/sdf6: UUID="1297CD00121D448B" TYPE="ntfs" PTTYPE="dos" PARTUUID="43c18652-06"
/dev/sdf7: SEC_TYPE="msdos" UUID="CF57-1227" TYPE="vfat" PARTUUID="43c18652-07"
/dev/sdf8: UUID="CF6A-B2D0" TYPE="vfat" PARTUUID="43c18652-08"
/dev/sdi1: UUID="F5A2-3D31" TYPE="vfat" PARTUUID="b227e8b3-c8ea-448f-9657-53670575e6a8"
/dev/sdi2: UUID="80a2000c-c375-4a74-b6f9-2f1e1c7a8958" TYPE="ext2" PARTUUID="a3b889cb-31af-469d-a584-34edb323c62a"
/dev/sdj1: LABEL="Recovery" UUID="5C22168F22166E70" TYPE="ntfs" PARTLABEL="Basic data partition" PARTUUID="e3e94ae6-c2ab-495a-a955-a32140f56c2a"
/dev/sdj2: UUID="2C16-C81E" TYPE="vfat" PARTLABEL="EFI system partition" PARTUUID="7423670f-d0c3-4724-82f7-3185350a1bf7"
/dev/sdj4: UUID="DA40176E4017511D" TYPE="ntfs" PARTLABEL="Basic data partition" PARTUUID="1f1c6171-d10c-44c0-ba9b-e12995d7f4da"
/dev/sdk1: UUID="b4b3109e-816d-bcee-66c4-151e60fd8e23" UUID_SUB="cbb347a0-e478-39be-5dae-8b2955375ff6" LABEL="ubuntu:0" TYPE="linux_raid_member" PARTUUID="edb03a25-01"
/dev/sdl1: UUID="b4b3109e-816d-bcee-66c4-151e60fd8e23" UUID_SUB="17ee1620-2ccc-c266-1cd8-a567f6896d7a" LABEL="ubuntu:0" TYPE="linux_raid_member" PARTUUID="c6774609-01"
/dev/sr0: UUID="2020-09-04-10-27-14-00" LABEL="Rescuezilla" TYPE="iso9660" PTTYPE="PMBR"
/dev/loop0: TYPE="squashfs"
/dev/sdb: UUID="i20UTQ-OaX3-c6nB-CiBv-Gav1-hgVf-tEkO2W" TYPE="LVM2_member"
/dev/sdf9: UUID="0ab2eb0a-6c94-4f78-8206-7d725bbeb4e5" TYPE="reiser4" PARTUUID="43c18652-09"
/dev/sdf10: UUID="d230da2e-5359-43cb-827e-40c48a0a572b" TYPE="reiserfs" PARTUUID="43c18652-0a"
/dev/sdf11: UUID="217c8359-2ce6-4d07-bc44-4dcae84bc089" TYPE="swap" PARTUUID="43c18652-0b"
/dev/sdf14: UUID="3211b89b-b5ce-461d-8e6e-8acfaaa4bb28" TYPE="xfs" PARTUUID="43c18652-0e"
/dev/sdf19: UUID="957acceb-12f3-4574-ac1f-fb6e53f1a22f" TYPE="nilfs2" PARTUUID="43c18652-13"
/dev/sdh1: UUID="aNcRGF-4HyS-UoFu-aNpE-tXKI-nyeu-LiKaTm" TYPE="LVM2_member" PARTUUID="c64c40a3-4eec-4b86-820a-f8068ad3f686"
/dev/sdh2: UUID="cwuIbV-pb5s-9whu-eezX-WiZU-4wlY-gvuLZS" TYPE="LVM2_member" PARTUUID="45e285ea-caa3-4fc7-a7fe-9727d3198f09"
/dev/sdh3: UUID="jStAm5-tt1J-6uRa-HElo-EROm-DgjD-B5OJ3B" TYPE="LVM2_member" PARTUUID="4a014da5-75ca-45b2-9b62-7e3380c14570"
/dev/sdi3: UUID="17147edc-1e54-4300-b47a-01b138581512" TYPE="crypto_LUKS" PARTUUID="5f64fbd4-dcad-4467-8a8b-f3e009871661"
/dev/md127: UUID="0ca32d11-af2c-4512-9e68-bef318870149" TYPE="ext4"
/dev/mapper/vgtest1-lvtest1: SEC_TYPE="msdos" UUID="0559-959C" TYPE="vfat"
/dev/mapper/vgtest3-lvtest3: UUID="846a2cbd-b040-4afd-bb1c-8ecd6e15f0c2" TYPE="ext4"
/dev/mapper/vgtest2-lvtest2: UUID="588E895406ECC468" TYPE="ntfs" PTTYPE="dos"
/dev/sdf15: PARTUUID="43c18652-0f"
/dev/sdf16: PARTUUID="43c18652-10"
/dev/sdj3: PARTLABEL="Microsoft reserved partition" PARTUUID="229ef65d-3315-4824-945e-9719feda2f42"
/dev/sdm1: UUID="75515f3b-95ea-ef00-e327-c48e2784e416" UUID_SUB="52b46420-82ff-7e66-ff87-4195a846f804" LABEL="ubuntu:0" TYPE="linux_raid_member" PARTUUID="e02572d4-01"
/dev/sdn1: UUID="75515f3b-95ea-ef00-e327-c48e2784e416" UUID_SUB="5a61afd1-e3eb-d319-f6ca-a0135c0889de" LABEL="ubuntu:0" TYPE="linux_raid_member" PARTUUID="1e066523-01"
/dev/md0: UUID="42ba6b53-6752-4ca7-b5a7-95a5e766ce97" BLOCK_SIZE="4096" TYPE="ext4"
/dev/sdo: UUID="642af36d-7695-4376-a6f9-a35a15552e33" BLOCK_SIZE="4096" TYPE="ext4" """
        blkid_dict = Blkid.parse_blkid_output(input_blkid_string)

        os_prober_contents = """/dev/sdc2@/efi/Microsoft/Boot/bootmgfw.efi:Windows Boot Manager:Windows:efi
        /dev/sdd1:Debian GNU/Linux 10 (buster):Debian:linux"""
        osprober_dict = OsProber.parse_os_prober_output(os_prober_contents)

        input_parted_gpt_string = """Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sde: 2684354560B
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags: 

Number  Start        End          Size         File system  Name  Flags
 1      1048576B     114294783B   113246208B   ntfs               msftdata
 2      114294784B   181403647B   67108864B    fat32              msftdata
 3      181403648B   2458910719B  2277507072B  ntfs               msftdata
 4      2458910720B  2683305983B  224395264B   ext4"""
        parted_dict_dict['/dev/sde'] = Parted.parse_parted_output(
            input_parted_gpt_string)

        input_sfdisk_gpt_string = """label: gpt
label-id: 5FA01E95-F3E8-4B92-845B-843609E4EF0D
device: /dev/sde
unit: sectors
first-lba: 34
last-lba: 5242846

/dev/sde1 : start=        2048, size=      221184, type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=4C6EC2CF-8DE5-43C0-9757-41A596DE5486
/dev/sde2 : start=      223232, size=      131072, type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=BE9F4179-560C-4BC9-8366-AE214F69A16E
/dev/sde3 : start=      354304, size=     4448256, type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=363BDD4E-D6A1-42E6-A4DA-616CD3E46952
/dev/sde4 : start=     4802560, size=      438272, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=CED0279D-0096-4EBE-8425-4C6C9D46A4D2"""
        sfdict_dict_dict['/dev/sde'] = Sfdisk.parse_sfdisk_dump_output(
            input_sfdisk_gpt_string)

        input_parted_mbr_string = """Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdd: 2147483648B
Sector size (logical/physical): 512B/512B
Partition Table: msdos
Disk Flags: 

Number  Start        End          Size         Type      File system  Flags
 4      1048576B     2100297727B  2099249152B  extended
14      2097152B     47185919B    45088768B    logical   ext4
13      48234496B    77594623B    29360128B    logical   ext4
 5      78643200B    91226111B    12582912B    logical   ext4
 9      92274688B    113246207B   20971520B    logical   ext4
 6      114294784B   118489087B   4194304B     logical   ext2
 7      119537664B   147849215B   28311552B    logical   ext4
 8      148897792B   153092095B   4194304B     logical   ext2
10      154140672B   238026751B   83886080B    logical   ext4
11      239075328B   311427071B   72351744B    logical   ext4
12      312475648B   331350015B   18874368B    logical   ext4
 1      2100297728B  2103443455B  3145728B     primary   ext2
 2      2103443456B  2147483647B  44040192B    primary   ext4"""
        parted_dict_dict['/dev/sdd'] = Parted.parse_parted_output(
            input_parted_mbr_string)

        input_parted_sdm_string = """Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdm: 1073741824B
Sector size (logical/physical): 512B/512B
Partition Table: msdos
Disk Flags:

Number  Start     End          Size         Type     File system  Flags
 1      1048576B  1073741823B  1072693248B  primary               raid

"""
        parted_dict_dict['/dev/sdm'] = Parted.parse_parted_output(
            input_parted_sdm_string)

        input_parted_sdn_string = """
Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdn: 1073741824B
Sector size (logical/physical): 512B/512B
Partition Table: msdos
Disk Flags:

Number  Start     End          Size         Type     File system  Flags
 1      1048576B  1073741823B  1072693248B  primary               raid

"""
        parted_dict_dict['/dev/sdn'] = Parted.parse_parted_output(
            input_parted_sdn_string)

        input_parted_fs_directly_on_disk_string = """
Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdo: 1610612736B
Sector size (logical/physical): 512B/512B
Partition Table: loop
Disk Flags:

Number  Start  End          Size         File system  Flags
 1      0B     1610612735B  1610612736B  ext4

"""
        parted_dict_dict['/dev/sdo'] = Parted.parse_parted_output(
            input_parted_fs_directly_on_disk_string)

        input_sfdisk_sdm_string = """label: dos
label-id: 0xe02572d4
device: /dev/sdm
unit: sectors
sector-size: 512

/dev/sdm1 : start=        2048, size=     2095104, type=fd"""
        sfdict_dict_dict['/dev/sdm'] = Sfdisk.parse_sfdisk_dump_output(
            input_sfdisk_sdm_string)

        input_sfdisk_sdn_string = """label: dos
label-id: 0x1e066523
device: /dev/sdn
unit: sectors
sector-size: 512

/dev/sdn1 : start=        2048, size=     2095104, type=fd"""
        sfdict_dict_dict['/dev/sdn'] = Sfdisk.parse_sfdisk_dump_output(
            input_sfdisk_sdn_string)

        input_sfdisk_mbr_string = """label: dos
label-id: 0x1dbd2bfc
device: /dev/sdd
unit: sectors

/dev/sdd1 : start=     4102144, size=        6144, type=83
/dev/sdd2 : start=     4108288, size=       86016, type=83
/dev/sdd4 : start=        2048, size=     4100096, type=5
/dev/sdd5 : start=      153600, size=       24576, type=83
/dev/sdd6 : start=      223232, size=        8192, type=83
/dev/sdd7 : start=      233472, size=       55296, type=83
/dev/sdd8 : start=      290816, size=        8192, type=83
/dev/sdd9 : start=      180224, size=       40960, type=83
/dev/sdd10 : start=      301056, size=      163840, type=83
/dev/sdd11 : start=      466944, size=      141312, type=83
/dev/sdd12 : start=      610304, size=       36864, type=83
/dev/sdd13 : start=       94208, size=       57344, type=83
/dev/sdd14 : start=        4096, size=       88064, type=83 """
        sfdict_dict_dict['/dev/sdd'] = Sfdisk.parse_sfdisk_dump_output(
            input_sfdisk_mbr_string)
        pp = pprint.PrettyPrinter(indent=4)
        combined_drive_state_dict = CombinedDriveState.construct_combined_drive_state_dict(
            lsblk_json_dict, blkid_dict, osprober_dict, parted_dict_dict,
            sfdict_dict_dict)
        pp.pprint(combined_drive_state_dict)
        CombinedDriveState.get_first_partition(
            combined_drive_state_dict['/dev/sdd']['partitions'])
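Blkid.parse_blkid_output is another parser that is not shown on this page. A minimal sketch, assuming each blkid line has the form /dev/xxx: KEY="value" KEY="value" ... (illustrative only; the real parser may differ):

import re

def parse_blkid_sketch(blkid_text):
    # Example line: /dev/sda1: UUID="5C2F5F000C198BC5" TYPE="ntfs" PARTUUID="..."
    blkid_dict = {}
    for line in blkid_text.splitlines():
        line = line.strip()
        if not line or ":" not in line:
            continue
        device, fields = line.split(":", 1)
        # Collect every KEY="value" pair on the rest of the line.
        blkid_dict[device.strip()] = dict(re.findall(r'(\S+)="([^"]*)"', fields))
    return blkid_dict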
    def __init__(self, absolute_path, enduser_filename, filename):
        # Redo Backup images never need decryption
        self.is_needs_decryption = False
        # Path to ".backup" file
        self.absolute_path = absolute_path
        self.enduser_filename = enduser_filename
        self.proc = collections.OrderedDict()
        env = Utility.get_env_C_locale()
        print("Reading backup : " + absolute_path)
        dirname = os.path.dirname(absolute_path)
        self.warning_dict = {}

        if filename.endswith(".backup"):
            prefix = filename.split(".backup")[0]
            print("prefix: " + prefix)
        else:
            raise ValueError("Expected Rescuezilla backup to end with .backup: " + absolute_path)

        rescuezilla_version_abs_path = os.path.join(dirname, prefix + ".rescuezilla.backup_version")
        if not os.path.exists(rescuezilla_version_abs_path):
            self.image_format = "REDOBACKUP_0.9.8_1.0.4_FORMAT"
        else:
            self.image_format = "RESCUEZILLA_1.5_FORMAT"
            self.rescuezilla_version = Utility.read_file_into_string(rescuezilla_version_abs_path).strip()
            print("Backup originally created with Rescuezilla version: " + self.rescuezilla_version)

        self.last_modified_timestamp = str(time.ctime(os.stat(absolute_path).st_mtime))
        print("Last modified timestamp " + self.last_modified_timestamp)

        self.short_device_node_partition_list = Utility.read_linebreak_delimited_file_into_list(absolute_path)
        print("Source_partitions: " + str(self.short_device_node_partition_list))

        self.backup_version = Utility.read_file_into_string(absolute_path).strip()
        print("Backup version: " + str(self.backup_version))

        self.size_bytes = int(Utility.read_file_into_string(os.path.join(dirname, prefix + ".size")).strip())
        print("Size: " + str(self.size_bytes))
        # Convert size in bytes to KB/MB/GB/TB as relevant
        self.enduser_readable_size = size(int(self.size_bytes), system=alternative)

        self.mbr_absolute_path = os.path.join(dirname, prefix + ".mbr")
        # Get the size of the MBR image because a Sourceforge user named chcatzsf released two unofficial
        # German-language Redo Backup and Recovery updates based on Ubuntu 13.10 and Ubuntu 14.04. These two versions
        # incorrectly created 512 byte Master Boot Record backup images. More information [1].
        #
        # [1] https://github.com/rescuezilla/rescuezilla/wiki/Bugs-in-unofficial-Redo-Backup-updates#bugs-in-chcatzsfs-ubuntu-1310-and-1404-releases-german-language-only
        self.mbr_size = int(os.stat(self.mbr_absolute_path).st_size)
        if self.mbr_size == 512:
            # Explain the situation to users with this issue and link to further information about how the GRUB boot
            # loader can be regenerated, and confirm whether they wish to proceed.
            truncated_bootloader_bug_url = "https://github.com/rescuezilla/rescuezilla/wiki/Bugs-in-unofficial-Redo-Backup-updates#bugs-in-chcatzsfs-ubuntu-1310-and-1404-releases-german-language-only"
            # Context for translators: Two popular unofficial Redo Backup v1.0.4 updates by Sourceforge user chcatzsf
            # have major bugs where bootloaders like GRUB are not fully backed up, so Linux-based operating systems
            # cannot boot after a restore. This bug only affected those two updates (German-language only) and the
            # problem can be fixed with careful manual intervention. Translating this message into languages other
            # than English and German is not required. Full details in:
            # https://github.com/rescuezilla/rescuezilla/wiki/Bugs-in-unofficial-Redo-Backup-updates#bugs-in
            # -chcatzsfs-ubuntu-1310-and-1404-releases-german-language-only
            self.warning_dict[absolute_path] = _("The backup's bootloader data is shorter than expected. This happens with backups created by an unofficial Redo Backup update. If the backup contained certain bootloaders like GRUB, the restored hard drive will not boot correctly without a manual fix. All data is still fully recoverable but manual intervention may be required to restore the bootloader. Please consult {url} for information and assistance. The destination drive has not yet been modified. Do you wish to continue with the restore?").format(url=truncated_bootloader_bug_url)

        self.sfdisk_absolute_path = os.path.join(dirname, prefix + ".sfdisk")
        sfdisk_string = Utility.read_file_into_string(self.sfdisk_absolute_path).strip()
        print("sfdisk: " + str(sfdisk_string))
        self.sfdisk_dict = {}
        # Louvetch's English and French Redo Backup and Recovery updates based on Ubuntu 16.04 (Xenial) create an empty
        # sfdisk partition table file for both MBR and GPT disks. More information [1]
        # [1] https://github.com/rescuezilla/rescuezilla/wiki/Bugs-in-unofficial-Redo-Backup-updates#bugs-in-louvetchs-ubuntu-1604-releases
        if sfdisk_string == "":
            empty_sfdisk_bug_url = "https://github.com/rescuezilla/rescuezilla/wiki/Bugs-in-unofficial-Redo-Backup-updates#bugs-in-louvetchs-ubuntu-1604-releases"
            # Context for translators: The two popular unofficial updates of Redo Backup v1.0.4 by Sourceforge user louvetch have a major bug.
            # This major bug affected both the English language and French language versions of this unofficial update. All data can be restored with careful
            # manual intervention. Given the popularity of those unofficial updates, translating this error message is still important.
            self.warning_dict[absolute_path] = _("The backup's extended partition information is empty or missing. This happens with incomplete backups created by an unofficial Redo Backup update. If the backup contains extended partitions, these will not restore correctly. All data is still fully recoverable but manual intervention is required to fully restore the extended partitions. Please consult {url} for information and assistance. The destination drive has not yet been modified. Do you wish to continue with the restore?").format(url=empty_sfdisk_bug_url)
        else:
            self.sfdisk_dict = Sfdisk.parse_sfdisk_dump_output(sfdisk_string)

        # Cannot rely on sfdisk drive name due to some Redo Backup versions not populating this file correctly.
        if 'device' in self.sfdisk_dict.keys():
            self.short_device_node_disk_list = [self.sfdisk_dict['device']]
        else:
            self.short_device_node_disk_list = ["unknown"]

        # The NVMe drive handling in Rescuezilla v1.0.5.1+ sets the drive name in the .backup file to start with "sdz"
        # to preserve the ability to restore with older versions of Rescuezilla (which read the source drive). Since
        # v2.0, this string is used as a key into the sfdisk partitions, so ".backup" partition list entries such as
        # "sdz3" need to be renamed to eg, "nvme0n1p3" here to preserve full backwards compatibility.
        if 'partitions' in self.sfdisk_dict.keys() and len(self.sfdisk_dict['partitions'].keys()) > 1:
            # Long drive node extracted from sfdisk dictionary
            sfdisk_long_drive_node = list(self.sfdisk_dict['partitions'].keys())[0]
            actual_base_device_node, first_partition_number = Utility.split_device_string(sfdisk_long_drive_node)
            for i in range(len(self.short_device_node_partition_list)):
                node_to_potentially_rename = self.short_device_node_partition_list[i]
                if node_to_potentially_rename.startswith("sdz") and actual_base_device_node != "sdz":
                    # This node is renamed, and the list is updated.
                    fake_base_device_node, actual_partition_number = Utility.split_device_string(node_to_potentially_rename)
                    corrected_long_device_node = Utility.join_device_string(actual_base_device_node, actual_partition_number)
                    corrected_short_device_node = re.sub('/dev/', '', corrected_long_device_node)
                    self.short_device_node_partition_list[i] = corrected_short_device_node

        self.partition_restore_command_dict = collections.OrderedDict()
        self.partclone_info_dict = collections.OrderedDict([])
        for short_device_node in self.short_device_node_partition_list:
            base_device_node, partition_number = Utility.split_device_string(short_device_node)
            image_match_string = os.path.join(dirname, prefix + "_part" + str(partition_number) + ".*")
            # Get the absolute paths of the partition images. Eg, [/path/to/20200813_part3.000, /path/to/20200813_part3.001 etc]
            abs_partclone_image_list = glob.glob(image_match_string)
            # Sort alphabetically. A natural/numeric sort is not required here because the suffixes have a fixed
            # number of digits (so there is no risk of "1, 10, 2, 3" ordering issues)
            abs_partclone_image_list.sort()
            if len(abs_partclone_image_list) == 0:
                raise Exception("Unable to match any partclone images associated with " + short_device_node + ": " + image_match_string)

            self.partition_restore_command_dict[partition_number] = {'abs_image_glob': abs_partclone_image_list}

            command = "partclone"
            # Rescuezilla v1.0.5 format creates partition to filesystem mapping files
            command_filepath = os.path.join(dirname, prefix + ".partclone.command.part" + str(partition_number))
            if os.path.isfile(command_filepath):
                command = Utility.read_file_into_string(command_filepath).strip()
                print(str(short_device_node) + ": " + command)
                self.partition_restore_command_dict[partition_number]['restore_binary'] = command

            # Get partclone.info
            cat_cmd_list = ["cat"] + abs_partclone_image_list
            pigz_cmd_list = ["pigz", "--decompress", "--stdout"]
            partclone_info_cmd_list = ["partclone.info", "--source", "-"]
            Utility.print_cli_friendly("partclone ", [cat_cmd_list, pigz_cmd_list, partclone_info_cmd_list])
            self.proc['cat_partclone' + short_device_node] = subprocess.Popen(cat_cmd_list, stdout=subprocess.PIPE, env=env,
                                                                      encoding='utf-8')
            self.proc['pigz' + short_device_node] = subprocess.Popen(pigz_cmd_list,
                                                             stdin=self.proc['cat_partclone' + short_device_node].stdout,
                                                             stdout=subprocess.PIPE, env=env, encoding='utf-8')
            self.proc['partclone_info' + short_device_node] = subprocess.Popen(partclone_info_cmd_list,
                                                                           stdin=self.proc['pigz' + short_device_node].stdout,
                                                                           stdout=subprocess.PIPE,
                                                                           stderr=subprocess.PIPE, env=env,
                                                                           encoding='utf-8')
            self.proc['cat_partclone' + short_device_node].stdout.close()  # Allow cat to receive a SIGPIPE if pigz exits.
            self.proc['pigz' + short_device_node].stdout.close()  # Allow pigz to receive a SIGPIPE if partclone.info exits.
            output, err = self.proc['partclone_info' + short_device_node].communicate()
            print("partclone_info: Exit output " + str(output) + " stderr " + str(err))
            self.partclone_info_dict[partition_number] = Partclone.parse_partclone_info_output(err)
            if len(self.partclone_info_dict[partition_number]) == 0:
                # If unable to read the partclone.info output, treat this as a dd image (see unit test for
                # partclone.info example output for this case).
                print(self.absolute_path + ": Could not read partclone info dict for " + short_device_node + ". Treating it as a dd image.")
                self.partclone_info_dict[partition_number] = {'filesystem': "dd"}
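Utility.split_device_string and Utility.join_device_string are used above to map between partition nodes like "sdz3" or "nvme0n1p3" and a (base device, partition number) pair. A rough sketch of that behaviour, using the kernel convention that a "p" separator is inserted when the base device name ends in a digit (illustrative only; the real helpers may cover more device naming schemes):

import re

def split_device_string_sketch(device_node):
    # "sdz3" -> ("sdz", 3); "/dev/nvme0n1p3" -> ("/dev/nvme0n1", 3)
    # Note: a device literally named e.g. "sdp" would be mis-split by this simple regex.
    match = re.match(r"^(.*?)p?(\d+)$", device_node)
    if match is None:
        raise ValueError("Not a partition device node: " + device_node)
    return match.group(1), int(match.group(2))

def join_device_string_sketch(base_device_node, partition_number):
    # ("sdz", 3) -> "sdz3"; ("/dev/nvme0n1", 3) -> "/dev/nvme0n1p3"
    separator = "p" if base_device_node and base_device_node[-1].isdigit() else ""
    return base_device_node + separator + str(partition_number)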
Example #7
    def __init__(self, absolute_redorescue_json_path, enduser_filename,
                 filename):
        self.image_format = "REDORESCUE_FORMAT"
        self.absolute_path = absolute_redorescue_json_path
        self.enduser_filename = enduser_filename
        self.is_needs_decryption = False
        # Redo Rescue's MBR backup is within its JSON backup file.
        self.mbr_absolute_path = None
        # Redo Rescue's sfdisk backup is within its JSON backup file.
        self.sfdisk_absolute_path = None
        self.user_notes = ""
        self.warning_dict = {}

        # Clonezilla format
        self.ebr_dict = {}
        self.short_device_node_partition_list = []
        self.short_device_node_disk_list = []
        self.lvm_vg_dev_dict = {}
        self.lvm_logical_volume_dict = {}
        self.dev_fs_dict = {}
        self.size_bytes = 0
        self.enduser_readable_size = ""
        self.is_needs_decryption = False
        self.parted_dict = {'partitions': {}}
        self.post_mbr_gap_absolute_path = {}

        if filename.endswith(".redo"):
            prefix = filename.split(".redo")[0]
            print("prefix: " + prefix)
        else:
            raise ValueError(
                "Expected RedoRescue backup to end with .redo: " +
                absolute_redorescue_json_path)

        dirname = os.path.dirname(absolute_redorescue_json_path)
        self.redo_dict = json.loads(
            Utility.read_file_into_string(absolute_redorescue_json_path))
        print(json.dumps(self.redo_dict, indent=4, sort_keys=True))

        # Convert Redo Rescue's English human-readable string to Python datetime
        redo_datetime = datetime.strptime(self.redo_dict['timestamp'],
                                          "%a, %d %b %Y %H:%M:%S %z")
        # Convert to a string that's consistent with the rest of Rescuezilla
        self.last_modified_timestamp = format_datetime(
            datetime.fromtimestamp(redo_datetime.timestamp()))
        print("Last modified timestamp " + self.last_modified_timestamp)

        # TODO: Remove the need for this
        self.short_device_node_disk_list = ["unknown"]

        dir = Path(absolute_redorescue_json_path).parent.as_posix()
        print("Redo Rescue directory : " + dir)

        self.normalized_sfdisk_dict = {
            'absolute_path': None,
            'sfdisk_dict': {
                'partitions': {}
            },
            'file_length': 0
        }
        if len(self.redo_dict['sfd_bin']) != 0:
            sfdisk_bytes = base64.b64decode(self.redo_dict['sfd_bin'])
            print("Decoded: " + str(sfdisk_bytes))
            # Immediately write the decoded bytes to a temp file. This allows the same parsing logic to be shared with
            # other image formats.
            f = tempfile.NamedTemporaryFile(mode='wb', delete=False)
            f.write(sfdisk_bytes)
            f.close()
            self.normalized_sfdisk_dict = Sfdisk.generate_normalized_sfdisk_dict(
                f.name, self)
        else:
            self.warning_dict[enduser_filename] = EMPTY_SFDISK_MSG

        self.size_bytes = self.redo_dict['drive_bytes']
        # Convert size in bytes to KB/MB/GB/TB as relevant
        self.enduser_readable_size = Utility.human_readable_filesize(
            int(self.size_bytes))

        self.image_format_dict_dict = collections.OrderedDict([])
        for short_device_node in self.redo_dict['parts'].keys():
            base_device_node, partition_number = Utility.split_device_string(
                short_device_node)
            image_match_string = os.path.join(
                dirname,
                self.redo_dict['id'] + "_" + short_device_node + "_" + "*.img")
            print(image_match_string)
            # Get the absolute paths of the partition images matching the glob above (one or more *.img files per partition)
            abs_partclone_image_list = glob.glob(image_match_string)
            # Sort alphabetically. A natural/numeric sort is not required here because the suffixes have a fixed
            # number of digits (so there is no risk of "1, 10, 2, 3" ordering issues)
            abs_partclone_image_list.sort()
            if len(abs_partclone_image_list) == 0:
                self.warning_dict[short_device_node] = _(
                    "Cannot find partition's associated partclone image"
                ) + "\n        " + image_match_string
                continue
            filesystem = self.redo_dict['parts'][short_device_node]['fs']
            if filesystem is None:
                filesystem = "<unknown>"
            # Redo Rescue only supports gzip as of writing, but this may change in the future.
            detected_compression = Utility.detect_compression(
                abs_partclone_image_list)
            suggested_partclone_binary = "partclone." + filesystem
            if shutil.which(suggested_partclone_binary) is not None:
                self.image_format_dict_dict[short_device_node] = {
                    'type': "partclone",
                    'absolute_filename_glob_list': abs_partclone_image_list,
                    'compression': detected_compression,
                    'filesystem': filesystem,
                    'binary': suggested_partclone_binary,
                    "prefix": prefix,
                    'is_lvm_logical_volume': False
                }
            else:
                self.image_format_dict_dict[short_device_node] = {
                    'type': "dd",
                    'absolute_filename_glob_list': abs_partclone_image_list,
                    'compression': detected_compression,
                    'binary': "partclone.dd",
                    "prefix": prefix,
                    'is_lvm_logical_volume': False
                }

        if 'notes' in self.redo_dict.keys():
            self.user_notes = self.redo_dict['notes']
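Utility.detect_compression is not shown on this page. A plausible sketch, assuming detection works by reading the magic bytes at the start of the first image file (the formats checked and the identifier strings returned are assumptions, not Rescuezilla's confirmed behaviour):

def detect_compression_sketch(absolute_filename_list):
    # Match the first bytes of the first image file against well-known compression magic numbers.
    magic_table = [
        (b"\x1f\x8b", "gzip"),
        (b"BZh", "bzip2"),
        (b"\xfd7zXZ\x00", "xz"),
        (b"\x28\xb5\x2f\xfd", "zstd"),
        (b"\x04\x22\x4d\x18", "lz4"),
    ]
    with open(absolute_filename_list[0], "rb") as f:
        header = f.read(8)
    for magic, name in magic_table:
        if header.startswith(magic):
            return name
    return "uncompressed"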
Example #8
    def _do_drive_query(self):
        env_C_locale = Utility.get_env_C_locale()

        drive_query_start_time = datetime.now()

        GLib.idle_add(self.please_wait_popup.set_secondary_label_text,
                      _("Unmounting: {path}").format(path=IMAGE_EXPLORER_DIR))
        returncode, failed_message = ImageExplorerManager._do_unmount(
            IMAGE_EXPLORER_DIR)
        if not returncode:
            GLib.idle_add(
                self.error_message_callback, False,
                _("Unable to shutdown Image Explorer") + "\n\n" +
                failed_message)
            GLib.idle_add(self.please_wait_popup.destroy)
            return

        if self.is_stop_requested():
            GLib.idle_add(self.error_message_callback, False,
                          _("Operation cancelled by user."))
            return

        GLib.idle_add(
            self.please_wait_popup.set_secondary_label_text,
            _("Unmounting: {path}").format(path=RESCUEZILLA_MOUNT_TMP_DIR))
        returncode, failed_message = ImageExplorerManager._do_unmount(
            RESCUEZILLA_MOUNT_TMP_DIR)
        if not returncode:
            GLib.idle_add(
                self.error_message_callback, False,
                _("Unable to unmount {path}").format(
                    path=RESCUEZILLA_MOUNT_TMP_DIR) + "\n\n" + failed_message)
            GLib.idle_add(self.please_wait_popup.destroy)
            return

        if self.is_stop_requested():
            GLib.idle_add(self.error_message_callback, False,
                          _("Operation cancelled by user."))
            return

        lsblk_cmd_list = [
            "lsblk", "-o",
            "KNAME,NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL,SERIAL", "--paths",
            "--bytes", "--json"
        ]
        blkid_cmd_list = ["blkid"]
        os_prober_cmd_list = ["os-prober"]

        lsblk_json_dict = {}
        blkid_dict = {}
        os_prober_dict = {}
        parted_dict_dict = collections.OrderedDict([])
        sfdisk_dict_dict = collections.OrderedDict([])

        # Clonezilla combines drive, partition and filesystem information from multiple data sources (lsblk, blkid,
        # parted etc). Rescuezilla continues this approach to reach the best possible Clonezilla compatibility.
        #
        # However, this sequential querying is slow. A parallel approach should in theory be much faster (but might be
        # less reliable if internal commands are creating file locks etc.)
        #
        # In practice, the sequential approach was about 25% faster than a first-cut (polling-based) parallel approach.
        # Parallel mode is currently disabled, but kept for further development/analysis.
        mode = "sequential-drive-query"
        if mode == "sequential-drive-query":
            print("Running drive query in sequential mode")

            # TODO: Run with Utility.interruptable_run() so that even long-lived commands can have a signal sent to it
            #  to shutdown early.

            # Not checking return codes here because Clonezilla does not, and some of these commands are expected to
            # fail. The Utility.run() command prints the output to stdout.
            GLib.idle_add(self.please_wait_popup.set_secondary_label_text,
                          _("Running: {app}").format(app="lsblk"))
            process, flat_command_string, fail_description = Utility.run(
                "lsblk", lsblk_cmd_list, use_c_locale=True)
            lsblk_json_dict = json.loads(process.stdout)

            if self.is_stop_requested():
                GLib.idle_add(self.error_message_callback, False,
                              _("Operation cancelled by user."))
                return

            GLib.idle_add(self.please_wait_popup.set_secondary_label_text,
                          _("Running: {app}").format(app="blkid"))
            process, flat_command_string, fail_description = Utility.run(
                "blkid", blkid_cmd_list, use_c_locale=True)
            blkid_dict = Blkid.parse_blkid_output(process.stdout)

            if self.is_stop_requested():
                GLib.idle_add(self.error_message_callback, False,
                              _("Operation cancelled by user."))
                return

            GLib.idle_add(self.please_wait_popup.set_secondary_label_text,
                          _("Running: {app}").format(app="os-prober"))
            # Use os-prober to get OS information (running WITH original locale information)
            process, flat_command_string, fail_description = Utility.run(
                "osprober", os_prober_cmd_list, use_c_locale=True)
            os_prober_dict = OsProber.parse_os_prober_output(process.stdout)

            if self.is_stop_requested():
                GLib.idle_add(self.error_message_callback, False,
                              _("Operation cancelled by user."))
                return

            for lsblk_dict in lsblk_json_dict['blockdevices']:
                partition_longdevname = lsblk_dict['name']
                print("Going to run parted and sfdisk on " +
                      partition_longdevname)
                try:
                    GLib.idle_add(
                        self.please_wait_popup.set_secondary_label_text,
                        _("Running {app} on {device}").format(
                            app="parted", device=partition_longdevname))
                    process, flat_command_string, fail_description = Utility.run(
                        "parted",
                        self._get_parted_cmd_list(partition_longdevname),
                        use_c_locale=True)
                    if "unrecognized disk label" not in process.stderr:
                        parted_dict_dict[
                            partition_longdevname] = Parted.parse_parted_output(
                                process.stdout)
                    else:
                        print("Parted says " + process.stderr)

                    if self.is_stop_requested():
                        GLib.idle_add(self.error_message_callback, False,
                                      _("Operation cancelled by user."))
                        return
                    GLib.idle_add(
                        self.please_wait_popup.set_secondary_label_text,
                        _("Running {app} on {device}").format(
                            app="sfdisk", device=partition_longdevname))
                    process, flat_command_string, fail_description = Utility.run(
                        "sfdisk",
                        self._get_sfdisk_cmd_list(partition_longdevname),
                        use_c_locale=True)
                    sfdisk_dict_dict[
                        partition_longdevname] = Sfdisk.parse_sfdisk_dump_output(
                            process.stdout)
                    if self.is_stop_requested():
                        GLib.idle_add(self.error_message_callback, False,
                                      _("Operation cancelled by user."))
                        return

                except Exception:
                    print("Could run run parted on " + partition_longdevname)
        elif mode == "parallel-drive-query":
            print("Running drive query in parallel mode")
            # Launch drive query in parallel. Parallel Python subprocess.Popen() approach adapted from [1]
            # [1] https://stackoverflow.com/a/636601
            cmd_dict = {
                ('lsblk', ""):
                subprocess.Popen(lsblk_cmd_list,
                                 env=env_C_locale,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 encoding="utf-8",
                                 universal_newlines=True),
                ('blkid', ""):
                subprocess.Popen(blkid_cmd_list,
                                 env=env_C_locale,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 encoding="utf-8",
                                 universal_newlines=True),
                ('os_prober', ""):
                subprocess.Popen(os_prober_cmd_list,
                                 env=env_C_locale,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 encoding="utf-8",
                                 universal_newlines=True),
            }
            while cmd_dict:
                print("drive_query_process is length " + str(len(cmd_dict)) +
                      " with contents " + str(cmd_dict))
                for key in list(cmd_dict.keys()):
                    proc = cmd_dict[key]
                    retcode = proc.poll()
                    if retcode is not None:  # Process finished.
                        cmd_dict.pop(key, None)
                        if key[0] == "lsblk" and retcode == 0:
                            # lsblk is complete, so its partition information can be used to launch the parted/sfdisk queries
                            lsblk_json_dict = json.loads(proc.stdout.read())
                            for lsblk_dict in lsblk_json_dict['blockdevices']:
                                partition_longdevname = lsblk_dict['name']
                                print("Launching parted and sfdisk on " +
                                      partition_longdevname)
                                try:
                                    cmd_dict[("parted", partition_longdevname
                                              )] = subprocess.Popen(
                                                  self._get_parted_cmd_list(
                                                      partition_longdevname),
                                                  env=env_C_locale,
                                                  encoding="utf-8",
                                                  universal_newlines=True)
                                    cmd_dict[("sfdisk", partition_longdevname
                                              )] = subprocess.Popen(
                                                  self._get_sfdisk_cmd_list(
                                                      partition_longdevname),
                                                  env=env_C_locale,
                                                  encoding="utf-8",
                                                  universal_newlines=True)
                                except Exception:
                                    print("Could launch sfdisk or parted on " +
                                          partition_longdevname)
                        elif key[0] == "blkid" and retcode == 0:
                            blkid_dict = Blkid.parse_blkid_output(
                                proc.stdout.read())
                        elif key[0] == "osprober" and retcode == 0:
                            os_prober_dict = OsProber.parse_os_prober_output(
                                proc.stdout.read())
                        elif key[
                                0] == "sfdisk" and retcode == 0 and proc.stdout is not None:
                            sfdisk_dict_dict[
                                key[1]] = Sfdisk.parse_sfdisk_dump_output(
                                    proc.stdout.read())
                        elif key[
                                0] == "parted" and retcode == 0 and proc.stdout is not None:
                            if proc.stderr is not None:
                                stderr = proc.stderr.read()
                                print("parted with key " + str(key) +
                                      " had stderr " + stderr)
                                if "unrecognized disk label" not in stderr:
                                    parted_dict_dict[
                                        key[1]] = Parted.parse_parted_output(
                                            proc.stdout.read())
                        else:
                            print(
                                "COULD NOT PROCESS process launched with key "
                                + str(key) + " return code" + str(retcode))
                            if proc.stdout is not None:
                                print("stdout:" + proc.stdout.read())
                            if proc.stderr is not None:
                                print(" stderr:" + proc.stderr.read())
                    else:  # No process is done, wait a bit and check again.
                        time.sleep(0.1)
                        continue
        else:
            raise Exception("Invalid drive query mode")
        self.drive_state = CombinedDriveState.construct_combined_drive_state_dict(
            lsblk_json_dict, blkid_dict, os_prober_dict, parted_dict_dict,
            sfdisk_dict_dict)
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.drive_state)

        drive_query_end_time = datetime.now()
        print("Drive query took: " +
              str((drive_query_end_time - drive_query_start_time)))
        GLib.idle_add(self.populate_drive_selection_table)
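
The parallel mode above keeps a dictionary of subprocess.Popen handles and repeatedly polls them until every command has exited (an approach adapted from the Stack Overflow answer referenced in the comments). Below is a minimal, self-contained sketch of that polling pattern; the echo/uname commands and the poll_until_done helper are illustrative stand-ins, not Rescuezilla's real lsblk/blkid/os-prober/parted/sfdisk invocations:

import subprocess
import time


def poll_until_done(cmd_dict):
    # Poll each launched process until every one has exited, collecting its output.
    results = {}
    while cmd_dict:
        for key in list(cmd_dict.keys()):
            proc = cmd_dict[key]
            retcode = proc.poll()
            if retcode is None:
                # Process still running: wait a little and re-check.
                time.sleep(0.1)
                continue
            cmd_dict.pop(key, None)
            results[key] = (retcode, proc.stdout.read(), proc.stderr.read())
    return results


if __name__ == "__main__":
    # Placeholder commands chosen purely for illustration.
    procs = {
        ("echo", ""): subprocess.Popen(["echo", "hello"],
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       universal_newlines=True),
        ("uname", ""): subprocess.Popen(["uname", "-r"],
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE,
                                        universal_newlines=True),
    }
    for key, (retcode, stdout, stderr) in poll_until_done(procs).items():
        print(str(key) + " exited " + str(retcode) + ": " + stdout.strip())
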
Example #9
    def _do_drive_query(self):
        env_C_locale = Utility.get_env_C_locale()

        drive_query_start_time = datetime.now()
        lsblk_cmd_list = [
            "lsblk", "-o", "KNAME,NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL",
            "--paths", "--bytes", "--json"
        ]
        blkid_cmd_list = ["blkid"]
        os_prober_cmd_list = ["os-prober"]

        lsblk_json_dict = {}
        blkid_dict = {}
        os_prober_dict = {}
        parted_dict_dict = collections.OrderedDict([])
        sfdisk_dict_dict = collections.OrderedDict([])

        # Clonezilla combines drive, partition and filesystem information from multiple data sources (lsblk, blkid,
        # parted etc). Rescuezilla continues this approach to achieve the best possible Clonezilla compatibility.
        #
        # However, this sequential querying is slow. A parallel approach should in theory be much faster (but might be
        # less reliable if internal commands are creating file locks etc.)
        #
        # In practice, the sequential approach was about 25% faster than a first-cut (polling-based) parallel approach.
        # Parallel mode is currently disabled, but kept for further development/analysis.
        mode = "sequential-drive-query"
        if mode == "sequential-drive-query":
            print("Running drive query in sequential mode")
            lsblk_stdout, lsblk_stderr, lsblk_return_code = Utility.run_external_command(
                lsblk_cmd_list, self.temp_callback, env_C_locale)
            lsblk_json_dict = json.loads(lsblk_stdout)
            blkid_stdout, blkid_stderr, blkid_return_code = Utility.run_external_command(
                blkid_cmd_list, self.temp_callback, env_C_locale)
            blkid_dict = Blkid.parse_blkid_output(blkid_stdout)

            # Use os-prober to get OS information (running WITH the original locale information)
            os_prober_stdout, os_prober_stderr, os_prober_return_code = Utility.run_external_command(
                os_prober_cmd_list, self.temp_callback, os.environ.copy())
            os_prober_dict = OsProber.parse_os_prober_output(os_prober_stdout)

            for lsblk_dict in lsblk_json_dict['blockdevices']:
                partition_longdevname = lsblk_dict['name']
                print("Going to run parted and sfdisk on " +
                      partition_longdevname)
                try:
                    parted_stdout, parted_stderr, parted_return_code = Utility.run_external_command(
                        self._get_parted_cmd_list(partition_longdevname),
                        self.temp_callback, env_C_locale)
                    if "unrecognized disk label" not in parted_stderr:
                        parted_dict_dict[
                            partition_longdevname] = Parted.parse_parted_output(
                                parted_stdout)
                    else:
                        print("Parted says " + parted_stderr)
                    sfdisk_stdout, sfdisk_stderr, sfdisk_return_code = Utility.run_external_command(
                        self._get_sfdisk_cmd_list(partition_longdevname),
                        self.temp_callback, env_C_locale)
                    sfdisk_dict_dict[
                        partition_longdevname] = Sfdisk.parse_sfdisk_dump_output(
                            sfdisk_stdout)
                except Exception:
                    print("Could run run parted on " + partition_longdevname)
        elif mode == "parallel-drive-query":
            print("Running drive query in parallel mode")
            # Launch drive query in parallel. Parallel Python subprocess.Popen() approach adapted from [1]
            # [1] https://stackoverflow.com/a/636601
            cmd_dict = {
                ('lsblk', ""):
                subprocess.Popen(lsblk_cmd_list,
                                 env=env_C_locale,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 encoding="utf-8",
                                 universal_newlines=True),
                ('blkid', ""):
                subprocess.Popen(blkid_cmd_list,
                                 env=env_C_locale,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 encoding="utf-8",
                                 universal_newlines=True),
                ('os_prober', ""):
                subprocess.Popen(os_prober_cmd_list,
                                 env=env_C_locale,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 encoding="utf-8",
                                 universal_newlines=True),
            }
            while cmd_dict:
                print("drive_query_process is length " + str(len(cmd_dict)) +
                      " with contents " + str(cmd_dict))
                for key in list(cmd_dict.keys()):
                    proc = cmd_dict[key]
                    retcode = proc.poll()
                    if retcode is not None:  # Process finished.
                        cmd_dict.pop(key, None)
                        if key[0] == "lsblk" and retcode == 0:
                            # lsblk is complete, partition information can be used to launch the parted/sfdisk
                            lsblk_json_dict = json.loads(proc.stdout.read())
                            for lsblk_dict in lsblk_json_dict['blockdevices']:
                                partition_longdevname = lsblk_dict['name']
                                print("Launching parted and sfdisk on " +
                                      partition_longdevname)
                                try:
                                    cmd_dict[("parted", partition_longdevname
                                              )] = subprocess.Popen(
                                                  self._get_parted_cmd_list(
                                                      partition_longdevname),
                                                  env=env_C_locale,
                                                  encoding="utf-8",
                                                  universal_newlines=True)
                                    cmd_dict[("sfdisk", partition_longdevname
                                              )] = subprocess.Popen(
                                                  self._get_sfdisk_cmd_list(
                                                      partition_longdevname),
                                                  env=env_C_locale,
                                                  encoding="utf-8",
                                                  universal_newlines=True)
                                except Exception:
                                    print("Could launch sfdisk or parted on " +
                                          partition_longdevname)
                        elif key[0] == "blkid" and retcode == 0:
                            blkid_dict = Blkid.parse_blkid_output(
                                proc.stdout.read())
                        elif key[0] == "osprober" and retcode == 0:
                            os_prober_dict = OsProber.parse_os_prober_output(
                                proc.stdout.read())
                        elif key[
                                0] == "sfdisk" and retcode == 0 and proc.stdout is not None:
                            sfdisk_dict_dict[
                                key[1]] = Sfdisk.parse_sfdisk_dump_output(
                                    proc.stdout.read())
                        elif key[
                                0] == "parted" and retcode == 0 and proc.stdout is not None:
                            if proc.stderr is not None:
                                stderr = proc.stderr.read()
                                print("parted with key " + key +
                                      " had stderr " + stderr)
                                if "unrecognized disk label" not in stderr:
                                    parted_dict_dict[
                                        key[1]] = Parted.parse_parted_output(
                                            proc.stdout.read())
                        else:
                            print(
                                "COULD NOT PROCESS process launched with key "
                                + str(key) + " return code" + str(retcode))
                            if proc.stdout is not None:
                                print("stdout:" + proc.stdout.read())
                            if proc.stderr is not None:
                                print(" stderr:" + proc.stderr.read())
                    else:  # No process is done, wait a bit and check again.
                        time.sleep(0.1)
                        continue
        else:
            raise Exception("Invalid drive query mode")
        self.drive_state = CombinedDriveState.construct_combined_drive_state_dict(
            lsblk_json_dict, blkid_dict, os_prober_dict, parted_dict_dict,
            sfdisk_dict_dict)
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.drive_state)

        drive_query_end_time = datetime.now()
        print("Drive query took: " +
              str((drive_query_end_time - drive_query_start_time)))
        GLib.idle_add(self.populate_drive_selection_table)
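
A note on the lsblk output consumed above: with --json and --paths, lsblk returns a top-level 'blockdevices' list whose 'name' field already holds the long device node, which is why it can be passed straight to parted and sfdisk. A small sketch using a hypothetical, trimmed sample of that JSON:

import json

# Hypothetical, trimmed example of output from:
#   lsblk -o KNAME,NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL --paths --bytes --json
sample_lsblk_stdout = """
{
   "blockdevices": [
      {"kname": "/dev/sda", "name": "/dev/sda", "size": 34359738368, "type": "disk",
       "fstype": null, "mountpoint": null, "model": "QEMU HARDDISK"},
      {"kname": "/dev/sdb", "name": "/dev/sdb", "size": 8589934592, "type": "disk",
       "fstype": null, "mountpoint": null, "model": "USB FLASH"}
   ]
}
"""

lsblk_json_dict = json.loads(sample_lsblk_stdout)
for lsblk_dict in lsblk_json_dict['blockdevices']:
    # 'name' holds the long device node because lsblk was invoked with --paths.
    partition_longdevname = lsblk_dict['name']
    print("Would run parted and sfdisk on " + partition_longdevname)
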
Example #10
    def __init__(self, absolute_clonezilla_img_path, enduser_filename):
        self.absolute_path = absolute_clonezilla_img_path
        self.enduser_filename = enduser_filename
        self.warning_dict = {}

        statbuf = os.stat(self.absolute_path)
        self.last_modified_timestamp = format_datetime(
            datetime.fromtimestamp(statbuf.st_mtime))
        print("Last modified timestamp " + self.last_modified_timestamp)

        self.image_format = "CLONEZILLA_FORMAT"
        dir = Path(absolute_clonezilla_img_path).parent.as_posix()
        print("Clonezilla directory : " + dir)

        self.short_device_node_partition_list = []
        self.short_device_node_disk_list = []
        self.lvm_vg_dev_dict = {}
        self.lvm_logical_volume_dict = {}
        self.dev_fs_dict = {}

        self.is_needs_decryption = False
        self.ecryptfs_info_dict = None
        ecryptfs_info_filepath = os.path.join(dir, "ecryptfs.info")
        if isfile(ecryptfs_info_filepath):
            try:
                # ecryptfs.info is plain text when the directory is encrypted and produces Input/Output error when decrypted
                Utility.read_file_into_string(ecryptfs_info_filepath)
                self.is_needs_decryption = True
            except:
                self.is_needs_decryption = False

        if self.is_needs_decryption:
            self.ecryptfs_info_dict = Ecryptfs.parse_ecryptfs_info(
                Utility.read_file_into_string(ecryptfs_info_filepath))
            self.short_device_node_disk_list = self.ecryptfs_info_dict['disk']
            self.short_device_node_partition_list = self.ecryptfs_info_dict[
                'parts']
        else:
            # The 'parts' file contains space separated short partition device nodes (eg 'sda1 sda2 sda7') corresponding
            # to the partitions that were selected by the user during the original backup.
            parts_filepath = os.path.join(dir, "parts")
            if isfile(parts_filepath):
                self.short_device_node_partition_list = Utility.read_space_delimited_file_into_list(
                    parts_filepath)
            else:
                # Every Clonezilla image encountered so far has a 'parts' file, so the backup is considered invalid
                # if none is present.
                raise FileNotFoundError("Unable to locate " + parts_filepath +
                                        " or file is encrypted")

            # The 'disk' file can contain *multiple* space-separated short device nodes (eg 'sda sdb'), but most
            # users will only backup one drive at a time using Clonezilla.
            #
            # Clonezilla images created using the 'saveparts' function (rather than 'savedisk') do NOT have this file.
            disk_filepath = os.path.join(dir, "disk")
            if isfile(disk_filepath):
                self.short_device_node_disk_list = Utility.read_space_delimited_file_into_list(
                    disk_filepath)
            else:
                print("Unable to locate " + disk_filepath)
                # Clonezilla images created using 'saveparts' (rather than 'savedisk') don't have this file. However,
                # if 'saveparts' is used on partitions spread across multiple disks that each contain a partition
                # table, then it's vital that the short device node information is extracted so that the user is able
                # to restore their intended partition table.
                #
                #
                parted_absolute_path_list = glob.glob(
                    os.path.join(dir, "*-pt.parted"))
                for parted_absolute_path in parted_absolute_path_list:
                    self.short_device_node_disk_list.append(
                        re.sub('-pt.parted', '',
                               os.path.basename(parted_absolute_path)))
                if len(self.short_device_node_disk_list) == 0:
                    # If the device list is still empty it must be due to using 'saveparts' on a drive without a
                    # partition table. Append these device nodes onto the disk list for convenience.
                    self.short_device_node_disk_list += self.short_device_node_partition_list

            # TODO: Re-evaluate the need to parse this file, as far as I can tell all the information can be extracted
            # from the partition information.
            # The 'dev-fs.list' file contains the association between device nodes and the filesystems (eg '/dev/sda2 ext4')
            dev_fs_list_filepath = os.path.join(dir, "dev-fs.list")
            if isfile(dev_fs_list_filepath):
                self.dev_fs_dict = ClonezillaImage.parse_dev_fs_list_output(
                    Utility.read_file_into_string(dev_fs_list_filepath))
            else:
                # Not raising exception because older Clonezilla images don't have this file.
                print("Unable to locate " + dev_fs_list_filepath)

            # The 'blkid.list' file provides a snapshot of the partitions on the system at the time of backup. This data
            # is not particularly relevant during a restore operation, except potentially for eg, UUID.
            #
            # Danger: Do not mistake this structure for the current system's 'blkid' information.
            # TODO: Re-evaluate the need to parse this file. The risk of mistaken usage may outweigh its usefulness.
            self.blkid_dict = {}
            blkid_list_filepath = os.path.join(dir, "blkid.list")
            if isfile(blkid_list_filepath):
                self.blkid_dict = Blkid.parse_blkid_output(
                    Utility.read_file_into_string(
                        os.path.join(dir, blkid_list_filepath)))
            else:
                # Not raising exception because older Clonezilla images don't have this file.
                print("Unable to locate " + blkid_list_filepath)

            # The 'lvm_vg_dev.list' file contains the association between an LVM VG (Logical Volume Manager volume group)
            # with a name eg 'vgtest', the LVM PV (physical volume) with a UUID name, and the device node that the physical
            # volume resides on eg, /dev/sdb.
            lvm_vg_dev_list_filepath = os.path.join(dir, "lvm_vg_dev.list")
            if isfile(
                    lvm_vg_dev_list_filepath) and not self.is_needs_decryption:
                self.lvm_vg_dev_dict = Lvm.parse_volume_group_device_list_string(
                    Utility.read_file_into_string(lvm_vg_dev_list_filepath))
            else:
                print("No LVM volume group to device file detected in image")

            # The 'lvm_logv.list' file contains the association between LVM logical volume device nodes and their filesystems
            lvm_logv_list_filepath = os.path.join(dir, "lvm_logv.list")
            if isfile(lvm_logv_list_filepath) and not self.is_needs_decryption:
                self.lvm_logical_volume_dict = Lvm.parse_logical_volume_device_list_string(
                    Utility.read_file_into_string(lvm_logv_list_filepath))
            else:
                print("No LVM logical volume file detected in image")

        self.parted_dict_dict = {}
        self.sfdisk_dict_dict = {}
        self.mbr_dict_dict = {}
        self.ebr_dict_dict = {}
        self.size_bytes = 0
        self.enduser_readable_size = "unknown"
        for short_disk_device_node in self.short_device_node_disk_list:
            self.size_bytes = 0
            # The Clonezilla -pt.parted file lists size in sectors, rather than bytes (or the end-user readable
            # KB/MB/GB/TB used by Clonezilla's -pt.parted.compact file)
            parted_filepath = os.path.join(
                dir, short_disk_device_node + "-pt.parted")
            if isfile(parted_filepath) and not self.is_needs_decryption:
                self.parted_dict_dict[
                    short_disk_device_node] = Parted.parse_parted_output(
                        Utility.read_file_into_string(parted_filepath))
                if 'capacity' in self.parted_dict_dict[short_disk_device_node] and 'logical_sector_size' in \
                        self.parted_dict_dict[short_disk_device_node]:
                    self.size_bytes = self.parted_dict_dict[short_disk_device_node]['capacity'] * \
                                      self.parted_dict_dict[short_disk_device_node]['logical_sector_size']
                else:
                    raise Exception(
                        "Unable to calculate disk capacity using " +
                        parted_filepath + ": " +
                        str(self.parted_dict_dict[short_disk_device_node]))
            else:
                # Do not raise exception because parted partition table is not present when using 'saveparts'
                print("Unable to locate " + parted_filepath +
                      " or file is encrypted")

            if self.ecryptfs_info_dict is not None and 'size' in self.ecryptfs_info_dict.keys(
            ):
                self.enduser_readable_size = self.ecryptfs_info_dict[
                    'size'].strip("_")
            else:
                # Convert size in bytes to KB/MB/GB/TB as relevant
                self.enduser_readable_size = size(int(self.size_bytes),
                                                  system=alternative)
                sfdisk_filepath = os.path.join(
                    dir, short_disk_device_node + "-pt.sf")
                if isfile(sfdisk_filepath) and not self.is_needs_decryption:
                    sfdisk_string = Utility.read_file_into_string(
                        sfdisk_filepath)
                    self.sfdisk_dict_dict[short_disk_device_node] = {
                        'absolute_path':
                        sfdisk_filepath,
                        'sfdisk_dict':
                        Sfdisk.parse_sfdisk_dump_output(sfdisk_string),
                        'sfdisk_file_length':
                        len(sfdisk_string)
                    }
                else:
                    # Do not raise exception because sfdisk partition table is often missing using Clonezilla image format,
                    # as `sfdisk --dump` fails for disks without a partition table.
                    print("Unable to locate " + sfdisk_filepath +
                          " or file is encrypted")

            # There is a maximum of 1 MBR per drive (there can be many drives). The Master Boot Record (MBR) is never
            # listed in the 'parts' list.
            mbr_glob_list = glob.glob(
                os.path.join(dir, short_disk_device_node) + "*-mbr")
            for absolute_mbr_filepath in mbr_glob_list:
                short_mbr_device_node = basename(absolute_mbr_filepath).split(
                    "-mbr")[0]
                self.mbr_dict_dict[short_disk_device_node] = {
                    'short_device_node': short_mbr_device_node,
                    'absolute_path': absolute_mbr_filepath
                }

            # There is a maximum of 1 EBR per drive (there can be many drives). Extended Boot Record (EBR) is never
            # listed in 'parts' list.
            ebr_glob_list = glob.glob(
                os.path.join(dir, short_disk_device_node) + "*-ebr")
            for absolute_ebr_filepath in ebr_glob_list:
                short_ebr_device_node = basename(absolute_ebr_filepath).split(
                    "-ebr")[0]
                self.ebr_dict_dict[short_disk_device_node] = {
                    'short_device_node': short_ebr_device_node,
                    'absolute_path': absolute_ebr_filepath
                }

        self.image_format_dict_dict = collections.OrderedDict([])
        # Loops over the partitions listed in the 'parts' file
        for short_partition_device_node in self.short_device_node_partition_list:
            has_found_atleast_one_associated_image = False
            # For standard MBR and GPT partitions, the partition key listed in the 'parts' file has a directly
            # associated backup image, so check for this.
            image_format_dict = ClonezillaImage.scan_backup_image(
                dir, short_partition_device_node, self.is_needs_decryption)
            # If no match found check the LVM (Logical Volume Manager)
            if len(image_format_dict) == 0:
                # Loop over all the volume groups (if any)
                for vg_name in self.lvm_vg_dev_dict.keys():
                    # TODO: Evaluate if there are Linux multipath device nodes that hold LVM Physical Volumes.
                    # TODO: May need to adjust for multipath device node by replacing "/" with "-" for this node.
                    pv_short_device_node = re.sub(
                        '/dev/', '',
                        self.lvm_vg_dev_dict[vg_name]['device_node'])
                    # Check if there is an associated LVM Physical Volume (PV) present
                    if short_partition_device_node == pv_short_device_node:
                        # Yes, the partition being analysed is associated with an LVM physical volume that contains
                        # an LVM Volume Group. Now determine all backup images associated to Logical Volumes that
                        # reside within this Volume Group.
                        for lv_path in self.lvm_logical_volume_dict.keys():
                            candidate_lv_path_prefix = "/dev/" + vg_name + "/"
                            # Eg, "/dev/cl/root".startswith("/dev/cl")
                            if lv_path.startswith(candidate_lv_path_prefix):
                                # Found a logical volume. Note: There may be more than one LV associated with a VG
                                # Set the scan prefix for the backup image to eg "cl-root"
                                logical_volume_scan_key = re.sub(
                                    '/', '-', re.sub('/dev/', '', lv_path))
                                image_format_dict = ClonezillaImage.scan_backup_image(
                                    dir, logical_volume_scan_key,
                                    self.is_needs_decryption)
                                if len(image_format_dict) != 0:
                                    image_format_dict[
                                        'is_lvm_logical_volume'] = True
                                    image_format_dict[
                                        'volume_group_name'] = vg_name
                                    image_format_dict['physical_volume_long_device_node'] = \
                                    self.lvm_vg_dev_dict[vg_name]['device_node']
                                    image_format_dict[
                                        'logical_volume_long_device_node'] = lv_path
                                    self.image_format_dict_dict[
                                        logical_volume_scan_key] = image_format_dict
                                    has_found_atleast_one_associated_image = True
            else:
                has_found_atleast_one_associated_image = True
                self.image_format_dict_dict[
                    short_partition_device_node] = image_format_dict

            if not has_found_atleast_one_associated_image:
                self.image_format_dict_dict[short_partition_device_node] = {
                    'type': "missing",
                    'prefix': short_partition_device_node,
                    'is_lvm_logical_volume': False
                }
                # TODO: Improve conversion between /dev/ nodes to short dev node.
                long_partition_key = "/dev/" + short_partition_device_node
                if long_partition_key in self.dev_fs_dict.keys():
                    # Annotate with the filesystem information from the dev-fs.list file. This case is expected when,
                    # during a backup, Clonezilla or Rescuezilla failed to successfully image this filesystem, but may
                    # have succeeded for other filesystems.
                    fs = self.dev_fs_dict[long_partition_key]
                    self.image_format_dict_dict[short_partition_device_node][
                        'filesystem'] = fs
                    # TODO: Consider removing warning_dict as image_format_dict is sufficient.
                    self.warning_dict[short_partition_device_node] = fs
                elif self.is_needs_decryption:
                    self.warning_dict[short_partition_device_node] = _(
                        "Needs decryption")
                else:
                    self.warning_dict[short_partition_device_node] = _(
                        "Unknown filesystem")

        # Unfortunately swap partitions are not listed in the 'parts' file. There does not appear to be any alternative
        # but to scan for the swap partitions and add them to the existing partitions, taking care to avoid duplicates
        # by not rescanning anything that has already been scanned due to being listed as an LVM logical volume.
        swap_partition_info_glob_list = glob.glob(
            os.path.join(dir, "swappt-*.info"))
        for swap_partition_info_glob in swap_partition_info_glob_list:
            key = Swappt.get_short_device_from_swappt_info_filename(
                swap_partition_info_glob)
            already_scanned = False
            for image_format_dict_key in self.image_format_dict_dict.keys():
                if key == self.image_format_dict_dict[image_format_dict_key][
                        "prefix"]:
                    already_scanned = True
                    break
            if not already_scanned and not self.is_needs_decryption:
                self.image_format_dict_dict[key] = Swappt.parse_swappt_info(
                    Utility.read_file_into_string(swap_partition_info_glob))
                self.image_format_dict_dict[key]['type'] = "swap"
                self.image_format_dict_dict[key]['prefix'] = key
                self.image_format_dict_dict[key][
                    'is_lvm_logical_volume'] = False
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.image_format_dict_dict)
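
Two of the naming conventions the constructor above relies on can be isolated into small helpers: Clonezilla's hyphenated image prefix for an LVM logical volume path, and recovering the short device node from a '-mbr'/'-ebr' filename. A minimal sketch with illustrative file paths:

import os
import re


def lvm_scan_key(lv_path):
    # Eg, "/dev/cl/root" becomes "cl-root", matching the prefix used for the logical volume's backup image.
    return re.sub('/', '-', re.sub('/dev/', '', lv_path))


def short_device_node_from_boot_record(filepath, suffix):
    # Eg, "/path/to/image/sda4-ebr" with suffix "-ebr" becomes "sda4".
    return os.path.basename(filepath).split(suffix)[0]


print(lvm_scan_key("/dev/cl/root"))                                  # cl-root
print(short_device_node_from_boot_record("/img/sda-mbr", "-mbr"))    # sda
print(short_device_node_from_boot_record("/img/sda4-ebr", "-ebr"))   # sda4
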
Example #11
    def __init__(self, absolute_foxclone_img_path, enduser_filename, filename):
        self.image_format = "FOXCLONE_FORMAT"
        self.absolute_path = absolute_foxclone_img_path
        self.enduser_filename = enduser_filename
        self.user_notes = ""
        self.warning_dict = {}

        # Clonezilla format
        self.ebr_dict = {}
        self.efi_nvram_dat_absolute_path = None
        self.short_device_node_partition_list = []
        self.short_device_node_disk_list = []
        self.lvm_vg_dev_dict = {}
        self.lvm_logical_volume_dict = {}
        self.sfdisk_chs_dict = None
        self.dev_fs_dict = {}
        self.size_bytes = 0
        self.enduser_readable_size = ""
        self.is_needs_decryption = False
        self.normalized_sfdisk_dict = {
            'absolute_path': None,
            'sfdisk_dict': {
                'partitions': {}
            },
            'file_length': 0
        }
        self.parted_dict = {'partitions': {}}
        self.post_mbr_gap_dict = {}

        if filename.endswith(".backup"):
            prefix = filename.split(".backup")[0]
            print("prefix: " + prefix)
        else:
            raise ValueError("Expected Foxclone backup to end with .backup: " +
                             absolute_foxclone_img_path)

        dirname = os.path.dirname(absolute_foxclone_img_path)
        self.foxclone_dict = FoxcloneImage.parse_dot_backup(
            Utility.read_file_into_string(absolute_foxclone_img_path))
        with Utility.setlocale('C'):
            # Convert Foxclone's English human-readable string to Python datetime
            foxclone_datetime = datetime.strptime(
                self.foxclone_dict['timestamp'], "%d %b %Y, %H:%M")
        # Convert to a string that's consistent with the rest of Rescuezilla
        self.last_modified_timestamp = format_datetime(
            datetime.fromtimestamp(foxclone_datetime.timestamp()))
        print("Last modified timestamp " + self.last_modified_timestamp)

        dir = Path(absolute_foxclone_img_path).parent.as_posix()
        print("Foxclone directory : " + dir)

        sfdisk_absolute_path = os.path.join(dirname, prefix + ".sfdisk")
        self.normalized_sfdisk_dict = Sfdisk.generate_normalized_sfdisk_dict(
            sfdisk_absolute_path, self)

        if 'device' in self.normalized_sfdisk_dict['sfdisk_dict'].keys():
            self.short_device_node_disk_list = [
                self.normalized_sfdisk_dict['sfdisk_dict']['device']
            ]

        self._mbr_absolute_path = None
        mbr_path_string = os.path.join(dirname, prefix + ".grub")
        if os.path.exists(mbr_path_string):
            self._mbr_absolute_path = mbr_path_string
        else:
            self.warning_dict[self.short_device_node_disk_list] = "Missing MBR"

        self.image_format_dict_dict = collections.OrderedDict([])
        for short_device_node in self.foxclone_dict['partitions'].keys():
            type = self.foxclone_dict['partitions'][short_device_node]['type']
            if type == "extended":
                # Foxclone never has a backup image associated with the extended partition.
                continue
            filesystem = self.foxclone_dict['partitions'][short_device_node][
                'fs']
            base_device_node, partition_number = Utility.split_device_string(
                short_device_node)
            # The partclone image Foxclone creates can be compressed, and can be split into multiple files, depending
            # on the settings the user configures. The compression and split settings are saved in the metadata file,
            # which was already parsed above. The filename patterns may be as follows:
            #
            # Uncompressed, no split: 20210307.sdj1.img
            # Uncompressed, split: 20210307.sdj1.img.00
            # Compressed, no split: 20210307.sdj1.img.gz
            # Compressed, split: 20210307.sdj1.img.gz.00
            image_match_string = prefix + "." + short_device_node + ".img"
            if self.foxclone_dict['is_compressed']:
                # Foxclone only supports gzip compression
                image_match_string += ".gz"
            if self.foxclone_dict['is_split']:
                # The split filename suffixes go 88, 89, 9000, 9001 etc. It looks like partclone does this, perhaps to
                # prevent any issue around lexical ordering. This is good, because lexical ordering is used here.
                image_match_string += ".*"
            image_match_path_string = os.path.join(dirname, image_match_string)
            print(image_match_path_string)
            # Get absolute path partition images. Eg, [/path/to/20200813_part3.000, /path/to/20200813_part3.001 etc]
            abs_partclone_image_list = glob.glob(image_match_path_string)
            # Sort alphabetically. A numeric sort is not required here because of the fixed number of digits (so no
            # risk of "1, 10, 2, 3" issues), and partclone manages this when the number of digits is no longer fixed
            # (see above)
            abs_partclone_image_list.sort()
            if len(abs_partclone_image_list) == 0 and filesystem != "swap":
                self.warning_dict[short_device_node] = _(
                    "Cannot find partition's associated partclone image"
                ) + "\n        " + image_match_string
                continue

            if filesystem == "swap":
                self.image_format_dict_dict[short_device_node] = {
                    'type':
                    "swap",
                    # TODO: Make the fact the UUID gets placed in 'type' field less confusing.
                    'uuid':
                    self.foxclone_dict['partitions'][short_device_node]
                    ['type'],
                    'label':
                    "",
                    "prefix":
                    prefix,
                    'is_lvm_logical_volume':
                    False
                }
            elif filesystem != "<unknown>":
                # Detect compression because, while Foxclone only supports gzip as of writing, this may change in future.
                self.image_format_dict_dict[short_device_node] = {
                    'type':
                    "partclone",
                    'absolute_filename_glob_list':
                    abs_partclone_image_list,
                    'compression':
                    Utility.detect_compression(abs_partclone_image_list),
                    'filesystem':
                    filesystem,
                    # Assumption that binary is valid.
                    'binary':
                    "partclone." + filesystem,
                    "prefix":
                    prefix,
                    'is_lvm_logical_volume':
                    False
                }
            else:
                # Detect compression because, while Foxclone only supports gzip as of writing, this may change in future.
                self.image_format_dict_dict[short_device_node] = {
                    'type':
                    "dd",
                    'absolute_filename_glob_list':
                    abs_partclone_image_list,
                    'compression':
                    Utility.detect_compression(abs_partclone_image_list),
                    'binary':
                    "partclone.dd",
                    "prefix":
                    prefix,
                    'is_lvm_logical_volume':
                    False
                }

        notes_abs_path = os.path.join(dirname, prefix + ".note.txt")
        if os.path.exists(notes_abs_path):
            self.user_notes = Utility.read_file_into_string(notes_abs_path)

        # Foxclone doesn't keep track of the drive capacity, so estimate it from the sfdisk partition table backup
        last_partition_key, last_partition_final_byte = Sfdisk.get_highest_offset_partition(
            self.normalized_sfdisk_dict)
        self.size_bytes = last_partition_final_byte
        # Convert size in bytes to KB/MB/GB/TB as relevant
        self.enduser_readable_size = Utility.human_readable_filesize(
            int(self.size_bytes))

        for image_format_dict_key in self.image_format_dict_dict.keys():
            estimated_size_bytes = self._compute_partition_size_byte_estimate(
                image_format_dict_key)
            self.image_format_dict_dict[image_format_dict_key][
                'estimated_size_bytes'] = estimated_size_bytes

        self.is_needs_decryption = False
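
A minimal sketch of how the Foxclone partclone image glob above is assembled from the parsed metadata flags; the prefix, device node, backup directory and flag values below are example values only:

import glob
import os


def build_image_match_string(prefix, short_device_node, is_compressed, is_split):
    # Eg, "20210307" + "." + "sdj1" + ".img", optionally followed by ".gz" and/or a split-suffix wildcard.
    image_match_string = prefix + "." + short_device_node + ".img"
    if is_compressed:
        # Foxclone only supports gzip compression at the time of writing.
        image_match_string += ".gz"
    if is_split:
        image_match_string += ".*"
    return image_match_string


pattern = build_image_match_string("20210307", "sdj1", is_compressed=True, is_split=True)
print(pattern)  # 20210307.sdj1.img.gz.*
# Alphabetical sort is sufficient because the split suffixes preserve lexical ordering.
abs_partclone_image_list = sorted(glob.glob(os.path.join("/path/to/backup", pattern)))
print(abs_partclone_image_list)
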
Example #12
    def __init__(self, absolute_clonezilla_img_path, enduser_filename, dir,
                 ecryptfs_info_dict, is_needs_decryption,
                 short_disk_device_node, short_device_node_partition_list,
                 is_display_multidisk, enduser_drive_number):
        self.absolute_path = absolute_clonezilla_img_path
        self.ecryptfs_info_dict = ecryptfs_info_dict
        self.is_needs_decryption = is_needs_decryption
        self.short_disk_device_node = short_disk_device_node
        self.is_display_multidisk = is_display_multidisk
        self.enduser_drive_number = enduser_drive_number
        self.user_notes = ""
        self.warning_dict = {}

        notes_filepath = os.path.join(dir, "rescuezilla.description.txt")
        if os.path.exists(notes_filepath):
            self.user_notes = Utility.read_file_into_string(notes_filepath)

        if is_display_multidisk:
            multidisk_desc = _("Drive {drive_number}".format(
                drive_number=str(self.enduser_drive_number)))
            self.enduser_filename = enduser_filename + " (" + multidisk_desc + ")"
        else:
            self.enduser_filename = enduser_filename

        statbuf = os.stat(self.absolute_path)
        self.last_modified_timestamp = format_datetime(
            datetime.fromtimestamp(statbuf.st_mtime))
        print("Last modified timestamp " + self.last_modified_timestamp)

        self.image_format = "CLONEZILLA_FORMAT"

        self.short_device_node_partition_list = short_device_node_partition_list
        self.short_device_node_disk_list = [short_disk_device_node]
        self.ebr_dict = {}
        self.lvm_vg_dev_dict = {}
        self.lvm_logical_volume_dict = {}
        self.dev_fs_dict = {}

        if not self.is_needs_decryption:
            # The 'dev-fs.list' file contains the association between device nodes and the filesystems
            # (eg '/dev/sda2 ext4'). The filesystems are a combination of several sources, so the values may differ from
            # `blkid` and `parted`. Given newer versions of Clonezilla create this file, it makes sense to process it.
            dev_fs_list_filepath = os.path.join(dir, "dev-fs.list")
            if isfile(dev_fs_list_filepath):
                self.dev_fs_dict = ClonezillaImage.parse_dev_fs_list_output(
                    Utility.read_file_into_string(dev_fs_list_filepath))
            else:
                # Not raising exception because older Clonezilla images don't have this file.
                print("Unable to locate " + dev_fs_list_filepath)

            # The 'blkid.list' file provides a snapshot of the partitions on the system at the time of backup. This data
            # is not particularly relevant during a restore operation, except potentially for eg, UUID.
            #
            # Danger: Do not mistake this structure for the current system's 'blkid' information.
            # TODO: Re-evaluate the need to parse this file. The risk of mistaken usage may outweigh its usefulness.
            self.blkid_dict = {}
            blkid_list_filepath = os.path.join(dir, "blkid.list")
            if isfile(blkid_list_filepath):
                self.blkid_dict = Blkid.parse_blkid_output(
                    Utility.read_file_into_string(
                        os.path.join(dir, blkid_list_filepath)))
            else:
                # Not raising exception because older Clonezilla images don't have this file.
                print("Unable to locate " + blkid_list_filepath)

            # The 'lvm_vg_dev.list' file contains the association between an LVM VG (Logical Volume Manager volume group)
            # with a name eg 'vgtest', the LVM PV (physical volume) with a UUID name, and the device node that the physical
            # volume resides on eg, /dev/sdb.
            lvm_vg_dev_list_filepath = os.path.join(dir, "lvm_vg_dev.list")
            if isfile(
                    lvm_vg_dev_list_filepath) and not self.is_needs_decryption:
                self.lvm_vg_dev_dict = Lvm.parse_volume_group_device_list_string(
                    Utility.read_file_into_string(lvm_vg_dev_list_filepath))
            else:
                print("No LVM volume group to device file detected in image")

            # The 'lvm_logv.list' file contains the association between LVM logical volume device nodes and their filesystems
            lvm_logv_list_filepath = os.path.join(dir, "lvm_logv.list")
            if isfile(lvm_logv_list_filepath) and not self.is_needs_decryption:
                self.lvm_logical_volume_dict = Lvm.parse_logical_volume_device_list_string(
                    Utility.read_file_into_string(lvm_logv_list_filepath))
            else:
                print("No LVM logical volume file detected in image")

        self.parted_dict = {}
        self._mbr_absolute_path = {}
        self.post_mbr_gap_absolute_path = {}
        self.size_bytes = 0
        self.enduser_readable_size = "unknown"
        self.size_bytes = 0
        # The Clonezilla -pt.parted file lists size in sectors, rather than bytes (or the end-user readable
        # KB/MB/GB/TB used by Clonezilla's -pt.parted.compact file)
        parted_filepath = os.path.join(dir,
                                       short_disk_device_node + "-pt.parted")
        if isfile(parted_filepath) and not self.is_needs_decryption:
            self.parted_dict = Parted.parse_parted_output(
                Utility.read_file_into_string(parted_filepath))
            if 'capacity' in self.parted_dict and 'logical_sector_size' in \
                    self.parted_dict:
                self.size_bytes = self.parted_dict['capacity'] * \
                                  self.parted_dict['logical_sector_size']
            else:
                raise Exception("Unable to calculate disk capacity using " +
                                parted_filepath + ": " + str(self.parted_dict))
        else:
            # Do not raise exception because parted partition table is not present when using 'saveparts'
            print("Unable to locate " + parted_filepath +
                  " or file is encrypted")

        if self.ecryptfs_info_dict is not None and 'size' in self.ecryptfs_info_dict.keys(
        ):
            self.enduser_readable_size = self.ecryptfs_info_dict['size'].strip(
                "_")

        self.normalized_sfdisk_dict = {
            'absolute_path': None,
            'sfdisk_dict': {
                'partitions': {}
            },
            'file_length': 0
        }
        if not is_needs_decryption:
            sfdisk_absolute_path = os.path.join(
                dir, short_disk_device_node + "-pt.sf")
            self.normalized_sfdisk_dict = Sfdisk.generate_normalized_sfdisk_dict(
                sfdisk_absolute_path, self)

        # There is a maximum of 1 MBR per drive (there can be many drives). Master Boot Record (MBR) is never
        # listed in 'parts' list.
        self._mbr_absolute_path = None
        mbr_glob_list = glob.glob(
            os.path.join(dir, short_disk_device_node) + "-mbr")
        for absolute_mbr_filepath in mbr_glob_list:
            self._mbr_absolute_path = absolute_mbr_filepath

        # There is a maximum of 1 post-MBR gap per drive (there can be many drives). The post MBR gap is never
        # listed in 'parts' list. Note the asterisk wildcard in the glob, to get the notes.txt file (see below)
        post_mbr_gap_glob_list = glob.glob(
            os.path.join(dir, short_disk_device_node) +
            "-hidden-data-after-mbr*")
        for absolute_post_mbr_gap_filepath in post_mbr_gap_glob_list:
            if absolute_post_mbr_gap_filepath.endswith(
                    ".notes.txt") and not isfile(
                        os.path.join(dir, short_disk_device_node) +
                        "-hidden-data-after-mbr"):
                # When the post-MBR gap is not created by Clonezilla due to a >1024 MB gap between the MBR and the
                # first partition, a "notes.txt" file is created which explains this. To maximize compatibility, in
                # this situation Rescuezilla v2.1+ creates a 1MB post-MBR gap backup *and* a notes.txt file.
                self.warning_dict[
                    short_disk_device_node +
                    "mbr"] = "Backup is missing the \"post-MBR gap\" backup, most likely due to Clonezilla detecting a >1024MB gap between the MBR partition table and the first partition. Any GRUB bootloaders present will not restore correctly. In order to boot after restoring this backup, Clonezilla happens to workaround this situation by automatically re-installing GRUB, but current version of Rescuezilla does not implement this (but will in a future version). Clonezilla is available from within the Rescuezilla live environment by running `clonezilla` in a Terminal. See the following link for more information: https://github.com/rescuezilla/rescuezilla/issues/146"
            else:
                self.post_mbr_gap_absolute_path = {
                    'absolute_path': absolute_post_mbr_gap_filepath
                }

        # There is a maximum of 1 EBR per drive (there can be many drives). Extended Boot Record (EBR) is never
        # listed in 'parts' list. The asterisk is needed here because unlike the MBR case, the ebr file is eg,
        # sda4-ebr. In other words, the EBR is associated with a partition, not the base device node.
        ebr_glob_list = glob.glob(
            os.path.join(dir, short_disk_device_node) + "*-ebr")
        for absolute_ebr_filepath in ebr_glob_list:
            short_ebr_device_node = basename(absolute_ebr_filepath).split(
                "-ebr")[0]
            self.ebr_dict = {
                'short_device_node': short_ebr_device_node,
                'absolute_path': absolute_ebr_filepath
            }

        self.image_format_dict_dict = collections.OrderedDict([])
        # Loops over the partitions listed in the 'parts' file
        for short_partition_device_node in self.short_device_node_partition_list:
            has_found_atleast_one_associated_image = False
            # For standard MBR and GPT partitions, the partition key listed in the 'parts' file has a directly
            # associated backup image, so check for this.
            image_format_dict = ClonezillaImage.scan_backup_image(
                dir, short_partition_device_node, self.is_needs_decryption)
            # If no match found check the LVM (Logical Volume Manager)
            if len(image_format_dict) == 0:
                # Loop over all the volume groups (if any)
                for vg_name in self.lvm_vg_dev_dict.keys():
                    # TODO: Evaluate if there are Linux multipath device nodes that hold LVM Physical Volumes.
                    # TODO: May need to adjust for multipath device node by replacing "/" with "-" for this node.
                    pv_short_device_node = re.sub(
                        '/dev/', '',
                        self.lvm_vg_dev_dict[vg_name]['device_node'])
                    # Check if there is an associated LVM Physical Volume (PV) present
                    if short_partition_device_node == pv_short_device_node:
                        # Yes, the partition being analysed is associated with an LVM physical volume that contains
                        # an LVM Volume Group. Now determine all backup images associated to Logical Volumes that
                        # reside within this Volume Group.
                        for lv_path in self.lvm_logical_volume_dict.keys():
                            candidate_lv_path_prefix = "/dev/" + vg_name + "/"
                            # Eg, "/dev/cl/root".startswith("/dev/cl")
                            if lv_path.startswith(candidate_lv_path_prefix):
                                # Found a logical volume. Note: There may be more than one LV associated with a VG
                                # Set the scan prefix for the backup image to eg "cl-root"
                                logical_volume_scan_key = re.sub(
                                    '/', '-', re.sub('/dev/', '', lv_path))
                                image_format_dict = ClonezillaImage.scan_backup_image(
                                    dir, logical_volume_scan_key,
                                    self.is_needs_decryption)
                                if len(image_format_dict) != 0:
                                    image_format_dict[
                                        'is_lvm_logical_volume'] = True
                                    image_format_dict[
                                        'volume_group_name'] = vg_name
                                    image_format_dict['physical_volume_long_device_node'] = \
                                    self.lvm_vg_dev_dict[vg_name]['device_node']
                                    image_format_dict[
                                        'logical_volume_long_device_node'] = lv_path
                                    self.image_format_dict_dict[
                                        logical_volume_scan_key] = image_format_dict
                                    has_found_atleast_one_associated_image = True
            else:
                has_found_atleast_one_associated_image = True
                self.image_format_dict_dict[
                    short_partition_device_node] = image_format_dict

            if not has_found_atleast_one_associated_image:
                self.image_format_dict_dict[short_partition_device_node] = {
                    'type': "missing",
                    'prefix': short_partition_device_node,
                    'is_lvm_logical_volume': False
                }
                # TODO: Improve conversion between /dev/ nodes to short dev node.
                long_partition_key = "/dev/" + short_partition_device_node
                if long_partition_key in self.dev_fs_dict.keys():
                    # Annotate with the filesystem information from the dev-fs.list file. This case is expected when,
                    # during a backup, Clonezilla or Rescuezilla failed to successfully image this filesystem, but may
                    # have succeeded for other filesystems.
                    fs = self.dev_fs_dict[long_partition_key]['filesystem']
                    self.image_format_dict_dict[short_partition_device_node][
                        'filesystem'] = fs
                    # TODO: Consider removing warning_dict as image_format_dict is sufficient.
                    self.warning_dict[short_partition_device_node] = fs
                elif self.is_needs_decryption:
                    self.warning_dict[short_partition_device_node] = _(
                        "Needs decryption")
                else:
                    self.warning_dict[short_partition_device_node] = _(
                        "Unknown filesystem")

        # Unfortunately swap partitions are not listed in the 'parts' file. There does not appear to be any alternative
        # but to scan for the swap partitions and add them to the existing partitions, taking care to avoid duplicates
        # by not rescanning anything that has already been scanned due to being listed as an LVM logical volume.
        swap_partition_info_glob_list = glob.glob(
            os.path.join(dir, "swappt-*.info"))
        for swap_partition_info_glob in swap_partition_info_glob_list:
            key = Swappt.get_short_device_from_swappt_info_filename(
                swap_partition_info_glob)
            already_scanned = False
            for image_format_dict_key in self.image_format_dict_dict.keys():
                if key == self.image_format_dict_dict[image_format_dict_key][
                        "prefix"]:
                    already_scanned = True
                    break
            if not already_scanned and not self.is_needs_decryption:
                self.image_format_dict_dict[key] = Swappt.parse_swappt_info(
                    Utility.read_file_into_string(swap_partition_info_glob))
                self.image_format_dict_dict[key]['type'] = "swap"
                self.image_format_dict_dict[key]['prefix'] = key
                self.image_format_dict_dict[key][
                    'is_lvm_logical_volume'] = False

        total_size_estimate = 0
        # Now that we have all the images, compute the partition size estimates and save them to avoid recomputing.
        for image_format_dict_key in self.image_format_dict_dict.keys():
            # Try to find the short_disk_key for the image. This key is used to access the parted and sfdisk
            # partition table backups. It's not guaranteed there is a direct association between the backup image and
            # the partition table (for example, Logical Volume Manager logical volumes).
            associated_short_disk_key = ""
            for short_disk_key in self.short_device_node_disk_list:
                if image_format_dict_key.startswith(short_disk_key):
                    associated_short_disk_key = short_disk_key
            estimated_size_bytes = self._compute_partition_size_byte_estimate(
                associated_short_disk_key, image_format_dict_key)
            self.image_format_dict_dict[image_format_dict_key][
                'estimated_size_bytes'] = estimated_size_bytes
            total_size_estimate += estimated_size_bytes

        if self.size_bytes == 0:
            # For md RAID devices, Clonezilla doesn't have a parted or sfdisk partition table containing the hard drive
            # size, so in that situation, summing the image sizes provides some kind of size estimate.
            self.size_bytes = total_size_estimate

        # Convert size in bytes to KB/MB/GB/TB as relevant
        self.enduser_readable_size = Utility.human_readable_filesize(
            int(self.size_bytes))

        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.image_format_dict_dict)
Example #13
    def __init__(self, absolute_fogproject_img_path, enduser_filename,
                 filename):
        self.image_format = "FOGPROJECT_FORMAT"
        self.absolute_path = absolute_fogproject_img_path
        self.enduser_filename = enduser_filename
        self.user_notes = ""
        self.warning_dict = {}

        # Clonezilla format
        self.ebr_dict = {}
        self.efi_nvram_dat_absolute_path = None
        self.short_device_node_partition_list = []
        self.short_device_node_disk_list = []
        self.lvm_vg_dev_dict = {}
        self.lvm_logical_volume_dict = {}
        self.sfdisk_chs_dict = None
        self.dev_fs_dict = {}
        self.size_bytes = 0
        self.enduser_readable_size = ""
        self.is_needs_decryption = False
        self.normalized_sfdisk_dict = {
            'absolute_path': None,
            'sfdisk_dict': {
                'partitions': {}
            },
            'file_length': 0
        }
        self.parted_dict = {'partitions': {}}
        self.post_mbr_gap_dict = {}

        if filename.endswith(".partitions") and not filename.endswith(
                ".minimum.partitions"):
            prefix = filename.split(".partitions")[0]
            print("prefix: " + prefix)
        else:
            raise ValueError(
                "Expected FOG Project backup to end with .partition: " +
                absolute_fogproject_img_path)

        statbuf = os.stat(self.absolute_path)
        self.last_modified_timestamp = format_datetime(
            datetime.fromtimestamp(statbuf.st_mtime))
        print("Last modified timestamp " + self.last_modified_timestamp)

        dir = Path(absolute_fogproject_img_path).parent.as_posix()
        print("FOG Project directory : " + dir)

        dirname = os.path.dirname(absolute_fogproject_img_path)
        # FogProject has its main sfdisk file named eg, 'd1.partitions', but it may also have an unmodified sfdisk
        # file named eg, "d1.minimum.partitions". This approach is apparently used to support restoring to disks
        # smaller than the original. Rescuezilla intends to implement something similar [1], but for now we always
        # use the original, unmodified file (which may be "d1.partitions" if the FOG Project's resize checkbox was not
        # ticked during backup).
        #
        # [1] https://github.com/rescuezilla/rescuezilla/issues/18
        minimum_partitions_sfdisk_path = os.path.join(
            dir, prefix + ".minimum.partitions")
        if os.path.exists(minimum_partitions_sfdisk_path):
            # TODO: Implement FOG Project's intelligent partition resizing. [1] Rescuezilla has growing filesystems,
            # TODO: but not yet modifying partitions.
            # [1] https://github.com/rescuezilla/rescuezilla/issues/18
            print(
                "Found minimum partitions, but ignoring until partition resize implemented"
            )

        sfdisk_absolute_path = absolute_fogproject_img_path
        self.normalized_sfdisk_dict = Sfdisk.generate_normalized_sfdisk_dict(
            sfdisk_absolute_path, self)

        # FOG Project images sometimes contains a file named eg, "d1.original.fstypes" which contains the association
        # between partition numbers with filesystems considered resizable, and their associated device nodes.
        self.original_fstypes_dict = {}
        original_fstypes_filepath = os.path.join(dir,
                                                 prefix + ".original.fstypes")
        if isfile(original_fstypes_filepath):
            self.original_fstypes_dict = FogProjectImage.parse_original_fstypes_output(
                Utility.read_file_into_string(original_fstypes_filepath))

        # FOG Project images sometimes contains a file named eg, "d1.fixed_sized_partitions" which is a colon separated
        # list of the partition numbers that *cannot* be resized.
        self.fixed_size_partitions = []
        fixed_sized_partitions_path = os.path.join(
            dir, prefix + ".fixed_sized_partitions")
        if os.path.exists(fixed_sized_partitions_path):
            fixed_sized_partitions_string = Utility.read_file_into_string(
                fixed_sized_partitions_path).strip()
            self.fixed_size_partitions = fixed_sized_partitions_string.split(
                ":")
            # Convert every element from a string to an int
            self.fixed_size_partitions = list(
                map(int, self.fixed_size_partitions))
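            # For illustration only (assumed example content; real FOG images may differ):
            #   >>> list(map(int, "1:3:5".split(":")))
            #   [1, 3, 5]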

        # FOG Project images sometimes contains a file named eg, "d1.original.swapuuids" which contains the association
        # between a partition number and the UUID of the Linux swap partition that resides on there.
        self.original_swapuuids_dict = {}
        original_swapuuids_filepath = os.path.join(
            dir, prefix + ".original.swapuuids")
        if isfile(original_swapuuids_filepath):
            self.original_swapuuids_dict = FogProjectImage.parse_original_swapuuids_output(
                Utility.read_file_into_string(original_swapuuids_filepath))

        if 'device' in self.normalized_sfdisk_dict['sfdisk_dict'].keys():
            self.long_device_node_disk_list = [
                self.normalized_sfdisk_dict['sfdisk_dict']['device']
            ]

        # FOG Project images sometimes have a file named eg, "d1.size" that contains the drive size in bytes.
        size_path = os.path.join(dir, prefix + ".size")
        if os.path.exists(size_path):
            # When the file exists, use it.
            size_string = Utility.read_file_into_string(size_path).strip()
            # TODO: split smarter
            split = size_string.split(":")
            self.size_bytes = int(split[1])
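            # For illustration only — this assumes a "<label>:<bytes>" layout matching the split above
            # (hypothetical content, not taken from a real image):
            #   >>> int("sda:256060514304".split(":")[1])
            #   256060514304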

        self._mbr_absolute_path = None
        mbr_path_string = os.path.join(dirname, prefix + ".mbr")
        if os.path.exists(mbr_path_string):
            self._mbr_absolute_path = mbr_path_string
        else:
            self.warning_dict[self.normalized_sfdisk_dict['sfdisk_dict']
                              ['device']] = "Missing MBR"

        self.has_grub = False
        has_grub_path_string = os.path.join(dirname, prefix + "has_grub")
        if os.path.exists(has_grub_path_string):
            self.has_grub = True

        if 'device' in self.normalized_sfdisk_dict['sfdisk_dict'].keys():
            self.short_device_node_disk_list = [
                self.normalized_sfdisk_dict['sfdisk_dict']['device']
            ]

        self.partclone_info_dict_dict = collections.OrderedDict([])
        self.image_format_dict_dict = collections.OrderedDict([])
        for long_device_node in self.normalized_sfdisk_dict['sfdisk_dict'][
                'partitions'].keys():
            if self.normalized_sfdisk_dict['sfdisk_dict']['partitions'][
                    long_device_node]['type'] == "27":
                # TODO populate
                continue
            base_device_node, partition_number = Utility.split_device_string(
                long_device_node)
            if partition_number in self.original_swapuuids_dict.keys():
                # Swap handled below
                continue
            # The partclone image FOG Project creates can be compressed with either gzip or zstd, and can be split
            # into multiple files, depending on the settings the user configures. The compression format is not encoded
            # in the filename.
            #
            # No split: d1p3.img     (may or may not be compressed)
            # Split:    d1p3.img.001 (may or may not be compressed)
            image_match_string = prefix + "p" + str(partition_number) + ".img*"
            image_match_path_string = os.path.join(dirname, image_match_string)
            print(image_match_path_string)
            # Get the absolute paths of the partition images. Eg, [/path/to/d1p3.img.001, /path/to/d1p3.img.002 etc]
            abs_image_list = glob.glob(image_match_path_string)
            # Sort alphabetically. A natural (numeric) sort is not required here because the suffixes have a fixed
            # number of digits (so no risk of "1, 10, 2, 3" ordering issues), and partclone manages this when the
            # number of digits is no longer fixed (see above)
            abs_image_list.sort()
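            # A minimal illustration of why a plain sort suffices with fixed-width suffixes
            # (hypothetical filenames):
            #   >>> sorted(["d1p3.img.002", "d1p3.img.010", "d1p3.img.001"])
            #   ['d1p3.img.001', 'd1p3.img.002', 'd1p3.img.010']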
            if len(abs_image_list) == 0:
                self.warning_dict[long_device_node] = _(
                    "Cannot find partition's associated partclone image"
                ) + "\n        " + image_match_string
                continue

            detected_compression = Utility.detect_compression(abs_image_list)
            # May be PartImage image.
            self.partclone_info_dict_dict[
                long_device_node] = Partclone.get_partclone_info_dict(
                    abs_image_list, long_device_node, detected_compression)
            filesystem = self.partclone_info_dict_dict[long_device_node][
                'filesystem'].lower()
            print(str(self.partclone_info_dict_dict))

            if filesystem != "<unknown>" and filesystem != "raw":
                self.image_format_dict_dict[long_device_node] = {
                    'type': "partclone",
                    'absolute_filename_glob_list': abs_image_list,
                    'compression': detected_compression,
                    'filesystem': filesystem,
                    'binary': "partclone." + filesystem,
                    "prefix": prefix,
                    'is_lvm_logical_volume': False
                }
            else:
                self.image_format_dict_dict[long_device_node] = {
                    'type': "dd",
                    'absolute_filename_glob_list': abs_image_list,
                    'compression': detected_compression,
                    'filesystem': filesystem,
                    'binary': "partclone.dd",
                    "prefix": prefix,
                    'is_lvm_logical_volume': False
                }

        for swapuuid_key in self.original_swapuuids_dict.keys():
            # The key to the Swap UUID is the partition number, but long_device_node is much more convenient
            long_device_node = Utility.join_device_string(
                self.short_device_node_disk_list[0], swapuuid_key)
            print("Converting swap partition number " + str(swapuuid_key) +
                  " to key " + long_device_node)
            self.image_format_dict_dict[long_device_node] = {
                'type': "swap",
                'uuid': self.original_swapuuids_dict[swapuuid_key],
                'label': "",
                'filesystem': "swap",
                "prefix": prefix,
                'is_lvm_logical_volume': False
            }
        if self.size_bytes == 0:
            # When the file doesn't exist, estimate drive capacity from sfdisk partition table backup
            last_partition_key, last_partition_final_byte = Sfdisk.get_highest_offset_partition(
                self.normalized_sfdisk_dict)
            self.size_bytes = last_partition_final_byte
        # Convert size in bytes to KB/MB/GB/TB as relevant
        self.enduser_readable_size = Utility.human_readable_filesize(
            int(self.size_bytes))
        self.is_needs_decryption = False
Example #14
    def __init__(self, partition_long_device_node, absolute_path=None, enduser_filename=None):
        self.image_format = "METADATA_ONLY_FORMAT"
        self.long_device_node = partition_long_device_node
        if absolute_path is None:
            self.absolute_path = partition_long_device_node
        else:
            self.absolute_path = absolute_path

        if enduser_filename is None:
            self.enduser_filename = partition_long_device_node
        else:
            self.enduser_filename = enduser_filename
        self.normalized_sfdisk_dict = {'absolute_path': None, 'sfdisk_dict': {'partitions': {}}, 'file_length': 0}
        self.user_notes = ""
        self.warning_dict = {}

        # Clonezilla format
        self.ebr_dict = {}
        self.efi_nvram_dat_absolute_path = None
        self.short_device_node_partition_list = []
        self.short_device_node_disk_list = []
        self.lvm_vg_dev_dict = {}
        self.lvm_logical_volume_dict = {}
        self.sfdisk_chs_dict = None
        self.dev_fs_dict = {}
        self.size_bytes = 0
        self.enduser_readable_size = ""
        self.is_needs_decryption = False
        self.parted_dict = {'partitions': {}}
        self.post_mbr_gap_dict = {}
        self._mbr_absolute_path = None

        statbuf = os.stat(self.absolute_path)
        self.last_modified_timestamp = format_datetime(datetime.fromtimestamp(statbuf.st_mtime))
        print("Last modified timestamp " + self.last_modified_timestamp)

        process, flat_command_string, failed_message = Utility.run("Get partition table", ["sfdisk", "--dump", partition_long_device_node], use_c_locale=True)
        if process.returncode != 0:
            # Devices without a partition table are expected to fail here
            print("Could not extract partition table using sfdisk: " + process.stderr)
        else:
            sfdisk_string = process.stdout
            f = tempfile.NamedTemporaryFile(mode='w', delete=False)
            f.write(sfdisk_string)
            f.close()
            self.normalized_sfdisk_dict = Sfdisk.generate_normalized_sfdisk_dict(f.name, self)
        if 'device' in self.normalized_sfdisk_dict['sfdisk_dict'].keys():
            self.short_device_node_disk_list = [self.normalized_sfdisk_dict['sfdisk_dict']['device']]

        # Get the parted partition table. For convenience, using the bytes unit, not sectors.
        parted_process, flat_command_string, failed_message = Utility.run("Get filesystem information",
                                                          ["parted", "--script", partition_long_device_node, "unit", "b",
                                                           "print"], use_c_locale=True)
        if parted_process.returncode != 0:
            # Devices without a partition table are expected to fail here
            print("Could not extract filesystem using parted: " + parted_process.stderr)
        self.parted_dict = Parted.parse_parted_output(parted_process.stdout)
        if len(self.short_device_node_disk_list) == 0 and 'long_dev_node' in self.parted_dict.keys():
            self.short_device_node_disk_list = [self.parted_dict['long_dev_node']]

        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.parted_dict)

        lsblk_process, flat_command_string, failed_message = Utility.run("Querying device capacity",
                                                                         ["lsblk", "--getsize64", partition_long_device_node],
                                                                         use_c_locale=True)
        if lsblk_process.returncode != 0:
            # Expected for NBD device nodes
            print("Failed to get drive capacity from device node")

        # Create a CombinedDriveState structure for the MetadataOnlyImage, which may otherwise not be populated.
        lsblk_cmd_list = ["lsblk", "-o", "KNAME,NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL,SERIAL", "--paths", "--bytes",
                          "--json", self.long_device_node]
        process, flat_command_string, fail_description = Utility.run("lsblk", lsblk_cmd_list, use_c_locale=True)
        lsblk_json_dict = json.loads(process.stdout)

        # blkid is called in DriveQuery, and without arguments it prints information about all *partitions* in the
        # system (eg, /dev/sda1, /dev/sda2), but not the base device. With the base device as an argument, it only
        # prints out the base device. However, globbing with a wildcard match prints out the base device *and* the
        # partitions. Not ideal, but it works.
        partition_device_glob_list = glob.glob(self.long_device_node + "*")
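        # For illustration only (hypothetical device nodes): a glob of "/dev/sda*" would match
        # ["/dev/sda", "/dev/sda1", "/dev/sda2"], so the blkid call below covers the base device
        # and its partitions in a single invocation.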
        blkid_cmd_list = ["blkid"] + partition_device_glob_list
        process, flat_command_string, fail_description = Utility.run("blkid", blkid_cmd_list, use_c_locale=True)
        blkid_dict = Blkid.parse_blkid_output(process.stdout)

        # OS Prober takes too long to run
        os_prober_dict = {}

        self.drive_state = CombinedDriveState.construct_combined_drive_state_dict(lsblk_json_dict=lsblk_json_dict,
                                                                                  blkid_dict=blkid_dict,
                                                                                  osprober_dict=os_prober_dict,
                                                                                  parted_dict_dict={self.long_device_node:self.parted_dict},
                                                                                  sfdisk_dict_dict={self.long_device_node:self.normalized_sfdisk_dict})
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.drive_state)

        self.image_format_dict_dict = collections.OrderedDict([])
        total_size_estimate = 0
        drive_state_partitions_dict = self.drive_state[self.long_device_node]['partitions']
        for partition_long_device_node in drive_state_partitions_dict:
            if 'type' in drive_state_partitions_dict[partition_long_device_node].keys() \
                    and drive_state_partitions_dict[partition_long_device_node]['type'] == "extended":
                # Skip extended partitions as they will be handled by the '-ebr' file
                continue
            self.image_format_dict_dict[partition_long_device_node] = {'type': "raw",
                                                                       'compression': "uncompressed",
                                                                       'is_lvm_logical_volume': False,
                                                                       'filesystem': drive_state_partitions_dict[partition_long_device_node]['filesystem']}

        # Estimate the disk size from sfdisk partition table backup
        last_partition_key, last_partition_final_byte = Sfdisk.get_highest_offset_partition(self.normalized_sfdisk_dict)
        self.size_bytes = last_partition_final_byte
        if self.size_bytes == 0:
            self.size_bytes = self.parted_dict['capacity']
        # Convert size in bytes to KB/MB/GB/TB as relevant
        self.enduser_readable_size = Utility.human_readable_filesize(int(self.size_bytes))
Example #15
    def __init__(self, absolute_path, enduser_filename, filename):
        # Redo Backup images never need decryption
        self.is_needs_decryption = False
        self.absolute_path = absolute_path
        self.enduser_filename = enduser_filename
        print("Reading backup : " + absolute_path)
        dirname = os.path.dirname(absolute_path)
        self.user_notes = ""
        self.warning_dict = {}

        # Clonezilla format
        self.ebr_dict = {}
        self.short_device_node_partition_list = []
        self.short_device_node_disk_list = []
        self.lvm_vg_dev_dict = {}
        self.lvm_logical_volume_dict = {}
        self.dev_fs_dict = {}
        self.size_bytes = 0
        self.enduser_readable_size = ""
        self.is_needs_decryption = False
        self.normalized_sfdisk_dict = {'absolute_path': None, 'sfdisk_dict': {'partitions': {}}, 'file_length': 0}
        self.parted_dict = {'partitions': {}}
        self.post_mbr_gap_absolute_path = {}

        self.compression = "gzip"
        if filename.endswith(".redo"):
            prefix = filename.split(".redo")[0]
            print("prefix: " + prefix)
            self.compression = "uncompressed"
            self.image_format = "REDOBACKUP_0.9.2_FORMAT"
        elif filename.endswith(".backup"):
            prefix = filename.split(".backup")[0]
            print("prefix: " + prefix)
            rescuezilla_version_abs_path = os.path.join(dirname, prefix + ".rescuezilla.backup_version")
            if not os.path.exists(rescuezilla_version_abs_path):
                self.image_format = "REDOBACKUP_0.9.3_1.0.4_FORMAT"
            else:
                self.image_format = "RESCUEZILLA_1.0.5_FORMAT"
                self.rescuezilla_version = Utility.read_file_into_string(rescuezilla_version_abs_path).strip()
                print("Backup originally created with Rescuezilla version: " + self.rescuezilla_version)
        else:
            raise ValueError("Expected Rescuezilla backup to end with .backup: " + absolute_path)

        self.last_modified_timestamp = format_datetime(datetime.fromtimestamp(os.stat(absolute_path).st_mtime))
        print("Last modified timestamp " + self.last_modified_timestamp)

        self.short_device_node_partition_list = Utility.read_linebreak_delimited_file_into_list(absolute_path)
        print("Source_partitions: " + str(self.short_device_node_partition_list))

        self.size_bytes = int(Utility.read_file_into_string(os.path.join(dirname, prefix + ".size")).strip())
        print("Size: " + str(self.size_bytes))
        # Convert size in bytes to KB/MB/GB/TB as relevant
        self.enduser_readable_size = Utility.human_readable_filesize(int(self.size_bytes))

        self._mbr_absolute_path = os.path.join(dirname, prefix + ".mbr")
        # Get the size of the MBR image because a Sourceforge user named chcatzsf released two unofficial
        # German-language Redo Backup and Recovery updates based on Ubuntu 13.10 and Ubuntu 14.04. These two versions
        # incorrectly created 512-byte Master Boot Record backup images. More information [1].
        #
        # [1] https://github.com/rescuezilla/rescuezilla/wiki/Bugs-in-unofficial-Redo-Backup-updates#bugs-in-chcatzsfs-ubuntu-1310-and-1404-releases-german-language-only
        self.mbr_size = int(os.stat(self._mbr_absolute_path).st_size)
        if self.mbr_size == 512:
            # Explain the situation to users with this issue and link to further information about how the GRUB boot
            # loader can be regenerated, and confirm whether they wish to proceed.
            truncated_bootloader_bug_url = "https://github.com/rescuezilla/rescuezilla/wiki/Bugs-in-unofficial-Redo-Backup-updates#bugs-in-chcatzsfs-ubuntu-1310-and-1404-releases-german-language-only"
            # Context for translators: Two popular unofficial Redo Backup v1.0.4 updates by Sourceforge user chcatzsf
            # have major bugs where bootloaders like GRUB are not fully backed up, so Linux-based operating systems
            # cannot boot after a restore. This bug only affected those two updates (German-language only) and the
            # problem can be fixed with careful manual intervention. Translating this message into languages other
            # than English and German is not required. Full details in:
            # https://github.com/rescuezilla/rescuezilla/wiki/Bugs-in-unofficial-Redo-Backup-updates#bugs-in
            # -chcatzsfs-ubuntu-1310-and-1404-releases-german-language-only
            self.warning_dict[enduser_filename] = _("The backup's bootloader data is shorter than expected. This happens with backups created by an unofficial Redo Backup update. If the backup contained certain bootloaders like GRUB, the restored hard drive will not boot correctly without a manual fix. All data is still fully recoverable but manual intervention may be required to restore the bootloader. Please consult {url} for information and assistance. The destination drive has not yet been modified. Do you wish to continue with the restore?").format(url=truncated_bootloader_bug_url)

        sfdisk_absolute_path = os.path.join(dirname, prefix + ".sfdisk")
        self.normalized_sfdisk_dict = Sfdisk.generate_normalized_sfdisk_dict(sfdisk_absolute_path, self)
        if not self.image_format == "RESCUEZILLA_1.0.5_FORMAT":
            self.normalized_sfdisk_dict['prefer_old_sfdisk_binary'] = True

        # Cannot rely on sfdisk drive name due to some Redo Backup versions not populating this file correctly.
        if 'device' in self.normalized_sfdisk_dict['sfdisk_dict'].keys():
            self.short_device_node_disk_list = [self.normalized_sfdisk_dict['sfdisk_dict']['device']]
        else:
            self.short_device_node_disk_list = ["unknown"]

        # The NVMe drive handling in Rescuezilla v1.0.5.1+ set the drive name in the .backup file to start with "sdz"
        # to preserve the ability to restore with older versions of Rescuezilla (which read the source drive). Since
        # v2.0, this string is used as a key into the sfdisk partitions, so a ".backup" partition list containing
        # elements such as "sdz3" needs to be renamed to eg, "nvme0n1p3" here to preserve full backwards compatibility.
        if 'partitions' in self.normalized_sfdisk_dict['sfdisk_dict'].keys() and len(self.normalized_sfdisk_dict['sfdisk_dict']['partitions'].keys()) > 1:
            # Long drive node extracted from sfdisk dictionary
            sfdisk_long_drive_node = list(self.normalized_sfdisk_dict['sfdisk_dict']['partitions'].keys())[0]
            actual_base_device_node, first_partition_number = Utility.split_device_string(sfdisk_long_drive_node)
            for i in range(len(self.short_device_node_partition_list)):
                node_to_potentially_rename = self.short_device_node_partition_list[i]
                if node_to_potentially_rename.startswith("sdz") and actual_base_device_node != "sdz":
                    # This node is renamed, and the list is updated.
                    fake_base_device_node, actual_partition_number = Utility.split_device_string(node_to_potentially_rename)
                    corrected_long_device_node = Utility.join_device_string(actual_base_device_node, actual_partition_number)
                    corrected_short_device_node = re.sub('/dev/', '', corrected_long_device_node)
                    self.short_device_node_partition_list[i] = corrected_short_device_node

        self.partclone_info_dict_dict = collections.OrderedDict([])
        self.image_format_dict_dict = collections.OrderedDict([])
        # All Rescuezilla v1.0.5 and Redo Backup and Recovery images use gzip compression EXCEPT the uncompressed v0.9.2
        compression = "gzip"
        for short_device_node in self.short_device_node_partition_list:
            base_device_node, partition_number = Utility.split_device_string(short_device_node)
            if self.image_format == "REDOBACKUP_0.9.2_FORMAT":
                # Redo Backup v0.9.2 images are *not* split
                image_match_string = os.path.join(dirname, prefix + "_part" + str(partition_number))
                # ... and not compressed
                compression = "uncompressed"
            else:
                # Redo Backup v0.9.3-v1.0.4 and Rescuezilla v1.0.5 images are split up, eg "20200901_part2.000".
                # However, care is needed because Rescuezilla's backup directory contains log files,
                # eg "20200901_part2_partclone.log", so the dot character before the asterisk is important: it prevents
                # the log file's underscore suffix from matching.
                image_match_string = os.path.join(dirname, prefix + "_part" + str(partition_number) + ".*")
            # Get absolute path partition images. Eg, [/path/to/20200813_part3.000, /path/to/20200813_part3.001 etc]
            abs_partclone_image_list = glob.glob(image_match_string)
            # Sort alphabetically. A natural (numeric) sort is not required here because the suffixes have a fixed
            # number of digits (so no risk of "1, 10, 2, 3" ordering issues)
            abs_partclone_image_list.sort()
            if len(abs_partclone_image_list) == 0:
                # The legacy Redo Backup and Recovery format cannot distinguish between a failed partclone backup and
                # a user who chose not to back up a partition
                self.warning_dict[short_device_node] = _("Cannot find partition's associated partclone image")
                continue
            self.image_format_dict_dict[short_device_node] = {'abs_image_glob': abs_partclone_image_list}

            command = "partclone"
            filesystem = "<unknown>"
            # Rescuezilla v1.0.5 format creates partition to filesystem mapping files
            command_filepath = os.path.join(dirname, prefix + ".partclone.command.part" + str(partition_number))
            if os.path.isfile(command_filepath):
                command = Utility.read_file_into_string(command_filepath).strip()
                print(str(short_device_node) + ": " + command)
                restore_binary = command
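                # Strip the "partclone." prefix to recover the filesystem name, eg (illustrative)
                # "partclone.ntfs" -> "ntfs". Note the dot in the pattern is a regex wildcard here,
                # which is harmless for these command names.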
                filesystem = re.sub('partclone.', '', restore_binary)

            use_old_partclone = False
            # Use partclone.info to extract filesystem and size information from the image files. This is a
            # time-consuming operation, but Redo 0.9.3-1.0.4 images benefit from it.
            # The Rescuezilla 1.0.5 format already records the filesystem (from the partclone.command files) and the
            # size (from sfdisk), so it does not need this step.
            if self.image_format == "REDOBACKUP_0.9.3_1.0.4_FORMAT" or self.image_format == "REDOBACKUP_0.9.2_FORMAT":
                use_old_partclone = True
                self.partclone_info_dict_dict[short_device_node] = Partclone.get_partclone_info_dict(abs_partclone_image_list, short_device_node, compression)
                filesystem = self.partclone_info_dict_dict[short_device_node]['filesystem']
                print(str(self.partclone_info_dict_dict))
            if filesystem != "<unknown>" and filesystem != "raw":
                self.image_format_dict_dict[short_device_node] = {'type': "partclone",
                                                                 'absolute_filename_glob_list': abs_partclone_image_list,
                                                                 'compression': compression,
                                                                 'filesystem': filesystem,
                                                                 'binary': "partclone." + filesystem,
                                                                 "prefix": prefix,
                                                                 'use_old_partclone': use_old_partclone,
                                                                 'is_lvm_logical_volume': False}
            else:
                self.image_format_dict_dict[short_device_node] = {'type': "dd",
                                                                 'absolute_filename_glob_list': abs_partclone_image_list,
                                                                 'compression': compression,
                                                                 'binary': "partclone.dd",
                                                                 'filesystem': filesystem,
                                                                 "prefix": prefix,
                                                                 'use_old_partclone': use_old_partclone,
                                                                 'is_lvm_logical_volume': False}

            # Compute the partition size estimate, and save it to avoid recomputing.
            self.image_format_dict_dict[short_device_node]['estimated_size_bytes'] = self._compute_partition_size_byte_estimate(short_device_node)
Example #16
    def do_backup(self):
        self.at_least_one_non_fatal_error = False
        self.requested_stop = False
        # Clear proc dictionary
        self.proc.clear()
        self.summary_message_lock = threading.Lock()
        self.summary_message = ""

        env = Utility.get_env_C_locale()

        print("mkdir " + self.dest_dir)
        os.mkdir(self.dest_dir)

        short_selected_device_node = re.sub('/dev/', '',
                                            self.selected_drive_key)
        enduser_date = datetime.today().strftime('%Y-%m-%d-%H%M')
        clonezilla_img_filepath = os.path.join(self.dest_dir, "clonezilla-img")
        with open(clonezilla_img_filepath, 'w') as filehandle:
            try:
                output = "This image was saved by Rescuezilla at " + enduser_date + "\nSaved by " + self.human_readable_version + "\nThe log during saving:\n----------------------------------------------------------\n\n"
                filehandle.write(output)
            except:
                tb = traceback.format_exc()
                traceback.print_exc()
                error_message = _(
                    "Failed to write destination file. Please confirm it is valid to create the provided file path, and try again."
                ) + "\n\n" + tb
                GLib.idle_add(self.completed_backup, False, error_message)
                return

        self.logger = Logger(clonezilla_img_filepath)
        GLib.idle_add(self.update_backup_progress_bar, 0)

        process, flat_command_string, failed_message = Utility.run(
            "Saving blkdev.list", [
                "lsblk", "-oKNAME,NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL",
                self.selected_drive_key
            ],
            use_c_locale=True,
            output_filepath=os.path.join(self.dest_dir, "blkdev.list"),
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        blkid_cmd_list = ["blkid"]
        sort_cmd_list = ["sort", "-V"]
        Utility.print_cli_friendly("blkid ", [blkid_cmd_list, sort_cmd_list])
        self.proc['blkid'] = subprocess.Popen(blkid_cmd_list,
                                              stdout=subprocess.PIPE,
                                              env=env,
                                              encoding='utf-8')

        process, flat_command_string, failed_message = Utility.run(
            "Saving blkid.list", ["blkid"],
            use_c_locale=True,
            output_filepath=os.path.join(self.dest_dir, "blkid.list"),
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        process, flat_command_string, failed_message = Utility.run(
            "Saving Info-lshw.txt", ["lshw"],
            use_c_locale=True,
            output_filepath=os.path.join(self.dest_dir, "Info-lshw.txt"),
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        info_dmi_txt_filepath = os.path.join(self.dest_dir, "Info-dmi.txt")
        with open(info_dmi_txt_filepath, 'w') as filehandle:
            filehandle.write(
                "# This image was saved from this machine with DMI info at " +
                enduser_date + ":\n")
            filehandle.flush()
        process, flat_command_string, failed_message = Utility.run(
            "Saving Info-dmi.txt", ["dmidecode"],
            use_c_locale=True,
            output_filepath=info_dmi_txt_filepath,
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        info_lspci_filepath = os.path.join(self.dest_dir, "Info-lspci.txt")
        with open(info_lspci_filepath, 'w') as filehandle:
            # TODO: Improve datetime format string.
            filehandle.write(
                "This image was saved from this machine with PCI info at " +
                enduser_date + "\n")
            filehandle.write("'lspci' results:\n")
            filehandle.flush()

        process, flat_command_string, failed_message = Utility.run(
            "Appending `lspci` output to Info-lspci.txt", ["lspci"],
            use_c_locale=True,
            output_filepath=info_lspci_filepath,
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        msg_delimiter_star_line = "*****************************************************."
        with open(info_lspci_filepath, 'a+') as filehandle:
            filehandle.write(msg_delimiter_star_line + "\n")
            filehandle.write("'lspci -n' results:\n")
            filehandle.flush()

        # Show PCI vendor and device codes as numbers instead of looking them up in the PCI ID list.
        process, flat_command_string, failed_message = Utility.run(
            "Appending `lspci -n` output to Info-lspci.txt", ["lspci", "-n"],
            use_c_locale=True,
            output_filepath=info_lspci_filepath,
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        info_smart_filepath = os.path.join(self.dest_dir, "Info-smart.txt")
        with open(info_smart_filepath, 'w') as filehandle:
            filehandle.write(
                "This image was saved from this machine with hard drive S.M.A.R.T. info at "
                + enduser_date + "\n")
            filehandle.write(msg_delimiter_star_line + "\n")
            filehandle.write("For the drive: " + self.selected_drive_key +
                             "\n")
            filehandle.flush()

        # VirtualBox doesn't support smart, so ignoring the exit code here.
        # FIXME: Improve this.
        process, flat_command_string, failed_message = Utility.run(
            "Saving Info-smart.txt",
            ["smartctl", "--all", self.selected_drive_key],
            use_c_locale=True,
            output_filepath=info_smart_filepath,
            logger=self.logger)

        filepath = os.path.join(self.dest_dir, "Info-packages.txt")
        # Save Debian package information
        if shutil.which("dpkg") is not None:
            rescuezilla_package_list = ["rescuezilla", "util-linux", "gdisk"]
            with open(filepath, 'w') as filehandle:
                filehandle.write(
                    "Image was saved by these Rescuezilla-related packages:\n "
                )
                for pkg in rescuezilla_package_list:
                    dpkg_process = subprocess.run(['dpkg', "--status", pkg],
                                                  capture_output=True,
                                                  encoding="UTF-8")
                    if dpkg_process.returncode != 0:
                        continue
                    for line in dpkg_process.stdout.split("\n"):
                        if re.search("^Version: ", line):
                            version = line[len("Version: "):]
                            filehandle.write(pkg + "-" + version + " ")
                filehandle.write("\nSaved by " + self.human_readable_version +
                                 ".\n")

        # TODO: Clonezilla creates a file named "Info-saved-by-cmd.txt" file, to allow users to re-run the exact
        #  command again without going through the wizard. The proposed Rescuezilla approach to this feature is
        #  discussed here: https://github.com/rescuezilla/rescuezilla/issues/106

        filepath = os.path.join(self.dest_dir, "parts")
        with open(filepath, 'w') as filehandle:
            i = 0
            for partition_key in self.partitions_to_backup:
                short_partition_key = re.sub('/dev/', '', partition_key)
                to_backup_dict = self.partitions_to_backup[partition_key]
                is_swap = False
                if 'filesystem' in to_backup_dict.keys(
                ) and to_backup_dict['filesystem'] == "swap":
                    is_swap = True
                if 'type' not in to_backup_dict.keys(
                ) or 'type' in to_backup_dict.keys(
                ) and 'extended' != to_backup_dict['type'] and not is_swap:
                    # Clonezilla does not write the extended partition node into the parts file,
                    # nor does it write the swap partition nodes
                    filehandle.write('%s' % short_partition_key)
                    # Ensure no trailing space on final iteration (to match Clonezilla format exactly)
                    if i + 1 != len(self.partitions_to_backup.keys()):
                        filehandle.write(' ')
                i += 1
            filehandle.write('\n')
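        # The resulting 'parts' file is a single line of space-separated short partition device nodes,
        # eg (illustrative): "sda1 sda2 sda5"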

        filepath = os.path.join(self.dest_dir, "disk")
        with open(filepath, 'w') as filehandle:
            filehandle.write('%s\n' % short_selected_device_node)

        compact_parted_filename = short_selected_device_node + "-pt.parted.compact"
        # Parted drive information with human-readable "compact" units: KB/MB/GB rather than sectors.
        process, flat_command_string, failed_message = Utility.run(
            "Saving " + compact_parted_filename, [
                "parted", "--script", self.selected_drive_key, "unit",
                "compact", "print"
            ],
            use_c_locale=True,
            output_filepath=os.path.join(self.dest_dir,
                                         compact_parted_filename),
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        # Parted drive information with standard sector units. Clonezilla doesn't output easily parsable output using
        # the --machine flag, so for maximum Clonezilla compatibility neither does Rescuezilla.
        parted_filename = short_selected_device_node + "-pt.parted"
        parted_process, flat_command_string, failed_message = Utility.run(
            "Saving " + parted_filename, [
                "parted", "--script", self.selected_drive_key, "unit", "s",
                "print"
            ],
            use_c_locale=True,
            output_filepath=os.path.join(self.dest_dir, parted_filename),
            logger=self.logger)
        if parted_process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        parted_dict = Parted.parse_parted_output(parted_process.stdout)
        partition_table = parted_dict['partition_table']

        # Save MBR for both msdos and GPT disks
        if "gpt" == partition_table or "msdos" == partition_table:
            filepath = os.path.join(self.dest_dir,
                                    short_selected_device_node + "-mbr")
            process, flat_command_string, failed_message = Utility.run(
                "Saving " + filepath, [
                    "dd", "if=" + self.selected_drive_key, "of=" + filepath,
                    "bs=512", "count=1"
                ],
                use_c_locale=False,
                logger=self.logger)
            if process.returncode != 0:
                with self.summary_message_lock:
                    self.summary_message += failed_message
                GLib.idle_add(self.completed_backup, False, failed_message)
                return

        if "gpt" == partition_table:
            first_gpt_filename = short_selected_device_node + "-gpt-1st"
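            # For a standard GPT layout with 512-byte sectors, the first 34 sectors cover the protective MBR
            # (1 sector), the GPT header (1 sector), and the 128-entry partition table (32 sectors).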
            dd_process, flat_command_string, failed_message = Utility.run(
                "Saving " + first_gpt_filename, [
                    "dd", "if=" + self.selected_drive_key,
                    "of=" + os.path.join(self.dest_dir, first_gpt_filename),
                    "bs=512", "count=34"
                ],
                use_c_locale=False,
                logger=self.logger)
            if dd_process.returncode != 0:
                with self.summary_message_lock:
                    self.summary_message += failed_message
                GLib.idle_add(self.completed_backup, False, failed_message)
                return

            # From Clonezilla's scripts/sbin/ocs-functions:
            # We need to get the total size of disk so that we can skip and dump the last block:
            # The output of 'parted -s /dev/sda unit s print' is like:
            # --------------------
            # Disk /dev/hda: 16777215s
            # Sector size (logical/physical): 512B/512B
            # Partition Table: gpt
            #
            # Number  Start     End        Size       File system  Name     Flags
            #  1      34s       409640s    409607s    fat32        primary  msftres
            #  2      409641s   4316406s   3906766s   ext2         primary
            #  3      4316407s  15625000s  11308594s  reiserfs     primary
            # --------------------
            # to_seek = "$((${src_disk_size_sec}-33+1))"
            to_skip = parted_dict['capacity'] - 32
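            # With the example output above (a 16777215-sector disk), to_skip would be 16777183, so the dd call
            # below copies the trailing sectors holding the secondary (backup) GPT table and header.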
            second_gpt_filename = short_selected_device_node + "-gpt-2nd"
            process, flat_command_string, failed_message = Utility.run(
                "Saving " + second_gpt_filename, [
                    "dd", "if=" + self.selected_drive_key,
                    "of=" + os.path.join(self.dest_dir, second_gpt_filename),
                    "skip=" + str(to_skip), "bs=512", "count=33"
                ],
                use_c_locale=False,
                logger=self.logger)
            if process.returncode != 0:
                with self.summary_message_lock:
                    self.summary_message += failed_message
                GLib.idle_add(self.completed_backup, False, failed_message)
                return

            # LC_ALL=C sgdisk -b $target_dir_fullpath/$(to_filename ${ihd})-gpt.gdisk /dev/$ihd | tee --append ${OCS_LOGFILE}
            gdisk_filename = short_selected_device_node + "-gpt.gdisk"
            process, flat_command_string, failed_message = Utility.run(
                "Saving " + gdisk_filename, [
                    "sgdisk", "--backup",
                    os.path.join(self.dest_dir, gdisk_filename),
                    self.selected_drive_key
                ],
                use_c_locale=True,
                logger=self.logger)
            if process.returncode != 0:
                with self.summary_message_lock:
                    self.summary_message += failed_message
                GLib.idle_add(self.completed_backup, False, failed_message)
                return

            sgdisk_filename = short_selected_device_node + "-gpt.sgdisk"
            process, flat_command_string, failed_message = Utility.run(
                "Saving " + sgdisk_filename,
                ["sgdisk", "--print", self.selected_drive_key],
                use_c_locale=True,
                output_filepath=os.path.join(self.dest_dir, sgdisk_filename),
                logger=self.logger)
            if process.returncode != 0:
                with self.summary_message_lock:
                    self.summary_message += failed_message
                GLib.idle_add(self.completed_backup, False, failed_message)
                return
        elif "msdos" == partition_table:
            # image_save
            first_partition_key, first_partition_offset_bytes = CombinedDriveState.get_first_partition(
                self.partitions_to_backup)
            # Maximum hidden data to backup is 1024MB
            hidden_data_after_mbr_limit = 1024 * 1024 * 1024
            if first_partition_offset_bytes > hidden_data_after_mbr_limit:
                self.logger.write(
                    "Calculated very large hidden data after MBR size. Skipping"
                )
            else:
                first_partition_offset_sectors = int(
                    first_partition_offset_bytes / 512)
                hidden_mbr_data_filename = short_selected_device_node + "-hidden-data-after-mbr"
                # FIXME: Appears one sector too large.
                process, flat_command_string, failed_message = Utility.run(
                    "Saving " + hidden_mbr_data_filename, [
                        "dd", "if=" + self.selected_drive_key, "of=" +
                        os.path.join(self.dest_dir, hidden_mbr_data_filename),
                        "skip=1", "bs=512",
                        "count=" + str(first_partition_offset_sectors)
                    ],
                    use_c_locale=False,
                    logger=self.logger)
                if process.returncode != 0:
                    with self.summary_message_lock:
                        self.summary_message += failed_message
                    GLib.idle_add(self.completed_backup, False, failed_message)
                    return

        else:
            self.logger.write("Partition table is: " + partition_table)

        # Parted sees drives with direct filesystem applied as loop partition table.
        if partition_table is not None and partition_table != "loop":
            sfdisk_filename = short_selected_device_node + "-pt.sf"
            process, flat_command_string, failed_message = Utility.run(
                "Saving " + sfdisk_filename,
                ["sfdisk", "--dump", self.selected_drive_key],
                output_filepath=os.path.join(self.dest_dir, sfdisk_filename),
                use_c_locale=True,
                logger=self.logger)
            if process.returncode != 0:
                with self.summary_message_lock:
                    self.summary_message += failed_message
                GLib.idle_add(self.completed_backup, False, failed_message)
                return

        process, flat_command_string, failed_message = Utility.run(
            "Retreiving disk geometry with sfdisk ",
            ["sfdisk", "--show-geometry", self.selected_drive_key],
            use_c_locale=True,
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        geometry_dict = Sfdisk.parse_sfdisk_show_geometry(process.stdout)
        filepath = os.path.join(self.dest_dir,
                                short_selected_device_node + "-chs.sf")
        with open(filepath, 'w') as filehandle:
            for key in geometry_dict.keys():
                output = key + "=" + str(geometry_dict[key])
                self.logger.write(output)
                filehandle.write('%s\n' % output)
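        # The resulting *-chs.sf file contains one key=value line per geometry field,
        # eg (illustrative values): "cylinders=30401", "heads=255", "sectors=63"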

        # Query all Physical Volumes (PV), Volume Group (VG) and Logical Volume (LV). See unit test for a worked example.
        # TODO: In the Rescuezilla application architecture, this LVM information is best extracted during the drive
        # TODO: query step, and then integrated into the "combined drive state" dictionary. Doing it during the backup
        # TODO: process matches how Clonezilla does it, which is sufficient for now.
        # FIXME: This section is duplicated in partitions_to_restore.py.
        # Start the Logical Volume Manager (LVM). Caller raises Exception on failure
        Lvm.start_lvm2(self.logger)
        relevant_vg_name_dict = {}
        vg_state_dict = Lvm.get_volume_group_state_dict(self.logger)
        for partition_key in list(self.partitions_to_backup.keys()):
            for report_dict in vg_state_dict['report']:
                for vg_dict in report_dict['vg']:
                    if 'pv_name' in vg_dict.keys(
                    ) and partition_key == vg_dict['pv_name']:
                        if 'vg_name' in vg_dict.keys():
                            vg_name = vg_dict['vg_name']
                        else:
                            GLib.idle_add(
                                ErrorMessageModalPopup.
                                display_nonfatal_warning_message, self.builder,
                                "Could not find volume group name vg_name in "
                                + str(vg_dict))
                            # TODO: Re-evaluate how exactly Clonezilla uses /NOT_FOUND and whether introducing it here
                            # TODO: could improve Rescuezilla/Clonezilla interoperability.
                            continue
                        if 'pv_uuid' in vg_dict.keys():
                            pv_uuid = vg_dict['pv_uuid']
                        else:
                            GLib.idle_add(
                                ErrorMessageModalPopup.
                                display_nonfatal_warning_message, self.builder,
                                "Could not find physical volume UUID pv_uuid in "
                                + str(vg_dict))
                            continue
                        relevant_vg_name_dict[vg_name] = partition_key
                        lvm_vg_dev_list_filepath = os.path.join(
                            self.dest_dir, "lvm_vg_dev.list")
                        with open(lvm_vg_dev_list_filepath,
                                  'a+') as filehandle:
                            filehandle.write(vg_name + " " + partition_key +
                                             " " + pv_uuid + "\n")

        lv_state_dict = Lvm.get_logical_volume_state_dict(self.logger)
        for report_dict in lv_state_dict['report']:
            for lv_dict in report_dict['lv']:
                # Only consider VGs that match the partitions to backup list
                if 'vg_name' in lv_dict.keys(
                ) and lv_dict['vg_name'] in relevant_vg_name_dict.keys():
                    vg_name = lv_dict['vg_name']
                    if 'lv_path' in lv_dict.keys():
                        lv_path = lv_dict['lv_path']
                    else:
                        GLib.idle_add(
                            ErrorMessageModalPopup.
                            display_nonfatal_warning_message, self.builder,
                            "Could not find lv_path name in " + str(lv_dict))
                        continue
                    file_command_process, flat_command_string, failed_message = Utility.run(
                        "logical volume file info",
                        ["file", "--dereference", "--special-files", lv_path],
                        use_c_locale=True,
                        logger=self.logger)
                    if file_command_process.returncode != 0:
                        with self.summary_message_lock:
                            self.summary_message += failed_message
                        GLib.idle_add(self.completed_backup, False,
                                      failed_message)
                        return

                    output = file_command_process.stdout.split(
                        " ", maxsplit=1)[1].strip()
                    lvm_logv_list_filepath = os.path.join(
                        self.dest_dir, "lvm_logv.list")
                    # Append to file
                    with open(lvm_logv_list_filepath, 'a+') as filehandle:
                        filehandle.write(lv_path + "  " + output + "\n")

                    if 'lv_dm_path' in lv_dict.keys():
                        # Device mapper path, eg /dev/mapper/vgtest-lvtest
                        lv_dm_path = lv_dict['lv_dm_path']
                    else:
                        GLib.idle_add(
                            self.completed_backup, False,
                            "Could not find lv_dm_path name in " +
                            str(lv_dict))
                        return

                    if lv_dm_path in self.drive_state.keys(
                    ) and 'partitions' in self.drive_state[lv_dm_path].keys():
                        # Remove the partition key associated with the volume group that contains this LVM logical volume
                        # eg, /dev/sdc1 with detected filesystem, and replace it with the logical volume filesystem.
                        # In other words, don't back up both the /dev/sdc1 device node AND the /dev/mapper node.
                        long_partition_key = relevant_vg_name_dict[
                            lv_dict['vg_name']]
                        self.partitions_to_backup.pop(long_partition_key, None)
                        for logical_volume in self.drive_state[lv_dm_path][
                                'partitions'].keys():
                            # Use the system drive state to get the exact filesystem for this /dev/mapper/ node,
                            # as derived from multiple sources (parted, lsblk etc.), similar to how Clonezilla does it.
                            self.partitions_to_backup[
                                lv_path] = self.drive_state[lv_dm_path][
                                    'partitions'][logical_volume]
                            self.partitions_to_backup[lv_path]['type'] = 'part'

                    lvm_vgname_filepath = os.path.join(
                        self.dest_dir, "lvm_" + vg_name + ".conf")
                    # TODO: Evaluate the Clonezilla message from 2013 that this command won't work on NFS
                    # TODO: due to a vgcfgbackup file lock issue.
                    vgcfgbackup_process, flat_command_string, failed_message = Utility.run(
                        "Saving LVM VG config " + lvm_vgname_filepath, [
                            "vgcfgbackup", "--file", lvm_vgname_filepath,
                            vg_name
                        ],
                        use_c_locale=True,
                        logger=self.logger)
                    if vgcfgbackup_process.returncode != 0:
                        with self.summary_message_lock:
                            self.summary_message += failed_message
                        GLib.idle_add(self.completed_backup, False,
                                      failed_message)
                        return

        filepath = os.path.join(self.dest_dir, "dev-fs.list")
        with open(filepath, 'w') as filehandle:
            filehandle.write('# <Device name>   <File system>\n')
            filehandle.write(
                '# The file systems detected below are a combination of several sources. The values may differ from `blkid` and `parted`.\n'
            )
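            # Each subsequent line pairs a device node with its detected filesystem,
            # e.g. (hypothetical values): "/dev/sda1 ntfs" or "/dev/mapper/vgtest-lvtest ext4".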
            for partition_key in self.partitions_to_backup.keys():
                filesystem = self.partitions_to_backup[partition_key][
                    'filesystem']
                filehandle.write('%s %s\n' % (partition_key, filesystem))

        partition_number = 0
        for partition_key in self.partitions_to_backup.keys():
            partition_number += 1
            total_progress_float = Utility.calculate_progress_ratio(
                0, partition_number, len(self.partitions_to_backup.keys()))
            GLib.idle_add(self.update_backup_progress_bar,
                          total_progress_float)
            is_unmounted, message = Utility.umount_warn_on_busy(partition_key)
            if not is_unmounted:
                self.logger.write(message)
                with self.summary_message_lock:
                    self.summary_message += message + "\n"
                GLib.idle_add(self.completed_backup, False, message)
                return

            short_device_node = re.sub('/dev/', '', partition_key)
            short_device_node = re.sub('/', '-', short_device_node)
            filesystem = self.partitions_to_backup[partition_key]['filesystem']

            if 'type' in self.partitions_to_backup[partition_key].keys() and 'extended' in \
                    self.partitions_to_backup[partition_key]['type']:
                self.logger.write("Detected " + partition_key +
                                  " as extended partition. Backing up EBR")
                filepath = os.path.join(self.dest_dir,
                                        short_device_node + "-ebr")
                process, flat_command_string, failed_message = Utility.run(
                    "Saving " + filepath, [
                        "dd", "if=" + partition_key, "of=" + filepath,
                        "bs=512", "count=1"
                    ],
                    use_c_locale=False,
                    logger=self.logger)
                if process.returncode != 0:
                    with self.summary_message_lock:
                        self.summary_message += failed_message
                    GLib.idle_add(self.completed_backup, False, failed_message)
                    return

            if filesystem == 'swap':
                filepath = os.path.join(
                    self.dest_dir, "swappt-" + short_device_node + ".info")
                with open(filepath, 'w') as filehandle:
                    uuid = ""
                    label = ""
                    if 'uuid' in self.partitions_to_backup[partition_key].keys(
                    ):
                        uuid = self.partitions_to_backup[partition_key]['uuid']
                    if 'label' in self.partitions_to_backup[
                            partition_key].keys():
                        label = self.partitions_to_backup[partition_key][
                            'label']
                    filehandle.write('UUID="%s"\n' % uuid)
                    filehandle.write('LABEL="%s"\n' % label)
                    with self.summary_message_lock:
                        self.summary_message += _(
                            "Successful backup of swap partition {partition_name}"
                        ).format(partition_name=partition_key) + "\n"
                continue

            # Clonezilla uses -q2 priority by default (partclone > partimage > dd).
            # PartImage does not appear to be actively maintained, so for simplicity Rescuezilla uses a
            # partclone > partclone.dd priority.
            # [1] https://clonezilla.org/clonezilla-live/doc/01_Save_disk_image/advanced/09-advanced-param.php

            # Expand upon Clonezilla's ocs-get-comp-suffix() function
            compression_suffix = "gz"
            split_size = "4GB"
            # Partclone dd blocksize (16MB)
            partclone_dd_bs = "16777216"
            # TODO: Re-enable APFS support -- currently partclone's Apple Filesystem support is not used because it's too unstable [1]
            # [1] https://github.com/rescuezilla/rescuezilla/issues/65
            if shutil.which("partclone." +
                            filesystem) is not None and filesystem != "apfs":
                partclone_cmd_list = [
                    "partclone." + filesystem, "--logfile",
                    "/var/log/partclone.log", "--clone", "--source",
                    partition_key, "--output", "-"
                ]
                filepath = os.path.join(
                    self.dest_dir, short_device_node + "." + filesystem +
                    "-ptcl-img." + compression_suffix + ".")
                split_cmd_list = [
                    "split", "--suffix-length=2", "--bytes=" + split_size, "-",
                    filepath
                ]
            elif shutil.which("partclone.dd") is not None:
                partclone_cmd_list = [
                    "partclone.dd", "--buffer_size=" + partclone_dd_bs,
                    "--logfile", "/var/log/partclone.log", "--source",
                    partition_key, "--output", "-"
                ]
                filepath = os.path.join(
                    self.dest_dir, short_device_node + ".dd-ptcl-img." +
                    compression_suffix + ".")
                split_cmd_list = [
                    "split", "--suffix-length=2", "--bytes=" + split_size, "-",
                    filepath
                ]
            else:
                GLib.idle_add(self.completed_backup, False,
                              "Partclone not found.")
                return

            filesystem_backup_message = _(
                "Backup {partition_name} containing filesystem {filesystem} to {destination}"
            ).format(partition_name=partition_key,
                     filesystem=filesystem,
                     destination=filepath)
            GLib.idle_add(self.update_main_statusbar,
                          filesystem_backup_message)
            self.logger.write(filesystem_backup_message)

            gzip_cmd_list = ["gzip", "--stdout"]
            self.proc['partclone_backup_' + partition_key] = subprocess.Popen(
                partclone_cmd_list,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env,
                encoding='utf-8')

            self.proc['gzip_' + partition_key] = subprocess.Popen(
                gzip_cmd_list,
                stdin=self.proc['partclone_backup_' + partition_key].stdout,
                stdout=subprocess.PIPE,
                env=env,
                encoding='utf-8')

            self.proc['split_' + partition_key] = subprocess.Popen(
                split_cmd_list,
                stdin=self.proc['gzip_' + partition_key].stdout,
                stdout=subprocess.PIPE,
                env=env,
                encoding='utf-8')
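            # The three processes above form a pipeline roughly equivalent to the
            # following shell command (illustrative device node and output path):
            #   partclone.ext4 --clone --source /dev/sda1 --output - \
            #       | gzip --stdout \
            #       | split --suffix-length=2 --bytes=4GB - sda1.ext4-ptcl-img.gz.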

            # Process partclone output. Partclone outputs an update every 3 seconds, so for
            # simplicity the data is processed on the current thread.
            # Poll process.stdout to show stdout live
            while True:
                if self.requested_stop:
                    return

                output = self.proc['partclone_backup_' +
                                   partition_key].stderr.readline()
                if self.proc['partclone_backup_' +
                             partition_key].poll() is not None:
                    break
                if output:
                    temp_dict = Partclone.parse_partclone_output(output)
                    if 'completed' in temp_dict.keys():
                        total_progress_float = Utility.calculate_progress_ratio(
                            temp_dict['completed'] / 100.0, partition_number,
                            len(self.partitions_to_backup.keys()))
                        GLib.idle_add(self.update_backup_progress_bar,
                                      total_progress_float)
                    if 'remaining' in temp_dict.keys():
                        GLib.idle_add(
                            self.update_backup_progress_status,
                            filesystem_backup_message + "\n\n" + output)
            rc = self.proc['partclone_backup_' + partition_key].poll()

            self.proc['partclone_backup_' + partition_key].stdout.close(
            )  # Allow p1 to receive a SIGPIPE if p2 exits.
            self.proc['gzip_' + partition_key].stdout.close(
            )  # Allow p2 to receive a SIGPIPE if p3 exits.
            output, err = self.proc['partclone_backup_' +
                                    partition_key].communicate()
            self.logger.write("Exit output " + str(output) + "stderr " +
                              str(err))
            if self.proc['partclone_backup_' + partition_key].returncode != 0:
                partition_summary = _(
                    "<b>Failed to backup partition</b> {partition_name}"
                ).format(partition_name=partition_key) + "\n"
                with self.summary_message_lock:
                    self.summary_message += partition_summary
                self.at_least_one_non_fatal_error = True
                proc_stdout = self.proc['partclone_backup_' +
                                        partition_key].stdout
                proc_stderr = self.proc['partclone_backup_' +
                                        partition_key].stderr
                extra_info = "\nThe command used internally was:\n\n" + flat_command_string + "\n\n" + "The output of the command was: " + str(
                    proc_stdout) + "\n\n" + str(proc_stderr)
                compression_stderr = self.proc['gzip_' + partition_key].stderr
                if compression_stderr is not None and compression_stderr != "":
                    extra_info += "\n\n" + str(
                        gzip_cmd_list) + " stderr: " + compression_stderr

                # TODO: Try to backup again, but using partclone.dd
                GLib.idle_add(
                    ErrorMessageModalPopup.display_nonfatal_warning_message,
                    self.builder, partition_summary + extra_info)

            else:
                with self.summary_message_lock:
                    self.summary_message += _(
                        "Successful backup of partition {partition_name}"
                    ).format(partition_name=partition_key) + "\n"

        # GLib.idle_add(self.update_progress_bar, (i + 1) / len(self.restore_mapping_dict.keys()))
        if self.requested_stop:
            return

        progress_ratio = i / len(self.partitions_to_backup.keys())
        i += 1
        # Display 100% progress for user
        GLib.idle_add(self.update_backup_progress_bar, progress_ratio)
        sleep(1.0)
        """
            partclone_cmd_list = ["partclone", "--logfile", "/tmp/rescuezilla_logfile.txt", "--overwrite", "/dev/"]

              if [ "$fs_p" != "dd" ]; then
    cmd_partclone="partclone.${fs_p} $PARTCLONE_SAVE_OPT -L $partclone_img_info_tmp -c -s $source_dev --output - | $compress_prog_opt"
  else
    # Some parameters for partclone.dd are not required. Here "-c" is not provided by partclone.dd when saving.
    cmd_partclone="partclone.${fs_p} $PARTCLONE_SAVE_OPT --buffer_size ${partclone_dd_bs} -L $partclone_img_info_tmp -s $source_dev --output - | $compress_prog_opt"
  fi
  case "$VOL_LIMIT" in
    [1-9]*)
       # $tgt_dir/${tgt_file}.${fs_pre}-img. is prefix, the last "." is necessary make the output file is like hda1.${fs_pre}-img.aa, hda1.${fs_pre}-img.ab. We do not add -d to make it like hda1.${fs_pre}-img.00, hda1.${fs_pre}-img.01, since it will confuse people that it looks like created by partimage (hda1.${fs_pre}-img.000, hda1.${fs_pre}-img.001)
       cmd_partclone="${cmd_partclone} | split -a $split_suf_len -b ${VOL_LIMIT}MB - $tgt_dir/$(to_filename ${tgt_file}).${fs_pre}-img.${comp_suf}. 2> $split_error"
       ;;
    *)
       cmd_partclone="${cmd_partclone} > $tgt_dir/$(to_filename ${tgt_file}).${fs_pre}-img.${comp_suf} 2> $split_error"
       ;;
  esac
  echo "Run partclone: $cmd_partclone" | tee --append ${OCS_LOGFILE}
  LC_ALL=C eval "(${cmd_partclone} && exit \${PIPESTATUS[0]})"


            cmd_partimage = "partimage $DEFAULT_PARTIMAGE_SAVE_OPT $PARTIMAGE_SAVE_OPT -B gui=no save $source_dev stdout | $compress_prog_opt"
            #case
            #"$VOL_LIMIT" in
            #[1 - 9] *)
            # "$tgt_dir/${tgt_file}." is prefix, the last "." is necessary
            # make the output file is like hda1.aa, hda1.ab.
            # We do not add -d to make it like hda1.00, hda1.01, since it will confuse people that it looks like created by partimage (hda1.000, hda1.001)
            cmd_partimage = "${cmd_partimage} | split -a $split_suf_len -b ${VOL_LIMIT}MB - $tgt_dir/${tgt_file}."
            """

        # Do checksum
        # IMG_ID=$(LC_ALL=C sha512sum $img_dir/clonezilla-img | awk -F" " '{print $1}')" >> $img_dir/Info-img-id.txt

        GLib.idle_add(self.completed_backup, True, "")
Example #17
0
    def __init__(self, absolute_qemu_img_path, enduser_filename, timeout_seconds):
        self.timeout_seconds = timeout_seconds
        self.image_format = "QEMU_FORMAT"
        self.absolute_path = absolute_qemu_img_path
        self.enduser_filename = enduser_filename
        self.normalized_sfdisk_dict = {'absolute_path': None, 'sfdisk_dict': {'partitions': {}}, 'file_length': 0}
        self.user_notes = ""
        self.warning_dict = {}

        # Clonezilla format
        self.ebr_dict = {}
        self.short_device_node_partition_list = []
        self.short_device_node_disk_list = []
        self.lvm_vg_dev_dict = {}
        self.lvm_logical_volume_dict = {}
        self.dev_fs_dict = {}
        self.size_bytes = 0
        self.enduser_readable_size = ""
        self.is_needs_decryption = False
        self.parted_dict = {'partitions': {}}
        self.post_mbr_gap_absolute_path = {}

        statbuf = os.stat(self.absolute_path)
        self.last_modified_timestamp = format_datetime(datetime.fromtimestamp(statbuf.st_mtime))
        print("Last modified timestamp " + self.last_modified_timestamp)

        dir = Path(absolute_qemu_img_path).parent.as_posix()
        print("Qemu directory : " + dir)

        qemu_img_cmd_list = ["qemu-img", "info", absolute_qemu_img_path]
        process, flat_command_string, fail_description = Utility.run("qemu-img info", qemu_img_cmd_list, use_c_locale=True)
        if process.returncode != 0:
            self.warning_dict[flat_command_string] = process.stderr
            return
        self.qemu_img_dict = QemuImage.parse_qemu_img_info(process.stdout)
        self.enduser_readable_size = self.qemu_img_dict['disk size']
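        # `qemu-img info` output is expected to contain key/value lines such as
        # (illustrative): "file format: qcow2", "virtual size: 20 GiB (21474836480 bytes)"
        # and "disk size: 1.5 GiB"; the human-readable "disk size" value is reused here.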

        is_associated, failed_message = self.associate_nbd(QEMU_NBD_NBD_DEVICE)
        if not is_associated:
            self.warning_dict[flat_command_string] = "Could not associate: " + failed_message
            return

        self.normalized_sfdisk_dict = {'absolute_path': None, 'sfdisk_dict': {'partitions': {}}, 'file_length': 0}
        process, flat_command_string, failed_message = Utility.run("Get partition table", ["sfdisk", "--dump", QEMU_NBD_NBD_DEVICE], use_c_locale=True)
        if process.returncode != 0:
            self.warning_dict[flat_command_string] = "Could not extract partition table: " + process.stderr
            # Not returning here so can disconnect.
        else:
            sfdisk_string = process.stdout
            f = tempfile.NamedTemporaryFile(mode='w', delete=False)
            f.write(sfdisk_string)
            f.close()
            self.normalized_sfdisk_dict = Sfdisk.generate_normalized_sfdisk_dict(f.name, self)

        parted_process, flat_command_string, failed_message = Utility.run("Get filesystem information",
                                                          ["parted", "--script", QEMU_NBD_NBD_DEVICE, "unit", "s",
                                                           "print"], use_c_locale=True)
        if parted_process.returncode != 0:
            self.warning_dict[flat_command_string] = "Could not extract filesystem: " + parted_process.stderr
            # Not returning here so can disconnect.
        else:
            self.parted_dict = Parted.parse_parted_output(parted_process.stdout)

        is_success, failed_message = QemuImage.deassociate_nbd(QEMU_NBD_NBD_DEVICE)
        if not is_success:
            self.warning_dict[flat_command_string] = failed_message
            return
Example #18
0
    def do_verify(self):
        self.requested_stop = False

        # Clear proc dictionary
        self.proc.clear()
        self.summary_message_lock = threading.Lock()
        self.summary_message = ""
        env = Utility.get_env_C_locale()

        self.logger = Logger("/tmp/rescuezilla.log." +
                             datetime.now().strftime("%Y%m%dT%H%M%S") + ".txt")
        GLib.idle_add(self.update_progress_bar, 0)

        # Calculate the size across all selected images
        all_images_total_size_estimate = 0
        all_images_num_partitions = 0
        for image in self.image_list:
            if not isinstance(image, FsArchiverImage):
                # Determine the size of all partition across all images. This is used for the weighted progress bar.
                all_images_total_size_estimate += image.size_bytes
                all_images_num_partitions += len(
                    image.image_format_dict_dict.keys())

        cumulative_bytes = 0
        image_number = 0
        total_partition_number = 0
        for image in self.image_list:
            image_number += 1
            image_verify_message = _("Verifying {image_name}").format(
                image_name=image.absolute_path)
            self.logger.write(image_verify_message)
            GLib.idle_add(self.display_status, image_verify_message,
                          image_verify_message)

            if self.requested_stop:
                GLib.idle_add(self.completed_verify, False,
                              _("User requested operation to stop."))
                return

            with self.summary_message_lock:
                self.summary_message += image.absolute_path + "\n"

            if isinstance(image, FsArchiverImage):
                with self.summary_message_lock:
                    self.summary_message += _(
                        "⚠"
                    ) + " " + "Verifying FsArchiver images not yet supported\n"
                continue
            if isinstance(image, MetadataOnlyImage):
                with self.summary_message_lock:
                    self.summary_message += _(
                        "⚠") + " " + "Verifying VM images not yet supported\n"
                continue

            if image.is_needs_decryption:
                with self.summary_message_lock:
                    self.summary_message += _(
                        "⚠"
                    ) + " " + "Verifying encrypted images not supported. Carefully decrypting on the command-line may be a temporary workaround.\n"
                continue

            if image.has_partition_table():
                mbr_path = image.get_absolute_mbr_path()
                mbr_size = int(os.stat(mbr_path).st_size)

                # Some image formats (like Clonezilla) store the post-MBR gap separately from the actual MBR
                post_mbr_size = 0
                if 'absolute_path' in image.post_mbr_gap_dict.keys():
                    post_mbr_size += int(
                        os.stat(
                            image.post_mbr_gap_dict['absolute_path']).st_size)

                if (mbr_size + post_mbr_size) <= 512:
                    if Sfdisk.has_dos_partition_table(
                            image.normalized_sfdisk_dict):
                        self.summary_message += _("❌") + " " + _(
                            "The backup's bootloader data is shorter than expected. If the backup contained certain bootloaders like GRUB, during a restore operation Rescuezilla will try and re-install the bootloader."
                        ) + "\n"
                else:
                    self.summary_message += _("✔") + " " + _(
                        "MBR backup appears correct.") + "\n"
            else:
                self.summary_message += _("No partition table found.") + "\n"

            if image.normalized_sfdisk_dict['file_length'] == 0:
                self.summary_message += _("❌") + " " + _(
                    "Sfdisk partition table file is empty or missing.") + "\n"
            else:
                self.summary_message += _("✔") + " " + _(
                    "Sfdisk partition table file is present.") + "\n"

            for partition_key in image.image_format_dict_dict.keys():
                total_partition_number += 1
                if self.requested_stop:
                    GLib.idle_add(self.completed_verify, False,
                                  _("User requested operation to stop."))
                    return

                if 'estimated_size_bytes' in image.image_format_dict_dict[
                        partition_key].keys():
                    partition_estimated_size_bytes = image.image_format_dict_dict[
                        partition_key]['estimated_size_bytes']
                elif 'absolute_filename_glob_list' in image.image_format_dict_dict[
                        partition_key].keys():
                    # TODO: Move this to the image scanning step.
                    print("Calculating estimated size from file size")
                    partition_estimated_size_bytes = Utility.count_total_size_of_files_on_disk(
                        image.image_format_dict_dict[partition_key]
                        ['absolute_filename_glob_list'], "uncompressed")
                else:
                    partition_estimated_size_bytes = 0

                filesystem_verify_message = _("Partition {partition}").format(
                    partition=partition_key)
                self.logger.write(filesystem_verify_message)
                GLib.idle_add(
                    self.display_status,
                    image_verify_message + " " + filesystem_verify_message,
                    filesystem_verify_message)

                total_progress_float = Utility.calculate_progress_ratio(
                    current_partition_completed_percentage=0,
                    current_partition_bytes=partition_estimated_size_bytes,
                    cumulative_bytes=cumulative_bytes,
                    total_bytes=all_images_total_size_estimate,
                    image_number=total_partition_number,
                    num_partitions=all_images_num_partitions)
                GLib.idle_add(self.update_progress_bar, total_progress_float)

                if 'type' in image.image_format_dict_dict[partition_key].keys(
                ):
                    image_type = image.image_format_dict_dict[partition_key][
                        'type']
                    if image_type == 'swap':
                        self.summary_message += _(
                            "⚠"
                        ) + " " + partition_key + ": verifying swap partition images not yet supported.\n"
                        cumulative_bytes += partition_estimated_size_bytes
                        continue
                    if image_type == 'missing':
                        self.summary_message += _(
                            "❌"
                        ) + " " + partition_key + ": partition is missing.\n"
                        cumulative_bytes += partition_estimated_size_bytes
                        continue
                    if 'dd' == image_type or image.image_format_dict_dict[
                            partition_key]['binary'] == "partclone.dd":
                        self.summary_message += _(
                            "⚠"
                        ) + " " + partition_key + ": verifying raw dd images not yet supported.\n"
                        cumulative_bytes += partition_estimated_size_bytes
                        continue
                    elif 'partclone' == image_type:
                        cat_cmd_list = ["cat"] + image.image_format_dict_dict[
                            partition_key]['absolute_filename_glob_list']
                        decompression_cmd_list = Utility.get_decompression_command_list(
                            image.image_format_dict_dict[partition_key]
                            ['compression'])
                        verify_command_list = [
                            "partclone.chkimg", "--source", "-"
                        ]
                    elif 'partimage' == image_type:
                        self.summary_message += _(
                            "⚠"
                        ) + " " + partition_key + ": verifying PartImage images not yet supported.\n"
                        cumulative_bytes += partition_estimated_size_bytes
                        continue
                    elif 'ntfsclone' == image_type:
                        self.summary_message += _(
                            "⚠"
                        ) + " " + partition_key + ": Verifying NTFSclone images not yet supported.\n"
                        cumulative_bytes += partition_estimated_size_bytes
                        continue
                    elif "unknown" != image_type:
                        self.summary_message += _(
                            "❌"
                        ) + " " + partition_key + ": unknown image type.\n"
                        cumulative_bytes += partition_estimated_size_bytes
                        continue
                    else:
                        message = "Unhandled type" + image_type + " from " + partition_key
                        self.logger.write(message)
                        with self.summary_message_lock:
                            self.summary_message += message + "\n"
                        cumulative_bytes += partition_estimated_size_bytes
                        continue

                    flat_command_string = Utility.print_cli_friendly(
                        image_type + " command ", [
                            cat_cmd_list, decompression_cmd_list,
                            verify_command_list
                        ])
                    verify_cat_proc_key = 'cat_' + partition_key
                    self.proc[verify_cat_proc_key] = subprocess.Popen(
                        cat_cmd_list,
                        stdout=subprocess.PIPE,
                        env=env,
                        encoding='utf-8')
                    verify_decompression_proc_key = 'decompression_' + image.absolute_path + "_" + partition_key
                    self.proc[
                        verify_decompression_proc_key] = subprocess.Popen(
                            decompression_cmd_list,
                            stdin=self.proc[verify_cat_proc_key].stdout,
                            stdout=subprocess.PIPE,
                            env=env,
                            encoding='utf-8')

                    verify_chkimg_proc_key = image_type + '_verify_' + image.absolute_path + "_" + partition_key
                    self.proc[verify_chkimg_proc_key] = subprocess.Popen(
                        verify_command_list,
                        stdin=self.proc[verify_decompression_proc_key].stdout,
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE,
                        env=env,
                        encoding='utf-8')
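                    # The processes above form a pipeline roughly equivalent to the
                    # following shell command (illustrative file names):
                    #   cat sda1.ext4-ptcl-img.gz.* | <decompression command> | partclone.chkimg --source -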

                    # Process partclone output. Partclone outputs an update every 3 seconds, so for
                    # simplicity the data is processed on the current thread.
                    # Poll process.stdout to show stdout live
                    proc_stdout = ""
                    proc_stderr = ""
                    while True:
                        if self.requested_stop:
                            GLib.idle_add(
                                self.completed_verify, False,
                                _("User requested operation to stop."))
                            return False, _(
                                "User requested operation to stop.")

                        output = self.proc[
                            verify_chkimg_proc_key].stderr.readline()
                        proc_stderr += output
                        if self.proc[verify_chkimg_proc_key].poll(
                        ) is not None:
                            break
                        if output and ("partclone" == image_type
                                       or "dd" == image_type):
                            temp_dict = Partclone.parse_partclone_output(
                                output)
                            if 'completed' in temp_dict.keys():
                                total_progress_float = Utility.calculate_progress_ratio(
                                    current_partition_completed_percentage=
                                    temp_dict['completed'] / 100.0,
                                    current_partition_bytes=
                                    partition_estimated_size_bytes,
                                    cumulative_bytes=cumulative_bytes,
                                    total_bytes=all_images_total_size_estimate,
                                    image_number=total_partition_number,
                                    num_partitions=len(
                                        image.image_format_dict_dict[
                                            partition_key].keys()))
                                GLib.idle_add(self.update_progress_bar,
                                              total_progress_float)
                            if 'remaining' in temp_dict.keys():
                                GLib.idle_add(
                                    self.update_verify_progress_status,
                                    filesystem_verify_message + "\n\n" +
                                    output)
                        elif "partimage" == image_type:
                            self.display_status(
                                "partimage: " + filesystem_verify_message, "")
                        elif "ntfsclone" == image_type:
                            self.display_status(
                                "ntfsclone: " + filesystem_verify_message, "")

                        rc = self.proc[verify_chkimg_proc_key].poll()

                    self.proc[verify_cat_proc_key].stdout.close(
                    )  # Allow p1 to receive a SIGPIPE if p2 exits.
                    if "unknown" != image_type:
                        self.proc[verify_decompression_proc_key].stdout.close(
                        )  # Allow p2 to receive a SIGPIPE if p3 exits.
                    stdout, stderr = self.proc[
                        verify_chkimg_proc_key].communicate()
                    rc = self.proc[verify_chkimg_proc_key].returncode
                    proc_stdout += stdout
                    proc_stderr += stderr
                    self.logger.write("Exit output " + str(rc) + ": " +
                                      str(proc_stdout) + "stderr " +
                                      str(proc_stderr))
                    if self.proc[verify_chkimg_proc_key].returncode != 0:
                        partition_summary = _("❌") + " " + _(
                            "Unable to verify.") + _(
                                "Partition {partition_number}").format(
                                    partition_number=partition_key) + "\n"
                        extra_info = "\nThe command used internally was:\n\n" + flat_command_string + "\n\n" + "The output of the command was: " + str(
                            proc_stdout) + "\n\n" + str(proc_stderr)
                        decompression_stderr = self.proc[
                            verify_decompression_proc_key].stderr
                        if decompression_stderr is not None and decompression_stderr != "":
                            extra_info += "\n\n" + decompression_cmd_list[
                                0] + " stderr: " + decompression_stderr
                        GLib.idle_add(
                            ErrorMessageModalPopup.
                            display_nonfatal_warning_message, self.builder,
                            partition_summary + extra_info)
                        with self.summary_message_lock:
                            self.summary_message += partition_summary
                        cumulative_bytes += partition_estimated_size_bytes
                        continue
                    else:
                        self.summary_message += _("✔") + _(
                            "{partition}: filesystem image successfully verified."
                        ).format(partition=partition_key) + "\n"
                        cumulative_bytes += partition_estimated_size_bytes
                        continue

                cumulative_bytes += partition_estimated_size_bytes

            with self.summary_message_lock:
                self.summary_message += "\n\n"

        GLib.idle_add(self.completed_verify, True, "")
        return