Code example #1
    def populate_partition_selection_table(self, drive_key):
        print('Received drive key ' + drive_key)
        print('drive state is ' + str(self.drive_state))
        self.save_partition_list_store.clear()

        try:
            if 'partitions' in self.drive_state[drive_key].keys():
                for partition_key in self.drive_state[drive_key][
                        'partitions'].keys():
                    flattened_partition_description = CombinedDriveState.flatten_partition_description(
                        self.drive_state, drive_key, partition_key)
                    # Add row that's ticked
                    self.save_partition_list_store.append(
                        [partition_key, True, flattened_partition_description])
            else:
                # Add the drive itself
                flattened_partition_description = CombinedDriveState.flatten_partition_description(
                    self.drive_state, drive_key, drive_key)
                # Add row that's ticked
                self.save_partition_list_store.append(
                    [drive_key, True, flattened_partition_description])
        except Exception as exception:
            tb = traceback.format_exc()
            traceback.print_exc()
            ErrorMessageModalPopup.display_nonfatal_warning_message(
                self.builder, tb)
        return
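For context, here is a minimal sketch of the drive_state dictionary that these populate_* methods iterate over. The keys are inferred from the lookups above and from the lsblk fields used in the test of example #4; the structure actually produced by CombinedDriveState.construct_combined_drive_state_dict() may contain more fields, and the values below are placeholders.

# Hypothetical illustration only -- the real dictionary is built by
# CombinedDriveState.construct_combined_drive_state_dict() (see examples #4 and #7).
example_drive_state = {
    "/dev/sda": {
        "type": "disk",
        "capacity": 34359738368,
        "model": "VBOX_HARDDISK",
        "serial": "PLACEHOLDER-SERIAL",
        "has_raid_member_filesystem": False,
        "partitions": {
            "/dev/sda1": {"type": "part", "fstype": "ntfs", "size": 34357641216},
        },
    },
    # A drive without a partition table (eg a filesystem directly on the disk) has no
    # "partitions" key, which is why the method above falls back to adding the drive itself.
}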
Code example #2
File: drive_query.py  Project: xiogun/rescuezilla
 def populate_mount_partition_table(self, ignore_drive_key=None):
     print('drive state is ' + str(self.drive_state))
     self.mount_partition_list_store.clear()
     index = 0
     for drive_key in self.drive_state.keys():
         try:
             if drive_key == ignore_drive_key:
                 continue
             if 'partitions' not in self.drive_state[drive_key].keys():
                 continue
             for partition_key in self.drive_state[drive_key]['partitions'].keys():
                 with self._is_displaying_advanced_information_lock:
                     if self._is_displaying_advanced_information:
                         # Display an advanced-user partition name eg, "nvme0n1p1".
                         human_friendly_partition_name = partition_key
                     else:
                         if self.drive_state[drive_key]['type'] == 'loop':
                             # Don't display certain non-block devices if the user has chosen to hide them.
                             # TODO: Evaluate other partition types to be hidden.
                             continue
                         # Display a user-friendly partition name eg, "#4".
                         human_friendly_partition_name = "#" + str(index + 1)
                     flattened_partition_description = CombinedDriveState.flatten_partition_description(self.drive_state, drive_key, partition_key)
                 if 'size' in self.drive_state[drive_key]['partitions'][partition_key].keys():
                     size_in_bytes = self.drive_state[drive_key]['partitions'][partition_key]['size']
                     enduser_readable_size = Utility.human_readable_filesize(int(size_in_bytes))
                 else:
                     enduser_readable_size = "unknown_size"
                 self.mount_partition_list_store.append([partition_key, human_friendly_partition_name, enduser_readable_size, flattened_partition_description])
                 index = index + 1
         except Exception as exception:
             tb = traceback.format_exc()
             traceback.print_exc()
             ErrorMessageModalPopup.display_nonfatal_warning_message(self.builder, tb)
     return
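The size column above comes from Utility.human_readable_filesize(), whose implementation is not part of this listing. A rough stand-in with the same intent (bytes in, short human-readable string out) might look like the following; it is illustrative only and not Rescuezilla's actual helper.

# Illustrative stand-in, not Rescuezilla's Utility.human_readable_filesize().
def human_readable_filesize(num_bytes):
    size = float(num_bytes)
    for unit in ["bytes", "KB", "MB", "GB"]:
        if size < 1024.0:
            return "%.1f %s" % (size, unit)
        size /= 1024.0
    return "%.1f TB" % size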
Code example #3
    def populate_drive_selection_table(self):
        self.drive_list_store.clear()
        index = 0
        for drive_key in self.drive_state.keys():
            try:
                drive = self.drive_state[drive_key]
                with self._is_displaying_advanced_information_lock:
                    if self._is_displaying_advanced_information:
                        # Display an advanced-user drive name eg, "nvme0n1". Users coming from Clonezilla will often
                        # like to know the device node.
                        human_friendly_drive_name = drive_key
                    else:
                        # Display a user-friendly drive name eg, "#3" to refer to nvme0n1. Some Rescuezilla users may prefer
                        # drives identified by a simple digit (eg, drive #3), because they may not understand what a short
                        # device node like "nvme0n1" means.
                        human_friendly_drive_name = "#" + str(index + 1)
                        if (drive['type'] != 'disk' and not drive['type'].startswith("raid"))\
                                or drive['has_raid_member_filesystem'] or 'nbd' in drive_key:
                            # Hiding LVMs, loop devices, empty drives etc from initial drive selection list. This
                            # should greatly reduce the risk a user accidentally picks a logical volume (of their
                            # say, encrypted Debian system) when they were actually intending on picking the entire
                            # block device (including boot partition etc).
                            #
                            # Don't display non-block device if we are hiding them (like /dev/loop)
                            continue

                flattened_partition_list = CombinedDriveState.flatten_partition_list(
                    drive)
                print("For key " + drive_key +
                      ", flattened partition list is " +
                      flattened_partition_list)
                enduser_readable_capacity = Utility.human_readable_filesize(
                    int(drive['capacity']))
                self.drive_list_store.append([
                    drive_key, human_friendly_drive_name,
                    enduser_readable_capacity, drive['model'], drive['serial'],
                    flattened_partition_list
                ])
                index = index + 1
            except Exception as e:
                traceback.print_exc(file=sys.stdout)
                print("Could not process " + drive_key)
                continue
        # TODO: Don't populate mount partition here
        self.populate_mount_partition_table()
        if self.please_wait_popup is not None:
            self.please_wait_popup.destroy()
            self.please_wait_popup = None
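The multi-line condition above decides which drives are hidden in the simple (non-advanced) view. Read as a standalone predicate it amounts to the sketch below, which uses the same dictionary keys but is not a helper that exists in the project.

# Sketch of the hide-from-simple-view rule used above (not an actual project helper).
def should_hide_in_simple_view(drive_key, drive):
    is_whole_disk = drive['type'] == 'disk' or drive['type'].startswith("raid")
    return (not is_whole_disk) or drive['has_raid_member_filesystem'] or 'nbd' in drive_key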
Code example #4
    def test_combined_drive_state(self):
        parted_dict_dict = {}
        sfdict_dict_dict = {}

        lsblk_json_output = """{
   "blockdevices": [
      {"kname":"/dev/loop0", "name":"/dev/loop0", "size":698761216, "type":"loop", "fstype":"squashfs", "mountpoint":"/rofs", "model":null},
      {"kname":"/dev/sda", "name":"/dev/sda", "size":34359738368, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sda1", "name":"/dev/sda1", "size":34357641216, "type":"part", "fstype":"ntfs", "mountpoint":"/mnt/backup", "model":null}
         ]
      },
      {"kname":"/dev/sdb", "name":"/dev/sdb", "size":1073741824, "type":"disk", "fstype":"LVM2_member", "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/dm-0", "name":"/dev/mapper/vgtest-lvtest", "size":1069547520, "type":"lvm", "fstype":"ext4", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdc", "name":"/dev/sdc", "size":1610612736, "type":"disk", "fstype":"ntfs", "mountpoint":null, "model":"VBOX_HARDDISK"},
      {"kname":"/dev/sdd", "name":"/dev/sdd", "size":2147483648, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdd1", "name":"/dev/sdd1", "size":3145728, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd2", "name":"/dev/sdd2", "size":44040192, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd4", "name":"/dev/sdd4", "size":1024, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdd5", "name":"/dev/sdd5", "size":12582912, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd6", "name":"/dev/sdd6", "size":4194304, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd7", "name":"/dev/sdd7", "size":28311552, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd8", "name":"/dev/sdd8", "size":4194304, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd9", "name":"/dev/sdd9", "size":20971520, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd10", "name":"/dev/sdd10", "size":83886080, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd11", "name":"/dev/sdd11", "size":72351744, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd12", "name":"/dev/sdd12", "size":18874368, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd13", "name":"/dev/sdd13", "size":29360128, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdd14", "name":"/dev/sdd14", "size":45088768, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sde", "name":"/dev/sde", "size":2684354560, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sde1", "name":"/dev/sde1", "size":113246208, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sde2", "name":"/dev/sde2", "size":67108864, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sde3", "name":"/dev/sde3", "size":2277507072, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sde4", "name":"/dev/sde4", "size":224395264, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdf", "name":"/dev/sdf", "size":3221225472, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdf1", "name":"/dev/sdf1", "size":268435456, "type":"part", "fstype":"btrfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf2", "name":"/dev/sdf2", "size":147849216, "type":"part", "fstype":"ext2", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf3", "name":"/dev/sdf3", "size":1024, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdf5", "name":"/dev/sdf5", "size":52428800, "type":"part", "fstype":"ext4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf6", "name":"/dev/sdf6", "size":34603008, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf7", "name":"/dev/sdf7", "size":73400320, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf8", "name":"/dev/sdf8", "size":47185920, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf9", "name":"/dev/sdf9", "size":55574528, "type":"part", "fstype":"reiser4", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf10", "name":"/dev/sdf10", "size":35651584, "type":"part", "fstype":"reiserfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf11", "name":"/dev/sdf11", "size":36700160, "type":"part", "fstype":"swap", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf12", "name":"/dev/sdf12", "size":379584512, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf13", "name":"/dev/sdf13", "size":45088768, "type":"part", "fstype":"udf", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf14", "name":"/dev/sdf14", "size":68157440, "type":"part", "fstype":"xfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf15", "name":"/dev/sdf15", "size":50331648, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdf16", "name":"/dev/sdf16", "size":40894464, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdf17", "name":"/dev/sdf17", "size":11534336, "type":"part", "fstype":"minix", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf18", "name":"/dev/sdf18", "size":62914560, "type":"part", "fstype":"f2fs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf19", "name":"/dev/sdf19", "size":135266304, "type":"part", "fstype":"nilfs2", "mountpoint":null, "model":null},
            {"kname":"/dev/sdf20", "name":"/dev/sdf20", "size":1656750080, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdg", "name":"/dev/sdg", "size":3758096384, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK"},
      {"kname":"/dev/sdh", "name":"/dev/sdh", "size":4294967296, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdh1", "name":"/dev/sdh1", "size":104857600, "type":"part", "fstype":"LVM2_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/dm-1", "name":"/dev/mapper/vgtest1-lvtest1", "size":54525952, "type":"lvm", "fstype":"vfat", "mountpoint":null, "model":null}
               ]
            },
            {"kname":"/dev/sdh2", "name":"/dev/sdh2", "size":104857600, "type":"part", "fstype":"LVM2_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/dm-3", "name":"/dev/mapper/vgtest2-lvtest2", "size":54525952, "type":"lvm", "fstype":"ntfs", "mountpoint":null, "model":null}
               ]
            },
            {"kname":"/dev/sdh3", "name":"/dev/sdh3", "size":104857600, "type":"part", "fstype":"LVM2_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/dm-2", "name":"/dev/mapper/vgtest3-lvtest3", "size":54525952, "type":"lvm", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdi", "name":"/dev/sdi", "size":8589934592, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdi1", "name":"/dev/sdi1", "size":536870912, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sdi2", "name":"/dev/sdi2", "size":255852544, "type":"part", "fstype":"ext2", "mountpoint":null, "model":null},
            {"kname":"/dev/sdi3", "name":"/dev/sdi3", "size":7795113984, "type":"part", "fstype":"crypto_LUKS", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdj", "name":"/dev/sdj", "size":53687091200, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdj1", "name":"/dev/sdj1", "size":554696704, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null},
            {"kname":"/dev/sdj2", "name":"/dev/sdj2", "size":104857600, "type":"part", "fstype":"vfat", "mountpoint":null, "model":null},
            {"kname":"/dev/sdj3", "name":"/dev/sdj3", "size":16777216, "type":"part", "fstype":null, "mountpoint":null, "model":null},
            {"kname":"/dev/sdj4", "name":"/dev/sdj4", "size":53008662528, "type":"part", "fstype":"ntfs", "mountpoint":null, "model":null}
         ]
      },
      {"kname":"/dev/sdk", "name":"/dev/sdk", "size":1073741824, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdk1", "name":"/dev/sdk1", "size":1072693248, "type":"part", "fstype":"linux_raid_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/md127", "name":"/dev/md127", "size":1071644672, "type":"raid1", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdl", "name":"/dev/sdl", "size":1073741824, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdl1", "name":"/dev/sdl1", "size":1072693248, "type":"part", "fstype":"linux_raid_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/md127", "name":"/dev/md127", "size":1071644672, "type":"raid1", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdm", "name":"/dev/sdm", "size":1073741824, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdm1", "name":"/dev/sdm1", "size":1072693248, "type":"part", "fstype":"linux_raid_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/md0", "name":"/dev/md0", "size":1071644672, "type":"raid1", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdn", "name":"/dev/sdn", "size":1073741824, "type":"disk", "fstype":null, "mountpoint":null, "model":"VBOX_HARDDISK",
         "children": [
            {"kname":"/dev/sdn1", "name":"/dev/sdn1", "size":1072693248, "type":"part", "fstype":"linux_raid_member", "mountpoint":null, "model":null,
               "children": [
                  {"kname":"/dev/md0", "name":"/dev/md0", "size":1071644672, "type":"raid1", "fstype":"ext4", "mountpoint":null, "model":null}
               ]
            }
         ]
      },
      {"kname":"/dev/sdo", "name":"/dev/sdo", "size":1610612736, "type":"disk", "fstype":"ext4", "mountpoint":null, "model":"VBOX_HARDDISK"},
      {"kname":"/dev/sr0", "name":"/dev/sr0", "size":805623808, "type":"rom", "fstype":"iso9660", "mountpoint":"/cdrom", "model":"VBOX_CD-ROM"}
   ]
}"""

        lsblk_json_dict = json.loads(lsblk_json_output)

        input_blkid_string = """/dev/mapper/vgtest-lvtest: UUID="b9131c40-9742-416c-b019-8b11481a86ac" TYPE="ext4"
/dev/sda1: UUID="5C2F5F000C198BC5" TYPE="ntfs" PTTYPE="dos" PARTUUID="9aa1db09-6b7c-45a6-b392-c14518730297"
/dev/sdc: UUID="0818993868533997" TYPE="ntfs" PTTYPE="dos"
/dev/sdd1: UUID="925ef222-bc6d-4e46-a31d-af3443821ed6" TYPE="ext4" PARTUUID="1dbd2bfc-01"
/dev/sdd10: UUID="2708289c-c559-4d5c-bed1-98735f673bc0" TYPE="ext4" PARTUUID="1dbd2bfc-0a"
/dev/sdd11: UUID="c1dc5a1d-d22b-4a64-933f-5495a454eb05" TYPE="ext4" PARTUUID="1dbd2bfc-0b"
/dev/sdd12: UUID="47d1b78f-dd89-44d2-8af6-e7c01f1b635e" TYPE="ext4" PARTUUID="1dbd2bfc-0c"
/dev/sdd13: UUID="39fb1fdd-2cb8-449d-8d15-3eb7ab682677" TYPE="ext4" PARTUUID="1dbd2bfc-0d"
/dev/sdd14: UUID="f40bf79e-619e-422c-a546-ba78caa4ced7" TYPE="ext4" PARTUUID="1dbd2bfc-0e"
/dev/sdd2: UUID="206b6149-a675-4149-b368-8d7f158d9da2" TYPE="ext4" PARTUUID="1dbd2bfc-02"
/dev/sdd5: UUID="b8a96bc5-43c5-4180-a6c6-12466b544ced" TYPE="ext4" PARTUUID="1dbd2bfc-05"
/dev/sdd6: UUID="0897426b-24ee-47ed-8c85-e034205a43aa" TYPE="ext4" PARTUUID="1dbd2bfc-06"
/dev/sdd7: UUID="c4f8b878-1a4e-424b-93dd-17ea6d8e14d2" TYPE="ext4" PARTUUID="1dbd2bfc-07"
/dev/sdd8: UUID="e4fa2ad0-a206-447e-9c14-829d52c10407" TYPE="ext4" PARTUUID="1dbd2bfc-08"
/dev/sdd9: UUID="34648ad0-2fc1-47e1-8503-dc88b5152386" TYPE="ext4" PARTUUID="1dbd2bfc-09"
/dev/sde1: UUID="39DE69624116B96D" TYPE="ntfs" PTTYPE="dos" PARTUUID="4c6ec2cf-8de5-43c0-9757-41a596de5486"
/dev/sde2: UUID="B155-F891" TYPE="vfat" PARTUUID="be9f4179-560c-4bc9-8366-ae214f69a16e"
/dev/sde3: UUID="286C1DA536C63454" TYPE="ntfs" PTTYPE="dos" PARTUUID="363bdd4e-d6a1-42e6-a4da-616cd3e46952"
/dev/sde4: UUID="11d0533c-feba-4a07-ae30-7ff3720a051e" TYPE="ext4" PARTUUID="ced0279d-0096-4ebe-8425-4c6c9d46a4d2"
/dev/sdf1: UUID="c297cbb6-acb5-4d6a-9e34-af7558e120a0" UUID_SUB="695a452a-c43c-477c-8d5c-a21802e18091" TYPE="btrfs" PARTUUID="43c18652-01"
/dev/sdf12: UUID="023E47301DBC9964" TYPE="ntfs" PTTYPE="dos" PARTUUID="43c18652-0c"
/dev/sdf13: UUID="5f4b3897b65975a8" LABEL="LinuxUDF" TYPE="udf" PARTUUID="43c18652-0d"
/dev/sdf17: TYPE="minix" PARTUUID="43c18652-11"
/dev/sdf18: UUID="d73d2f4f-fba2-43ef-ad7a-c5fe877bf8a9" TYPE="f2fs" PARTUUID="43c18652-12"
/dev/sdf2: UUID="04944da2-d784-4c9f-b143-060d2776c4b1" TYPE="ext2" PARTUUID="43c18652-02"
/dev/sdf20: UUID="718E7F8E7F42D1B8" TYPE="ntfs" PTTYPE="dos" PARTUUID="43c18652-14"
/dev/sdf5: UUID="a2f8e4d6-ac83-4c57-a046-b7122c0398d5" TYPE="ext4" PARTUUID="43c18652-05"
/dev/sdf6: UUID="1297CD00121D448B" TYPE="ntfs" PTTYPE="dos" PARTUUID="43c18652-06"
/dev/sdf7: SEC_TYPE="msdos" UUID="CF57-1227" TYPE="vfat" PARTUUID="43c18652-07"
/dev/sdf8: UUID="CF6A-B2D0" TYPE="vfat" PARTUUID="43c18652-08"
/dev/sdi1: UUID="F5A2-3D31" TYPE="vfat" PARTUUID="b227e8b3-c8ea-448f-9657-53670575e6a8"
/dev/sdi2: UUID="80a2000c-c375-4a74-b6f9-2f1e1c7a8958" TYPE="ext2" PARTUUID="a3b889cb-31af-469d-a584-34edb323c62a"
/dev/sdj1: LABEL="Recovery" UUID="5C22168F22166E70" TYPE="ntfs" PARTLABEL="Basic data partition" PARTUUID="e3e94ae6-c2ab-495a-a955-a32140f56c2a"
/dev/sdj2: UUID="2C16-C81E" TYPE="vfat" PARTLABEL="EFI system partition" PARTUUID="7423670f-d0c3-4724-82f7-3185350a1bf7"
/dev/sdj4: UUID="DA40176E4017511D" TYPE="ntfs" PARTLABEL="Basic data partition" PARTUUID="1f1c6171-d10c-44c0-ba9b-e12995d7f4da"
/dev/sdk1: UUID="b4b3109e-816d-bcee-66c4-151e60fd8e23" UUID_SUB="cbb347a0-e478-39be-5dae-8b2955375ff6" LABEL="ubuntu:0" TYPE="linux_raid_member" PARTUUID="edb03a25-01"
/dev/sdl1: UUID="b4b3109e-816d-bcee-66c4-151e60fd8e23" UUID_SUB="17ee1620-2ccc-c266-1cd8-a567f6896d7a" LABEL="ubuntu:0" TYPE="linux_raid_member" PARTUUID="c6774609-01"
/dev/sr0: UUID="2020-09-04-10-27-14-00" LABEL="Rescuezilla" TYPE="iso9660" PTTYPE="PMBR"
/dev/loop0: TYPE="squashfs"
/dev/sdb: UUID="i20UTQ-OaX3-c6nB-CiBv-Gav1-hgVf-tEkO2W" TYPE="LVM2_member"
/dev/sdf9: UUID="0ab2eb0a-6c94-4f78-8206-7d725bbeb4e5" TYPE="reiser4" PARTUUID="43c18652-09"
/dev/sdf10: UUID="d230da2e-5359-43cb-827e-40c48a0a572b" TYPE="reiserfs" PARTUUID="43c18652-0a"
/dev/sdf11: UUID="217c8359-2ce6-4d07-bc44-4dcae84bc089" TYPE="swap" PARTUUID="43c18652-0b"
/dev/sdf14: UUID="3211b89b-b5ce-461d-8e6e-8acfaaa4bb28" TYPE="xfs" PARTUUID="43c18652-0e"
/dev/sdf19: UUID="957acceb-12f3-4574-ac1f-fb6e53f1a22f" TYPE="nilfs2" PARTUUID="43c18652-13"
/dev/sdh1: UUID="aNcRGF-4HyS-UoFu-aNpE-tXKI-nyeu-LiKaTm" TYPE="LVM2_member" PARTUUID="c64c40a3-4eec-4b86-820a-f8068ad3f686"
/dev/sdh2: UUID="cwuIbV-pb5s-9whu-eezX-WiZU-4wlY-gvuLZS" TYPE="LVM2_member" PARTUUID="45e285ea-caa3-4fc7-a7fe-9727d3198f09"
/dev/sdh3: UUID="jStAm5-tt1J-6uRa-HElo-EROm-DgjD-B5OJ3B" TYPE="LVM2_member" PARTUUID="4a014da5-75ca-45b2-9b62-7e3380c14570"
/dev/sdi3: UUID="17147edc-1e54-4300-b47a-01b138581512" TYPE="crypto_LUKS" PARTUUID="5f64fbd4-dcad-4467-8a8b-f3e009871661"
/dev/md127: UUID="0ca32d11-af2c-4512-9e68-bef318870149" TYPE="ext4"
/dev/mapper/vgtest1-lvtest1: SEC_TYPE="msdos" UUID="0559-959C" TYPE="vfat"
/dev/mapper/vgtest3-lvtest3: UUID="846a2cbd-b040-4afd-bb1c-8ecd6e15f0c2" TYPE="ext4"
/dev/mapper/vgtest2-lvtest2: UUID="588E895406ECC468" TYPE="ntfs" PTTYPE="dos"
/dev/sdf15: PARTUUID="43c18652-0f"
/dev/sdf16: PARTUUID="43c18652-10"
/dev/sdj3: PARTLABEL="Microsoft reserved partition" PARTUUID="229ef65d-3315-4824-945e-9719feda2f42"
/dev/sdm1: UUID="75515f3b-95ea-ef00-e327-c48e2784e416" UUID_SUB="52b46420-82ff-7e66-ff87-4195a846f804" LABEL="ubuntu:0" TYPE="linux_raid_member" PARTUUID="e02572d4-01"
/dev/sdn1: UUID="75515f3b-95ea-ef00-e327-c48e2784e416" UUID_SUB="5a61afd1-e3eb-d319-f6ca-a0135c0889de" LABEL="ubuntu:0" TYPE="linux_raid_member" PARTUUID="1e066523-01"
/dev/md0: UUID="42ba6b53-6752-4ca7-b5a7-95a5e766ce97" BLOCK_SIZE="4096" TYPE="ext4"
/dev/sdo: UUID="642af36d-7695-4376-a6f9-a35a15552e33" BLOCK_SIZE="4096" TYPE="ext4" """
        blkid_dict = Blkid.parse_blkid_output(input_blkid_string)

        os_prober_contents = """/dev/sdc2@/efi/Microsoft/Boot/bootmgfw.efi:Windows Boot Manager:Windows:efi
        /dev/sdd1:Debian GNU/Linux 10 (buster):Debian:linux"""
        osprober_dict = OsProber.parse_os_prober_output(os_prober_contents)

        input_parted_gpt_string = """Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sde: 2684354560B
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags: 

Number  Start        End          Size         File system  Name  Flags
 1      1048576B     114294783B   113246208B   ntfs               msftdata
 2      114294784B   181403647B   67108864B    fat32              msftdata
 3      181403648B   2458910719B  2277507072B  ntfs               msftdata
 4      2458910720B  2683305983B  224395264B   ext4"""
        parted_dict_dict['/dev/sde'] = Parted.parse_parted_output(
            input_parted_gpt_string)

        input_sfdisk_gpt_string = """label: gpt
label-id: 5FA01E95-F3E8-4B92-845B-843609E4EF0D
device: /dev/sde
unit: sectors
first-lba: 34
last-lba: 5242846

/dev/sde1 : start=        2048, size=      221184, type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=4C6EC2CF-8DE5-43C0-9757-41A596DE5486
/dev/sde2 : start=      223232, size=      131072, type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=BE9F4179-560C-4BC9-8366-AE214F69A16E
/dev/sde3 : start=      354304, size=     4448256, type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=363BDD4E-D6A1-42E6-A4DA-616CD3E46952
/dev/sde4 : start=     4802560, size=      438272, type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=CED0279D-0096-4EBE-8425-4C6C9D46A4D2"""
        sfdict_dict_dict['/dev/sde'] = Sfdisk.parse_sfdisk_dump_output(
            input_sfdisk_gpt_string)

        input_parted_mbr_string = """Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdd: 2147483648B
Sector size (logical/physical): 512B/512B
Partition Table: msdos
Disk Flags: 

Number  Start        End          Size         Type      File system  Flags
 4      1048576B     2100297727B  2099249152B  extended
14      2097152B     47185919B    45088768B    logical   ext4
13      48234496B    77594623B    29360128B    logical   ext4
 5      78643200B    91226111B    12582912B    logical   ext4
 9      92274688B    113246207B   20971520B    logical   ext4
 6      114294784B   118489087B   4194304B     logical   ext2
 7      119537664B   147849215B   28311552B    logical   ext4
 8      148897792B   153092095B   4194304B     logical   ext2
10      154140672B   238026751B   83886080B    logical   ext4
11      239075328B   311427071B   72351744B    logical   ext4
12      312475648B   331350015B   18874368B    logical   ext4
 1      2100297728B  2103443455B  3145728B     primary   ext2
 2      2103443456B  2147483647B  44040192B    primary   ext4"""
        parted_dict_dict['/dev/sdd'] = Parted.parse_parted_output(
            input_parted_mbr_string)

        input_parted_sdm_string = """Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdm: 1073741824B
Sector size (logical/physical): 512B/512B
Partition Table: msdos
Disk Flags:

Number  Start     End          Size         Type     File system  Flags
 1      1048576B  1073741823B  1072693248B  primary               raid

"""
        parted_dict_dict['/dev/sdm'] = Parted.parse_parted_output(
            input_parted_sdm_string)

        input_parted_sdn_string = """
Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdn: 1073741824B
Sector size (logical/physical): 512B/512B
Partition Table: msdos
Disk Flags:

Number  Start     End          Size         Type     File system  Flags
 1      1048576B  1073741823B  1072693248B  primary               raid

"""
        parted_dict_dict['/dev/sdn'] = Parted.parse_parted_output(
            input_parted_sdn_string)

        input_parted_fs_directly_on_disk_string = """
Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdo: 1610612736B
Sector size (logical/physical): 512B/512B
Partition Table: loop
Disk Flags:

Number  Start  End          Size         File system  Flags
 1      0B     1610612735B  1610612736B  ext4

"""
        parted_dict_dict['/dev/sdo'] = Parted.parse_parted_output(
            input_parted_fs_directly_on_disk_string)

        input_sfdisk_sdm_string = """label: dos
label-id: 0xe02572d4
device: /dev/sdm
unit: sectors
sector-size: 512

/dev/sdm1 : start=        2048, size=     2095104, type=fd"""
        sfdict_dict_dict['/dev/sdm'] = Sfdisk.parse_sfdisk_dump_output(
            input_sfdisk_sdm_string)

        input_sfdisk_sdn_string = """label: dos
label-id: 0x1e066523
device: /dev/sdn
unit: sectors
sector-size: 512

/dev/sdn1 : start=        2048, size=     2095104, type=fd"""
        sfdict_dict_dict['/dev/sdn'] = Sfdisk.parse_sfdisk_dump_output(
            input_sfdisk_sdn_string)

        input_sfdisk_mbr_string = """label: dos
label-id: 0x1dbd2bfc
device: /dev/sdd
unit: sectors

/dev/sdd1 : start=     4102144, size=        6144, type=83
/dev/sdd2 : start=     4108288, size=       86016, type=83
/dev/sdd4 : start=        2048, size=     4100096, type=5
/dev/sdd5 : start=      153600, size=       24576, type=83
/dev/sdd6 : start=      223232, size=        8192, type=83
/dev/sdd7 : start=      233472, size=       55296, type=83
/dev/sdd8 : start=      290816, size=        8192, type=83
/dev/sdd9 : start=      180224, size=       40960, type=83
/dev/sdd10 : start=      301056, size=      163840, type=83
/dev/sdd11 : start=      466944, size=      141312, type=83
/dev/sdd12 : start=      610304, size=       36864, type=83
/dev/sdd13 : start=       94208, size=       57344, type=83
/dev/sdd14 : start=        4096, size=       88064, type=83 """
        sfdict_dict_dict['/dev/sdd'] = Sfdisk.parse_sfdisk_dump_output(
            input_sfdisk_mbr_string)
        pp = pprint.PrettyPrinter(indent=4)
        combined_drive_state_dict = CombinedDriveState.construct_combined_drive_state_dict(
            lsblk_json_dict, blkid_dict, osprober_dict, parted_dict_dict,
            sfdict_dict_dict)
        pp.pprint(combined_drive_state_dict)
        CombinedDriveState.get_first_partition(
            combined_drive_state_dict['/dev/sdd']['partitions'])
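The test above feeds raw blkid output into Blkid.parse_blkid_output(). That parser is not reproduced in this listing, but the line format (a device node followed by KEY="value" pairs) can be handled with the standard library alone. A rough sketch follows, assuming a dict-of-dicts result keyed by device node; the project's real parser and return shape may differ.

import shlex

# Illustrative parser for blkid's '/dev/xxx: KEY="value" ...' lines.
# Not the project's Blkid.parse_blkid_output().
def parse_blkid_lines(blkid_output):
    result = {}
    for line in blkid_output.splitlines():
        line = line.strip()
        if not line or ":" not in line:
            continue
        device, _, tags = line.partition(":")
        tag_dict = {}
        for token in shlex.split(tags):
            key, _, value = token.partition("=")
            tag_dict[key] = value
        result[device] = tag_dict
    return result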
Code example #5
    def _use_existing_drive_partition_table(self):
        self.destination_partition_combobox_list.clear()
        self.partition_selection_list.clear()

        num_destination_partitions = 0

        with self.lvm_lv_path_lock:
            for lvm_lv_path in self.lvm_lv_path_list:
                self.destination_partition_combobox_list.append([lvm_lv_path, "Logical Volume: " + lvm_lv_path])
                num_destination_partitions += 1

        print("Looking at " + str(self.selected_image) + " and " + str(self.dest_drive_dict))

        # For the safety of end-users, ensure the initial combobox mapping is blank. It's possible to autogenerate a
        # mapping, but this could be wrong, so it's far simpler for now to leave the mapping blank and rely on end-user
        # decisions.
        flattened_part_description = self.NOT_RESTORING_PARTITION_ENDUSER_FRIENDLY
        dest_partition_key = self.NOT_RESTORING_PARTITION_KEY
        is_restoring_partition = False
        # Fallback so flat_image_part_description is always defined, even when no image partition rows are added below.
        flat_image_part_description = flattened_part_description

        # Populate image partition selection list (left-hand side column)
        if isinstance(self.selected_image, ClonezillaImage) or isinstance(self.selected_image, RedoBackupLegacyImage) or \
                isinstance(self.selected_image, FogProjectImage) or isinstance(self.selected_image, RedoRescueImage) or \
                isinstance(self.selected_image, FoxcloneImage) or isinstance(self.selected_image, ApartGtkImage) or \
                isinstance(self.selected_image, MetadataOnlyImage):
            for image_format_dict_key in self.selected_image.image_format_dict_dict.keys():
                if self.selected_image.does_image_key_belong_to_device(image_format_dict_key):
                    if self.selected_image.image_format_dict_dict[image_format_dict_key]['is_lvm_logical_volume']:
                        flat_image_part_description = "Logical Volume " + image_format_dict_key + ": "\
                                                      + self.selected_image.flatten_partition_string(image_format_dict_key)
                    elif isinstance(self.selected_image, ApartGtkImage):
                        # ApartGtkImage may contain multiple partitions, so the key contains the timestamp too. Therefore
                        # we need to make sure the split device string function doesn't get called.
                        flat_image_part_description = image_format_dict_key + ": "\
                                                      + self.selected_image.flatten_partition_string(image_format_dict_key)
                    else:
                        image_base_device_node, image_partition_number = Utility.split_device_string(image_format_dict_key)
                        flat_image_part_description = _("Partition {partition_number}").format(
                            partition_number=str(image_partition_number)) + ": " \
                            + self.selected_image.flatten_partition_string(image_format_dict_key)
                    self.partition_selection_list.append(
                        [image_format_dict_key, is_restoring_partition, flat_image_part_description, dest_partition_key,
                         flattened_part_description,
                         dest_partition_key, flattened_part_description])
                    num_destination_partitions += 1
        elif isinstance(self.selected_image, FsArchiverImage):
            for fs_key in self.selected_image.fsa_dict['filesystems'].keys():
                flat_image_part_description = "Filesystem " + str(
                fs_key) + ": " + self.selected_image.flatten_partition_string(fs_key)
                self.partition_selection_list.append(
                    [fs_key, is_restoring_partition, flat_image_part_description, dest_partition_key,
                     flattened_part_description,
                     dest_partition_key, flattened_part_description])
                num_destination_partitions += 1

        if num_destination_partitions == 0:
            # The destination disk must be empty.
            self.partition_selection_list.append(
                [self.dest_drive_node, is_restoring_partition, flat_image_part_description, self.dest_drive_node,
                 flattened_part_description,
                 dest_partition_key, flattened_part_description])

        # Populate combobox (right-hand side column)
        num_combo_box_entries = 0
        is_destination_partition_target_drive = False
        if 'partitions' in self.dest_drive_dict.keys() and len(self.dest_drive_dict['partitions'].keys()) > 0:
            # Loop over the partitions in the destination drive
            for dest_partition_key in self.dest_drive_dict['partitions'].keys():
                if 'type' in self.dest_drive_dict['partitions'][dest_partition_key].keys() and self.dest_drive_dict['partitions'][dest_partition_key]['type'] == "extended":
                    # Do not add a destination combobox entry for any Extended Boot Record (EBR) destination partition
                    # nodes to reduce risk of user confusion.
                    continue
                if dest_partition_key == self.dest_drive_node:
                    is_destination_partition_target_drive = True
                flattened_part_description = dest_partition_key + ": " + CombinedDriveState.flatten_part(
                    self.dest_drive_dict['partitions'][dest_partition_key])
                self.destination_partition_combobox_list.append([dest_partition_key, flattened_part_description])
                num_combo_box_entries += 1

        # If there are no partitions on the destination disk, provide the option to remap the partitions to the whole
        # destination disk. If the source image doesn't have a partition table, we also want to be able to remap
        # partitions to the destination disk. Finally, if the destination disk already has a filesystem directly on
        # disk then that would have already been handled above and there's no need to add a new entry to the combobox.
        if (num_combo_box_entries == 0 or not self.selected_image.has_partition_table()) and not is_destination_partition_target_drive:
            flattened_disk_description = self.dest_drive_node + ": " + CombinedDriveState.flatten_drive(self.dest_drive_dict)
            # If there are no partitions in the destination drive, we place the entire drive as the destination
            self.destination_partition_combobox_list.append([self.dest_drive_node, "WHOLE DRIVE " + flattened_disk_description])

        for mode in self.mode_list:
            self.destination_partition_combobox_cell_renderer_dict[mode].set_sensitive(True)
Code example #6
    def _use_existing_drive_partition_table(self):
        self.destination_partition_combobox_list.clear()
        self.restore_partition_selection_list.clear()

        num_destination_partitions = 0

        with self.lvm_lv_path_lock:
            for lvm_lv_path in self.lvm_lv_path_list:
                self.destination_partition_combobox_list.append(
                    [lvm_lv_path, "Logical Volume: " + lvm_lv_path])
                num_destination_partitions += 1

        print("Looking at " + str(self.selected_image) + " and " +
              str(self.dest_drive_dict))

        # For the safety of end-users, ensure the initial combobox mapping is blank. It's possible to autogenerate a
        # mapping, but this could be wrong, so it's far simpler for now to leave the mapping blank and rely on end-user
        # decisions.
        flattened_part_description = self.NOT_RESTORING_PARTITION_ENDUSER_FRIENDLY
        dest_partition_key = self.NOT_RESTORING_PARTITION_KEY
        is_restoring_partition = False
        # Fallback so flat_image_part_description is always defined, even when no image partition rows are added below.
        flat_image_part_description = flattened_part_description

        # Populate image partition selection list (left-hand side column)
        if isinstance(self.selected_image, ClonezillaImage):
            for image_format_dict_key in self.selected_image.image_format_dict_dict.keys(
            ):
                # TODO: Support Clonezilla multidisk
                short_device_key = self.selected_image.short_device_node_disk_list[
                    0]
                if self.selected_image.does_image_key_belong_to_device(
                        image_format_dict_key, short_device_key):
                    if self.selected_image.image_format_dict_dict[
                            image_format_dict_key]['is_lvm_logical_volume']:
                        flat_image_part_description = "Logical Volume " + image_format_dict_key + ": " + self.selected_image.flatten_partition_string(
                            short_device_key, image_format_dict_key)
                    else:
                        image_base_device_node, image_partition_number = Utility.split_device_string(
                            image_format_dict_key)
                        flat_image_part_description = "Partition " + str(
                            image_partition_number
                        ) + ": " + self.selected_image.flatten_partition_string(
                            short_device_key, image_format_dict_key)
                    self.restore_partition_selection_list.append([
                        image_format_dict_key, is_restoring_partition,
                        flat_image_part_description, dest_partition_key,
                        flattened_part_description, dest_partition_key,
                        flattened_part_description
                    ])
                    num_destination_partitions += 1
        elif isinstance(self.selected_image, RedoBackupLegacyImage):
            partitions = self.selected_image.short_device_node_partition_list
            for image_format_dict_key in partitions:
                image_base_device_node, image_partition_number = Utility.split_device_string(
                    image_format_dict_key)
                flat_image_part_description = "Partition " + str(
                    image_partition_number
                ) + ": " + self.selected_image.flatten_partition_string(
                    image_format_dict_key)
                self.restore_partition_selection_list.append([
                    image_format_dict_key, is_restoring_partition,
                    flat_image_part_description, dest_partition_key,
                    flattened_part_description, dest_partition_key,
                    flattened_part_description
                ])
                num_destination_partitions += 1

        if num_destination_partitions == 0:
            # The destination disk must be empty.
            self.restore_partition_selection_list.append([
                self.dest_drive_node, is_restoring_partition,
                flat_image_part_description, self.dest_drive_node,
                flattened_part_description, dest_partition_key,
                flattened_part_description
            ])

        # Populate combobox (right-hand side column)
        num_combo_box_entries = 0
        if 'partitions' in self.dest_drive_dict.keys() and len(
                self.dest_drive_dict['partitions'].keys()) > 0:
            # Loop over the partitions in the destination drive
            for dest_partition_key in self.dest_drive_dict['partitions'].keys(
            ):
                if 'type' in self.dest_drive_dict['partitions'][
                        dest_partition_key].keys(
                        ) and self.dest_drive_dict['partitions'][
                            dest_partition_key]['type'] == "extended":
                    # Do not add a destination combobox entry for any Extended Boot Record (EBR) destination partition
                    # nodes to reduce risk of user confusion.
                    continue

                flattened_part_description = dest_partition_key + ": " + CombinedDriveState.flatten_part(
                    self.dest_drive_dict['partitions'][dest_partition_key])
                self.destination_partition_combobox_list.append(
                    [dest_partition_key, flattened_part_description])
                num_combo_box_entries += 1

        if num_combo_box_entries == 0:
            # TODO: Improve disk description
            flattened_disk_description = self.dest_drive_node
            # If there are no partitions in the destination drive, we place the entire drive as the destination
            self.destination_partition_combobox_list.append([
                self.dest_drive_node,
                "WHOLE DRIVE " + flattened_disk_description
            ])

        self.builder.get_object(
            "destination_partition_combobox_cell_renderer").set_sensitive(True)
Code example #7
    def _do_drive_query(self):
        env_C_locale = Utility.get_env_C_locale()

        drive_query_start_time = datetime.now()

        GLib.idle_add(self.please_wait_popup.set_secondary_label_text,
                      _("Unmounting: {path}").format(path=IMAGE_EXPLORER_DIR))
        returncode, failed_message = ImageExplorerManager._do_unmount(
            IMAGE_EXPLORER_DIR)
        if not returncode:
            GLib.idle_add(
                self.error_message_callback, False,
                _("Unable to shutdown Image Explorer") + "\n\n" +
                failed_message)
            GLib.idle_add(self.please_wait_popup.destroy)
            return

        if self.is_stop_requested():
            GLib.idle_add(self.error_message_callback, False,
                          _("Operation cancelled by user."))
            return

        GLib.idle_add(
            self.please_wait_popup.set_secondary_label_text,
            _("Unmounting: {path}").format(path=RESCUEZILLA_MOUNT_TMP_DIR))
        returncode, failed_message = ImageExplorerManager._do_unmount(
            RESCUEZILLA_MOUNT_TMP_DIR)
        if not returncode:
            GLib.idle_add(
                self.error_message_callback, False,
                _("Unable to unmount {path}").format(
                    path=RESCUEZILLA_MOUNT_TMP_DIR) + "\n\n" + failed_message)
            GLib.idle_add(self.please_wait_popup.destroy)
            return

        if self.is_stop_requested():
            GLib.idle_add(self.error_message_callback, False,
                          _("Operation cancelled by user."))
            return

        lsblk_cmd_list = [
            "lsblk", "-o",
            "KNAME,NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL,SERIAL", "--paths",
            "--bytes", "--json"
        ]
        blkid_cmd_list = ["blkid"]
        os_prober_cmd_list = ["os-prober"]

        lsblk_json_dict = {}
        blkid_dict = {}
        os_prober_dict = {}
        parted_dict_dict = collections.OrderedDict([])
        sfdisk_dict_dict = collections.OrderedDict([])

        # Clonezilla combines drive, partition and filesystem from multiple data sources (lsblk, blkid, parted etc)
        # Rescuezilla continues this approach to reach best possible Clonezilla compatibility.
        #
        # However, this sequential querying is slow. A parallel approach should in theory be much faster (but might be
        # less reliable if internal commands are creating file locks etc.)
        #
        # In practice, the sequential approach was about 25% faster than a first-cut (polling-based) parallel approach.
        # Parallel mode currently disabled, but kept for further development/analysis.
        mode = "sequential-drive-query"
        if mode == "sequential-drive-query":
            print("Running drive query in sequential mode")

            # TODO: Run with Utility.interruptable_run() so that even long-lived commands can have a signal sent to it
            #  to shutdown early.

            # Not checking return codes here because Clonezilla does not, and some of these commands are expected to
            # fail. The Utility.run() command prints the output to stdout.
            GLib.idle_add(self.please_wait_popup.set_secondary_label_text,
                          _("Running: {app}").format(app="lsblk"))
            process, flat_command_string, fail_description = Utility.run(
                "lsblk", lsblk_cmd_list, use_c_locale=True)
            lsblk_json_dict = json.loads(process.stdout)

            if self.is_stop_requested():
                GLib.idle_add(self.error_message_callback, False,
                              _("Operation cancelled by user."))
                return

            GLib.idle_add(self.please_wait_popup.set_secondary_label_text,
                          _("Running: {app}").format(app="blkid"))
            process, flat_command_string, fail_description = Utility.run(
                "blkid", blkid_cmd_list, use_c_locale=True)
            blkid_dict = Blkid.parse_blkid_output(process.stdout)

            if self.is_stop_requested():
                GLib.idle_add(self.error_message_callback, False,
                              _("Operation cancelled by user."))
                return

            GLib.idle_add(self.please_wait_popup.set_secondary_label_text,
                          _("Running: {app}").format(app="os-prober"))
            # Use os-prober to get OS information (running WITH original locale information)
            process, flat_command_string, fail_description = Utility.run(
                "osprober", os_prober_cmd_list, use_c_locale=True)
            os_prober_dict = OsProber.parse_os_prober_output(process.stdout)

            if self.is_stop_requested():
                GLib.idle_add(self.error_message_callback, False,
                              _("Operation cancelled by user."))
                return

            for lsblk_dict in lsblk_json_dict['blockdevices']:
                partition_longdevname = lsblk_dict['name']
                print("Going to run parted and sfdisk on " +
                      partition_longdevname)
                try:
                    GLib.idle_add(
                        self.please_wait_popup.set_secondary_label_text,
                        _("Running {app} on {device}").format(
                            app="parted", device=partition_longdevname))
                    process, flat_command_string, fail_description = Utility.run(
                        "parted",
                        self._get_parted_cmd_list(partition_longdevname),
                        use_c_locale=True)
                    if "unrecognized disk label" not in process.stderr:
                        parted_dict_dict[
                            partition_longdevname] = Parted.parse_parted_output(
                                process.stdout)
                    else:
                        print("Parted says " + process.stderr)

                    if self.is_stop_requested():
                        GLib.idle_add(self.error_message_callback, False,
                                      _("Operation cancelled by user."))
                        return
                    GLib.idle_add(
                        self.please_wait_popup.set_secondary_label_text,
                        _("Running {app} on {device}").format(
                            app="sfdisk", device=partition_longdevname))
                    process, flat_command_string, fail_description = Utility.run(
                        "sfdisk",
                        self._get_sfdisk_cmd_list(partition_longdevname),
                        use_c_locale=True)
                    sfdisk_dict_dict[
                        partition_longdevname] = Sfdisk.parse_sfdisk_dump_output(
                            process.stdout)
                    if self.is_stop_requested():
                        GLib.idle_add(self.error_message_callback, False,
                                      _("Operation cancelled by user."))
                        return

                except Exception:
                    print("Could run run parted on " + partition_longdevname)
        elif mode == "parallel-drive-query":
            print("Running drive query in parallel mode")
            # Launch drive query in parallel. Parallel Python subprocess.Popen() approach adapted from [1]
            # [1] https://stackoverflow.com/a/636601
            cmd_dict = {
                ('lsblk', ""):
                subprocess.Popen(lsblk_cmd_list,
                                 env=env_C_locale,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 encoding="utf-8",
                                 universal_newlines=True),
                ('blkid', ""):
                subprocess.Popen(blkid_cmd_list,
                                 env=env_C_locale,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 encoding="utf-8",
                                 universal_newlines=True),
                ('os_prober', ""):
                subprocess.Popen(os_prober_cmd_list,
                                 env=env_C_locale,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 encoding="utf-8",
                                 universal_newlines=True),
            }
            while cmd_dict:
                print("drive_query_process is length " + str(len(cmd_dict)) +
                      " with contents " + str(cmd_dict))
                for key in list(cmd_dict.keys()):
                    proc = cmd_dict[key]
                    retcode = proc.poll()
                    if retcode is not None:  # Process finished.
                        cmd_dict.pop(key, None)
                        if key[0] == "lsblk" and retcode == 0:
                            # lsblk is complete, so partition information can be used to launch the parted/sfdisk queries
                            lsblk_json_dict = json.loads(proc.stdout.read())
                            for lsblk_dict in lsblk_json_dict['blockdevices']:
                                partition_longdevname = lsblk_dict['name']
                                print("Launching parted and sfdisk on " +
                                      partition_longdevname)
                                try:
                                    cmd_dict[("parted", partition_longdevname
                                              )] = subprocess.Popen(
                                                  self._get_parted_cmd_list(
                                                      partition_longdevname),
                                                  env=env_C_locale,
                                                  encoding="utf-8",
                                                  universal_newlines=True)
                                    cmd_dict[("sfdisk", partition_longdevname
                                              )] = subprocess.Popen(
                                                  self._get_sfdisk_cmd_list(
                                                      partition_longdevname),
                                                  env=env_C_locale,
                                                  encoding="utf-8",
                                                  universal_newlines=True)
                                except Exception:
                                    print("Could launch sfdisk or parted on " +
                                          partition_longdevname)
                        elif key[0] == "blkid" and retcode == 0:
                            blkid_dict = Blkid.parse_blkid_output(
                                proc.stdout.read())
                        elif key[0] == "osprober" and retcode == 0:
                            os_prober_dict = OsProber.parse_os_prober_output(
                                proc.stdout.read())
                        elif key[
                                0] == "sfdisk" and retcode == 0 and proc.stdout is not None:
                            sfdisk_dict_dict[
                                key[1]] = Sfdisk.parse_sfdisk_dump_output(
                                    proc.stdout.read())
                        elif key[
                                0] == "parted" and retcode == 0 and proc.stdout is not None:
                            if proc.stderr is not None:
                                stderr = proc.stderr.read()
                                print("parted with key " + str(key) +
                                      " had stderr " + stderr)
                                if "unrecognized disk label" not in stderr:
                                    parted_dict_dict[
                                        key[1]] = Parted.parse_parted_output(
                                            proc.stdout.read())
                        else:
                            print(
                                "COULD NOT PROCESS process launched with key "
                                + str(key) + " return code" + str(retcode))
                            if proc.stdout is not None:
                                print("stdout:" + proc.stdout.read())
                            if proc.stderr is not None:
                                print(" stderr:" + proc.stderr.read())
                    else:  # No process is done, wait a bit and check again.
                        time.sleep(0.1)
                        continue
        else:
            raise Exception("Invalid drive query mode")
        self.drive_state = CombinedDriveState.construct_combined_drive_state_dict(
            lsblk_json_dict, blkid_dict, os_prober_dict, parted_dict_dict,
            sfdisk_dict_dict)
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.drive_state)

        drive_query_end_time = datetime.now()
        print("Drive query took: " +
              str((drive_query_end_time - drive_query_start_time)))
        GLib.idle_add(self.populate_drive_selection_table)
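The parallel branch above adapts a Popen polling pattern from the linked Stack Overflow answer. Stripped of the Rescuezilla-specific parsing, the core of that pattern looks roughly like the sketch below (placeholder commands, illustration only; the real code parses each tool's output as it completes).

import subprocess
import time

# Distilled polling loop (illustration only).
def run_in_parallel(cmd_lists):
    procs = {tuple(cmd): subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                                          universal_newlines=True)
             for cmd in cmd_lists}
    results = {}
    while procs:
        for key in list(procs.keys()):
            proc = procs[key]
            if proc.poll() is not None:  # process finished
                procs.pop(key)
                results[key] = (proc.returncode, proc.stdout.read(), proc.stderr.read())
            else:  # not finished yet; wait a little before polling again
                time.sleep(0.1)
    return results

# eg: run_in_parallel([["lsblk", "--json"], ["blkid"]])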
Code example #8
    def __init__(self, partition_long_device_node, absolute_path=None, enduser_filename=None):
        self.image_format = "METADATA_ONLY_FORMAT"
        self.long_device_node = partition_long_device_node
        if absolute_path is None:
            self.absolute_path = partition_long_device_node
        else:
            self.absolute_path = absolute_path

        if enduser_filename is None:
            self.enduser_filename = partition_long_device_node
        else:
            self.enduser_filename = enduser_filename
        self.normalized_sfdisk_dict = {'absolute_path': None, 'sfdisk_dict': {'partitions': {}}, 'file_length': 0}
        self.user_notes = ""
        self.warning_dict = {}

        # Clonezilla format
        self.ebr_dict = {}
        self.efi_nvram_dat_absolute_path = None
        self.short_device_node_partition_list = []
        self.short_device_node_disk_list = []
        self.lvm_vg_dev_dict = {}
        self.lvm_logical_volume_dict = {}
        self.sfdisk_chs_dict = None
        self.dev_fs_dict = {}
        self.size_bytes = 0
        self.enduser_readable_size = ""
        self.is_needs_decryption = False
        self.parted_dict = {'partitions': {}}
        self.post_mbr_gap_dict = {}
        self._mbr_absolute_path = None

        statbuf = os.stat(self.absolute_path)
        self.last_modified_timestamp = format_datetime(datetime.fromtimestamp(statbuf.st_mtime))
        print("Last modified timestamp " + self.last_modified_timestamp)

        process, flat_command_string, failed_message = Utility.run("Get partition table", ["sfdisk", "--dump", partition_long_device_node], use_c_locale=True)
        if process.returncode != 0:
            # Expect devices without a partition table to not be able to extract partition table
            print("Could not extract filesystem using sfdisk: " + process.stderr)
        else:
            sfdisk_string = process.stdout
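            # Sfdisk.generate_normalized_sfdisk_dict() takes a file path, so persist the dump to a temporary file first.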
            f = tempfile.NamedTemporaryFile(mode='w', delete=False)
            f.write(sfdisk_string)
            f.close()
            self.normalized_sfdisk_dict = Sfdisk.generate_normalized_sfdisk_dict(f.name, self)
        if 'device' in self.normalized_sfdisk_dict['sfdisk_dict'].keys():
            self.short_device_node_disk_list = [self.normalized_sfdisk_dict['sfdisk_dict']['device']]

        # Get the parted partition table. For convenience, using the bytes unit, not sectors.
        parted_process, flat_command_string, failed_message = Utility.run("Get filesystem information",
                                                          ["parted", "--script", partition_long_device_node, "unit", "b",
                                                           "print"], use_c_locale=True)
        if parted_process.returncode != 0:
            # Expect devices without a partition table to not be able to extract partition table
            print("Could not extract filesystem using parted: " + parted_process.stderr)
        self.parted_dict = Parted.parse_parted_output(parted_process.stdout)
        if len(self.short_device_node_disk_list) == 0 and 'long_dev_node' in self.parted_dict.keys():
            self.short_device_node_disk_list = [self.parted_dict['long_dev_node']]

        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.parted_dict)

        lsblk_process, flat_command_string, failed_message = Utility.run("Querying device capacity",
                                                                         ["lsblk", "--getsize64", partition_long_device_node],
                                                                         use_c_locale=True)
        if lsblk_process.returncode != 0:
            # Expected for NBD device nodes
            print("Failed to get drive capacity from device node")

        # Create a CombinedDriveState structure for the MetadataOnlyImage, which may otherwise not be populated.
        lsblk_cmd_list = ["lsblk", "-o", "KNAME,NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL,SERIAL", "--paths", "--bytes",
                          "--json", self.long_device_node]
        process, flat_command_string, fail_description = Utility.run("lsblk", lsblk_cmd_list, use_c_locale=True)
        lsblk_json_dict = json.loads(process.stdout)

        # blkid is called in DriveQuery and without arguments it prints information about all *partitions* in the system
        # (eg, /dev/sda1, /dev/sda2), but not the base device. With an argument, it only prints out the base device.
        # But globbing using a wildcard match prints out the base device *and* the partitions. Not ideal, but it works.
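        # eg, for /dev/sda the glob typically matches /dev/sda, /dev/sda1, /dev/sda2, ...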
        partition_device_glob_list = glob.glob(self.long_device_node + "*")
        blkid_cmd_list = ["blkid"] + partition_device_glob_list
        process, flat_command_string, fail_description = Utility.run("blkid", blkid_cmd_list, use_c_locale=True)
        blkid_dict = Blkid.parse_blkid_output(process.stdout)

        # OS Prober takes too long to run
        os_prober_dict = {}

        self.drive_state = CombinedDriveState.construct_combined_drive_state_dict(lsblk_json_dict=lsblk_json_dict,
                                                                                  blkid_dict=blkid_dict,
                                                                                  osprober_dict=os_prober_dict,
                                                                                  parted_dict_dict={self.long_device_node:self.parted_dict},
                                                                                  sfdisk_dict_dict={self.long_device_node:self.normalized_sfdisk_dict})
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.drive_state)

        self.image_format_dict_dict = collections.OrderedDict([])
        total_size_estimate = 0
        drive_state_partitions_dict = self.drive_state[self.long_device_node]['partitions']
        for partition_long_device_node in drive_state_partitions_dict:
            if 'type' in drive_state_partitions_dict[partition_long_device_node].keys() \
                    and drive_state_partitions_dict[partition_long_device_node]['type'] == "extended":
                # Skip extended partitions as they will be handled by the '-ebr' file
                continue
            self.image_format_dict_dict[partition_long_device_node] = {'type': "raw",
                                                                       'compression': "uncompressed",
                                                                       'is_lvm_logical_volume': False,
                                                                       'filesystem': drive_state_partitions_dict[partition_long_device_node]['filesystem']}

        # Estimate the disk size from sfdisk partition table backup
        last_partition_key, last_partition_final_byte = Sfdisk.get_highest_offset_partition(self.normalized_sfdisk_dict)
        self.size_bytes = last_partition_final_byte
        if self.size_bytes == 0:
            self.size_bytes = self.parted_dict['capacity']
        # Convert size in bytes to KB/MB/GB/TB as relevant
        self.enduser_readable_size = Utility.human_readable_filesize(int(self.size_bytes))
Code example #9
    def do_backup(self):
        self.at_least_one_non_fatal_error = False
        self.requested_stop = False
        # Clear proc dictionary
        self.proc.clear()
        self.summary_message_lock = threading.Lock()
        self.summary_message = ""

        env = Utility.get_env_C_locale()

        print("mkdir " + self.dest_dir)
        os.mkdir(self.dest_dir)

        short_selected_device_node = re.sub('/dev/', '',
                                            self.selected_drive_key)
        enduser_date = datetime.today().strftime('%Y-%m-%d-%H%M')
        clonezilla_img_filepath = os.path.join(self.dest_dir, "clonezilla-img")
        with open(clonezilla_img_filepath, 'w') as filehandle:
            try:
                output = "This image was saved by Rescuezilla at " + enduser_date + "\nSaved by " + self.human_readable_version + "\nThe log during saving:\n----------------------------------------------------------\n\n"
                filehandle.write(output)
            except:
                tb = traceback.format_exc()
                traceback.print_exc()
                error_message = _(
                    "Failed to write destination file. Please confirm it is valid to create the provided file path, and try again."
                ) + "\n\n" + tb
                GLib.idle_add(self.completed_backup, False, error_message)
                return

        self.logger = Logger(clonezilla_img_filepath)
        GLib.idle_add(self.update_backup_progress_bar, 0)

        process, flat_command_string, failed_message = Utility.run(
            "Saving blkdev.list", [
                "lsblk", "-oKNAME,NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL",
                self.selected_drive_key
            ],
            use_c_locale=True,
            output_filepath=os.path.join(self.dest_dir, "blkdev.list"),
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        blkid_cmd_list = ["blkid"]
        sort_cmd_list = ["sort", "-V"]
        Utility.print_cli_friendly("blkid ", [blkid_cmd_list, sort_cmd_list])
        self.proc['blkid'] = subprocess.Popen(blkid_cmd_list,
                                              stdout=subprocess.PIPE,
                                              env=env,
                                              encoding='utf-8')

        process, flat_command_string, failed_message = Utility.run(
            "Saving blkid.list", ["blkid"],
            use_c_locale=True,
            output_filepath=os.path.join(self.dest_dir, "blkid.list"),
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        process, flat_command_string, failed_message = Utility.run(
            "Saving Info-lshw.txt", ["lshw"],
            use_c_locale=True,
            output_filepath=os.path.join(self.dest_dir, "Info-lshw.txt"),
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        info_dmi_txt_filepath = os.path.join(self.dest_dir, "Info-dmi.txt")
        with open(info_dmi_txt_filepath, 'w') as filehandle:
            filehandle.write(
                "# This image was saved from this machine with DMI info at " +
                enduser_date + ":\n")
            filehandle.flush()
        process, flat_command_string, failed_message = Utility.run(
            "Saving Info-dmi.txt", ["dmidecode"],
            use_c_locale=True,
            output_filepath=info_dmi_txt_filepath,
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        info_lspci_filepath = os.path.join(self.dest_dir, "Info-lspci.txt")
        with open(info_lspci_filepath, 'w') as filehandle:
            # TODO: Improve datetime format string.
            filehandle.write(
                "This image was saved from this machine with PCI info at " +
                enduser_date + "\n")
            filehandle.write("'lspci' results:\n")
            filehandle.flush()

        process, flat_command_string, failed_message = Utility.run(
            "Appending `lspci` output to Info-lspci.txt", ["lspci"],
            use_c_locale=True,
            output_filepath=info_lspci_filepath,
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        msg_delimiter_star_line = "*****************************************************."
        with open(info_lspci_filepath, 'a+') as filehandle:
            filehandle.write(msg_delimiter_star_line + "\n")
            filehandle.write("'lspci -n' results:\n")
            filehandle.flush()

        # Show PCI vendor and device codes as numbers instead of looking them up in the PCI ID list.
        process, flat_command_string, failed_message = Utility.run(
            "Appending `lspci -n` output to Info-lspci.txt", ["lspci", "-n"],
            use_c_locale=True,
            output_filepath=info_lspci_filepath,
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        info_smart_filepath = os.path.join(self.dest_dir, "Info-smart.txt")
        with open(info_smart_filepath, 'w') as filehandle:
            filehandle.write(
                "This image was saved from this machine with hard drive S.M.A.R.T. info at "
                + enduser_date + "\n")
            filehandle.write(msg_delimiter_star_line + "\n")
            filehandle.write("For the drive: " + self.selected_drive_key +
                             "\n")
            filehandle.flush()

        # VirtualBox doesn't support smart, so ignoring the exit code here.
        # FIXME: Improve this.
        process, flat_command_string, failed_message = Utility.run(
            "Saving Info-smart.txt",
            ["smartctl", "--all", self.selected_drive_key],
            use_c_locale=True,
            output_filepath=info_smart_filepath,
            logger=self.logger)

        filepath = os.path.join(self.dest_dir, "Info-packages.txt")
        # Save Debian package information
        if shutil.which("dpkg") is not None:
            rescuezilla_package_list = ["rescuezilla", "util-linux", "gdisk"]
            with open(filepath, 'w') as filehandle:
                filehandle.write(
                    "Image was saved by these Rescuezilla-related packages:\n "
                )
                for pkg in rescuezilla_package_list:
                    dpkg_process = subprocess.run(['dpkg', "--status", pkg],
                                                  capture_output=True,
                                                  encoding="UTF-8")
                    if dpkg_process.returncode != 0:
                        continue
                    for line in dpkg_process.stdout.split("\n"):
                        if re.search("^Version: ", line):
                            version = line[len("Version: "):]
                            filehandle.write(pkg + "-" + version + " ")
                filehandle.write("\nSaved by " + self.human_readable_version +
                                 ".\n")

        # TODO: Clonezilla creates a file named "Info-saved-by-cmd.txt" file, to allow users to re-run the exact
        #  command again without going through the wizard. The proposed Rescuezilla approach to this feature is
        #  discussed here: https://github.com/rescuezilla/rescuezilla/issues/106

        filepath = os.path.join(self.dest_dir, "parts")
        with open(filepath, 'w') as filehandle:
            i = 0
            for partition_key in self.partitions_to_backup:
                short_partition_key = re.sub('/dev/', '', partition_key)
                to_backup_dict = self.partitions_to_backup[partition_key]
                is_swap = False
                if 'filesystem' in to_backup_dict.keys(
                ) and to_backup_dict['filesystem'] == "swap":
                    is_swap = True
                if 'type' not in to_backup_dict.keys(
                ) or 'type' in to_backup_dict.keys(
                ) and 'extended' != to_backup_dict['type'] and not is_swap:
                    # Clonezilla does not write the extended partition node into the parts file,
                    # nor does it write swap partition node
                    filehandle.write('%s' % short_partition_key)
                    # Ensure no trailing space on final iteration (to match Clonezilla format exactly)
                    if i + 1 != len(self.partitions_to_backup.keys()):
                        filehandle.write(' ')
                i += 1
            filehandle.write('\n')

        filepath = os.path.join(self.dest_dir, "disk")
        with open(filepath, 'w') as filehandle:
            filehandle.write('%s\n' % short_selected_device_node)

        compact_parted_filename = short_selected_device_node + "-pt.parted.compact"
        # Parted drive information with human-readable "compact" units: KB/MB/GB rather than sectors.
        process, flat_command_string, failed_message = Utility.run(
            "Saving " + compact_parted_filename, [
                "parted", "--script", self.selected_drive_key, "unit",
                "compact", "print"
            ],
            use_c_locale=True,
            output_filepath=os.path.join(self.dest_dir,
                                         compact_parted_filename),
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        # Parted drive information with standard sector units. Clonezilla doesn't output easily parsable output using
        # the --machine flag, so for maximum Clonezilla compatibility neither does Rescuezilla.
        parted_filename = short_selected_device_node + "-pt.parted"
        parted_process, flat_command_string, failed_message = Utility.run(
            "Saving " + parted_filename, [
                "parted", "--script", self.selected_drive_key, "unit", "s",
                "print"
            ],
            use_c_locale=True,
            output_filepath=os.path.join(self.dest_dir, parted_filename),
            logger=self.logger)
        if parted_process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        parted_dict = Parted.parse_parted_output(parted_process.stdout)
        partition_table = parted_dict['partition_table']

        # Save MBR for both msdos and GPT disks
        if "gpt" == partition_table or "msdos" == partition_table:
            filepath = os.path.join(self.dest_dir,
                                    short_selected_device_node + "-mbr")
            process, flat_command_string, failed_message = Utility.run(
                "Saving " + filepath, [
                    "dd", "if=" + self.selected_drive_key, "of=" + filepath,
                    "bs=512", "count=1"
                ],
                use_c_locale=False,
                logger=self.logger)
            if process.returncode != 0:
                with self.summary_message_lock:
                    self.summary_message += failed_message
                GLib.idle_add(self.completed_backup, False, failed_message)
                return

        if "gpt" == partition_table:
            first_gpt_filename = short_selected_device_node + "-gpt-1st"
            dd_process, flat_command_string, failed_message = Utility.run(
                "Saving " + first_gpt_filename, [
                    "dd", "if=" + self.selected_drive_key,
                    "of=" + os.path.join(self.dest_dir, first_gpt_filename),
                    "bs=512", "count=34"
                ],
                use_c_locale=False,
                logger=self.logger)
            if dd_process.returncode != 0:
                with self.summary_message_lock:
                    self.summary_message += failed_message
                GLib.idle_add(self.completed_backup, False, failed_message)
                return

            # From Clonezilla's scripts/sbin/ocs-functions:
            # We need to get the total size of disk so that we can skip and dump the last block:
            # The output of 'parted -s /dev/sda unit s print' is like:
            # --------------------
            # Disk /dev/hda: 16777215s
            # Sector size (logical/physical): 512B/512B
            # Partition Table: gpt
            #
            # Number  Start     End        Size       File system  Name     Flags
            #  1      34s       409640s    409607s    fat32        primary  msftres
            #  2      409641s   4316406s   3906766s   ext2         primary
            #  3      4316407s  15625000s  11308594s  reiserfs     primary
            # --------------------
            # to_seek = "$((${src_disk_size_sec}-33+1))"
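            # capacity - 32 equals Clonezilla's src_disk_size_sec - 33 + 1 from the formula above, so dd can dump the
            # trailing sectors holding the backup GPT.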
            to_skip = parted_dict['capacity'] - 32
            second_gpt_filename = short_selected_device_node + "-gpt-2nd"
            process, flat_command_string, failed_message = Utility.run(
                "Saving " + second_gpt_filename, [
                    "dd", "if=" + self.selected_drive_key,
                    "of=" + os.path.join(self.dest_dir, second_gpt_filename),
                    "skip=" + str(to_skip), "bs=512", "count=33"
                ],
                use_c_locale=False,
                logger=self.logger)
            if process.returncode != 0:
                with self.summary_message_lock:
                    self.summary_message += failed_message
                GLib.idle_add(self.completed_backup, False, failed_message)
                return

            # LC_ALL=C sgdisk -b $target_dir_fullpath/$(to_filename ${ihd})-gpt.gdisk /dev/$ihd | tee --append ${OCS_LOGFILE}
            gdisk_filename = short_selected_device_node + "-gpt.gdisk"
            process, flat_command_string, failed_message = Utility.run(
                "Saving " + gdisk_filename, [
                    "sgdisk", "--backup",
                    os.path.join(self.dest_dir, gdisk_filename),
                    self.selected_drive_key
                ],
                use_c_locale=True,
                logger=self.logger)
            if process.returncode != 0:
                with self.summary_message_lock:
                    self.summary_message += failed_message
                GLib.idle_add(self.completed_backup, False, failed_message)
                return

            sgdisk_filename = short_selected_device_node + "-gpt.sgdisk"
            process, flat_command_string, failed_message = Utility.run(
                "Saving " + sgdisk_filename,
                ["sgdisk", "--print", self.selected_drive_key],
                use_c_locale=True,
                output_filepath=os.path.join(self.dest_dir, sgdisk_filename),
                logger=self.logger)
            if process.returncode != 0:
                with self.summary_message_lock:
                    self.summary_message += failed_message
                GLib.idle_add(self.completed_backup, False, failed_message)
                return
        elif "msdos" == partition_table:
            # image_save
            first_partition_key, first_partition_offset_bytes = CombinedDriveState.get_first_partition(
                self.partitions_to_backup)
            # Maximum hidden data to backup is 1024MB
            hidden_data_after_mbr_limit = 1024 * 1024 * 1024
            if first_partition_offset_bytes > hidden_data_after_mbr_limit:
                self.logger.write(
                    "Calculated very large hidden data after MBR size. Skipping"
                )
            else:
                first_partition_offset_sectors = int(
                    first_partition_offset_bytes / 512)
                hidden_mbr_data_filename = short_selected_device_node + "-hidden-data-after-mbr"
                # FIXME: Appears one sector too large.
                process, flat_command_string, failed_message = Utility.run(
                    "Saving " + hidden_mbr_data_filename, [
                        "dd", "if=" + self.selected_drive_key, "of=" +
                        os.path.join(self.dest_dir, hidden_mbr_data_filename),
                        "skip=1", "bs=512",
                        "count=" + str(first_partition_offset_sectors)
                    ],
                    use_c_locale=False,
                    logger=self.logger)
                if process.returncode != 0:
                    with self.summary_message_lock:
                        self.summary_message += failed_message
                    GLib.idle_add(self.completed_backup, False, failed_message)
                    return

        else:
            self.logger.write("Partition table is: " + partition_table)

        # Parted reports a drive with a filesystem written directly to it (no partition table) as having a "loop" partition table.
        if partition_table is not None and partition_table != "loop":
            sfdisk_filename = short_selected_device_node + "-pt.sf"
            process, flat_command_string, failed_message = Utility.run(
                "Saving " + sfdisk_filename,
                ["sfdisk", "--dump", self.selected_drive_key],
                output_filepath=os.path.join(self.dest_dir, sfdisk_filename),
                use_c_locale=True,
                logger=self.logger)
            if process.returncode != 0:
                with self.summary_message_lock:
                    self.summary_message += failed_message
                GLib.idle_add(self.completed_backup, False, failed_message)
                return

        process, flat_command_string, failed_message = Utility.run(
            "Retreiving disk geometry with sfdisk ",
            ["sfdisk", "--show-geometry", self.selected_drive_key],
            use_c_locale=True,
            logger=self.logger)
        if process.returncode != 0:
            with self.summary_message_lock:
                self.summary_message += failed_message
            GLib.idle_add(self.completed_backup, False, failed_message)
            return

        geometry_dict = Sfdisk.parse_sfdisk_show_geometry(process.stdout)
        filepath = os.path.join(self.dest_dir,
                                short_selected_device_node + "-chs.sf")
        with open(filepath, 'w') as filehandle:
            for key in geometry_dict.keys():
                output = key + "=" + str(geometry_dict[key])
                self.logger.write(output)
                filehandle.write('%s\n' % output)

        # Query all Physical Volumes (PV), Volume Group (VG) and Logical Volume (LV). See unit test for a worked example.
        # TODO: In the Rescuezilla application architecture, this LVM information is best extracted during the drive
        # TODO: query step, and then integrated into the "combined drive state" dictionary. Doing it during the backup
        # TODO: process matches how Clonezilla does it, which is sufficient for now.
        # FIXME: This section is duplicated in partitions_to_restore.py.
        # Start the Logical Volume Manager (LVM). Caller raises Exception on failure
        Lvm.start_lvm2(self.logger)
        relevant_vg_name_dict = {}
        vg_state_dict = Lvm.get_volume_group_state_dict(self.logger)
        for partition_key in list(self.partitions_to_backup.keys()):
            for report_dict in vg_state_dict['report']:
                for vg_dict in report_dict['vg']:
                    if 'pv_name' in vg_dict.keys(
                    ) and partition_key == vg_dict['pv_name']:
                        if 'vg_name' in vg_dict.keys():
                            vg_name = vg_dict['vg_name']
                        else:
                            GLib.idle_add(
                                ErrorMessageModalPopup.
                                display_nonfatal_warning_message, self.builder,
                                "Could not find volume group name vg_name in "
                                + str(vg_dict))
                            # TODO: Re-evaluate how exactly Clonezilla uses /NOT_FOUND and whether introducing it here
                            # TODO: could improve Rescuezilla/Clonezilla interoperability.
                            continue
                        if 'pv_uuid' in vg_dict.keys():
                            pv_uuid = vg_dict['pv_uuid']
                        else:
                            GLib.idle_add(
                                ErrorMessageModalPopup.
                                display_nonfatal_warning_message, self.builder,
                                "Could not find physical volume UUID pv_uuid in "
                                + str(vg_dict))
                            continue
                        relevant_vg_name_dict[vg_name] = partition_key
                        lvm_vg_dev_list_filepath = os.path.join(
                            self.dest_dir, "lvm_vg_dev.list")
                        with open(lvm_vg_dev_list_filepath,
                                  'a+') as filehandle:
                            filehandle.write(vg_name + " " + partition_key +
                                             " " + pv_uuid + "\n")

        lv_state_dict = Lvm.get_logical_volume_state_dict(self.logger)
        for report_dict in lv_state_dict['report']:
            for lv_dict in report_dict['lv']:
                # Only consider VGs that match the partitions to backup list
                if 'vg_name' in lv_dict.keys(
                ) and lv_dict['vg_name'] in relevant_vg_name_dict.keys():
                    vg_name = lv_dict['vg_name']
                    if 'lv_path' in lv_dict.keys():
                        lv_path = lv_dict['lv_path']
                    else:
                        GLib.idle_add(
                            ErrorMessageModalPopup.
                            display_nonfatal_warning_message, self.builder,
                            "Could not find lv_path name in " + str(lv_dict))
                        continue
                    file_command_process, flat_command_string, failed_message = Utility.run(
                        "logical volume file info",
                        ["file", "--dereference", "--special-files", lv_path],
                        use_c_locale=True,
                        logger=self.logger)
                    if file_command_process.returncode != 0:
                        with self.summary_message_lock:
                            self.summary_message += failed_message
                        GLib.idle_add(self.completed_backup, False,
                                      failed_message)
                        return

                    output = file_command_process.stdout.split(
                        " ", maxsplit=1)[1].strip()
                    lvm_logv_list_filepath = os.path.join(
                        self.dest_dir, "lvm_logv.list")
                    # Append to file
                    with open(lvm_logv_list_filepath, 'a+') as filehandle:
                        filehandle.write(lv_path + "  " + output + "\n")

                    if 'lv_dm_path' in lv_dict.keys():
                        # Device mapper path, eg /dev/mapper/vgtest-lvtest
                        lv_dm_path = lv_dict['lv_dm_path']
                    else:
                        GLib.idle_add(
                            self.completed_backup, False,
                            "Could not find lv_dm_path name in " +
                            str(lv_dict))
                        return

                    if lv_dm_path in self.drive_state.keys(
                    ) and 'partitions' in self.drive_state[lv_dm_path].keys():
                        # Remove the partition key associated with the volume group that contains this LVM logical volume
                        # eg, /dev/sdc1 with detected filesystem, and replace it with  the logical volume filesystem.
                        # In other words, don't backup both the /dev/sdc1 device node AND the /dev/mapper node.
                        long_partition_key = relevant_vg_name_dict[
                            lv_dict['vg_name']]
                        self.partitions_to_backup.pop(long_partition_key, None)
                        for logical_volume in self.drive_state[lv_dm_path][
                                'partitions'].keys():
                            # Use the system drive state to get the exact filesystem for this /dev/mapper/ node,
                            # as derived from multiple sources (parted, lsblk etc) like how Clonezilla does it.
                            self.partitions_to_backup[
                                lv_path] = self.drive_state[lv_dm_path][
                                    'partitions'][logical_volume]
                            self.partitions_to_backup[lv_path]['type'] = 'part'

                    lvm_vgname_filepath = os.path.join(
                        self.dest_dir, "lvm_" + vg_name + ".conf")
                    # TODO: Evaluate the Clonezilla message from 2013 message that this command won't work on NFS
                    # TODO: due to a vgcfgbackup file lock issue.
                    vgcfgbackup_process, flat_command_string, failed_message = Utility.run(
                        "Saving LVM VG config " + lvm_vgname_filepath, [
                            "vgcfgbackup", "--file", lvm_vgname_filepath,
                            vg_name
                        ],
                        use_c_locale=True,
                        logger=self.logger)
                    if vgcfgbackup_process.returncode != 0:
                        with self.summary_message_lock:
                            self.summary_message += failed_message
                        GLib.idle_add(self.completed_backup, False,
                                      failed_message)
                        return

        filepath = os.path.join(self.dest_dir, "dev-fs.list")
        with open(filepath, 'w') as filehandle:
            filehandle.write('# <Device name>   <File system>\n')
            filehandle.write(
                '# The file systems detected below are a combination of several sources. The values may differ from `blkid` and `parted`.\n'
            )
            for partition_key in self.partitions_to_backup.keys():
                filesystem = self.partitions_to_backup[partition_key][
                    'filesystem']
                filehandle.write('%s %s\n' % (partition_key, filesystem))

        partition_number = 0
        for partition_key in self.partitions_to_backup.keys():
            partition_number += 1
            total_progress_float = Utility.calculate_progress_ratio(
                0, partition_number, len(self.partitions_to_backup.keys()))
            GLib.idle_add(self.update_backup_progress_bar,
                          total_progress_float)
            is_unmounted, message = Utility.umount_warn_on_busy(partition_key)
            if not is_unmounted:
                self.logger.write(message)
                with self.summary_message_lock:
                    self.summary_message += message + "\n"
                GLib.idle_add(self.completed_backup, False, message)

            short_device_node = re.sub('/dev/', '', partition_key)
            short_device_node = re.sub('/', '-', short_device_node)
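            # eg, /dev/nvme0n1p1 becomes nvme0n1p1, and /dev/mapper/vg-lv becomes mapper-vg-lv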
            filesystem = self.partitions_to_backup[partition_key]['filesystem']

            if 'type' in self.partitions_to_backup[partition_key].keys() and 'extended' in \
                    self.partitions_to_backup[partition_key]['type']:
                self.logger.write("Detected " + partition_key +
                                  " as extended partition. Backing up EBR")
                filepath = os.path.join(self.dest_dir,
                                        short_device_node + "-ebr")
                process, flat_command_string, failed_message = Utility.run(
                    "Saving " + filepath, [
                        "dd", "if=" + partition_key, "of=" + filepath,
                        "bs=512", "count=1"
                    ],
                    use_c_locale=False,
                    logger=self.logger)
                if process.returncode != 0:
                    with self.summary_message_lock:
                        self.summary_message += failed_message
                    GLib.idle_add(self.completed_backup, False, failed_message)
                    return

            if filesystem == 'swap':
                filepath = os.path.join(
                    self.dest_dir, "swappt-" + short_device_node + ".info")
                with open(filepath, 'w') as filehandle:
                    uuid = ""
                    label = ""
                    if 'uuid' in self.partitions_to_backup[partition_key].keys(
                    ):
                        uuid = self.partitions_to_backup[partition_key]['uuid']
                    if 'label' in self.partitions_to_backup[
                            partition_key].keys():
                        label = self.partitions_to_backup[partition_key][
                            'label']
                    filehandle.write('UUID="%s"\n' % uuid)
                    filehandle.write('LABEL="%s"\n' % label)
                    with self.summary_message_lock:
                        self.summary_message += _(
                            "Successful backup of swap partition {partition_name}"
                        ).format(partition_name=partition_key) + "\n"
                continue

            # Clonezilla uses -q2 priority by default (partclone > partimage > dd).
            # PartImage does not appear to be maintained software, so for simplicity, Rescuezilla is using a
            # partclone > partclone.dd priority
            # [1] https://clonezilla.org/clonezilla-live/doc/01_Save_disk_image/advanced/09-advanced-param.php
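            # ie, prefer a filesystem-specific binary (eg partclone.ext4) and fall back to sector-by-sector partclone.dd.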

            # Expand upon Clonezilla's ocs-get-comp-suffix() function
            compression_suffix = "gz"
            split_size = "4GB"
            # Partclone dd blocksize (16MB)
            partclone_dd_bs = "16777216"
            # TODO: Re-enable APFS support -- currently partclone Apple Filesystem is not used because it's too unstable [1]
            # [1] https://github.com/rescuezilla/rescuezilla/issues/65
            if shutil.which("partclone." +
                            filesystem) is not None and filesystem != "apfs":
                partclone_cmd_list = [
                    "partclone." + filesystem, "--logfile",
                    "/var/log/partclone.log", "--clone", "--source",
                    partition_key, "--output", "-"
                ]
                filepath = os.path.join(
                    self.dest_dir, short_device_node + "." + filesystem +
                    "-ptcl-img." + compression_suffix + ".")
                split_cmd_list = [
                    "split", "--suffix-length=2", "--bytes=" + split_size, "-",
                    filepath
                ]
            elif shutil.which("partclone.dd") is not None:
                partclone_cmd_list = [
                    "partclone.dd", "--buffer_size=" + partclone_dd_bs,
                    "--logfile", "/var/log/partclone.log", "--source",
                    partition_key, "--output", "-"
                ]
                filepath = os.path.join(
                    self.dest_dir, short_device_node + ".dd-ptcl-img." +
                    compression_suffix + ".")
                split_cmd_list = [
                    "split", "--suffix-length=2", "--bytes=" + split_size, "-",
                    filepath
                ]
            else:
                GLib.idle_add(self.completed_backup, False,
                              "Partclone not found.")
                return

            filesystem_backup_message = _(
                "Backup {partition_name} containing filesystem {filesystem} to {destination}"
            ).format(partition_name=partition_key,
                     filesystem=filesystem,
                     destination=filepath)
            GLib.idle_add(self.update_main_statusbar,
                          filesystem_backup_message)
            self.logger.write(filesystem_backup_message)

            gzip_cmd_list = ["gzip", "--stdout"]
            self.proc['partclone_backup_' + partition_key] = subprocess.Popen(
                partclone_cmd_list,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env,
                encoding='utf-8')

            self.proc['gzip_' + partition_key] = subprocess.Popen(
                gzip_cmd_list,
                stdin=self.proc['partclone_backup_' + partition_key].stdout,
                stdout=subprocess.PIPE,
                env=env,
                encoding='utf-8')

            self.proc['split_' + partition_key] = subprocess.Popen(
                split_cmd_list,
                stdin=self.proc['gzip_' + partition_key].stdout,
                stdout=subprocess.PIPE,
                env=env,
                encoding='utf-8')

            # Process partclone output. Partclone outputs an update every 3 seconds, so processing the data
            # on the current thread, for simplicity.
            # Poll process.stdout to show stdout live
            while True:
                if self.requested_stop:
                    return

                output = self.proc['partclone_backup_' +
                                   partition_key].stderr.readline()
                if self.proc['partclone_backup_' +
                             partition_key].poll() is not None:
                    break
                if output:
                    temp_dict = Partclone.parse_partclone_output(output)
                    if 'completed' in temp_dict.keys():
                        total_progress_float = Utility.calculate_progress_ratio(
                            temp_dict['completed'] / 100.0, partition_number,
                            len(self.partitions_to_backup.keys()))
                        GLib.idle_add(self.update_backup_progress_bar,
                                      total_progress_float)
                    if 'remaining' in temp_dict.keys():
                        GLib.idle_add(
                            self.update_backup_progress_status,
                            filesystem_backup_message + "\n\n" + output)
            rc = self.proc['partclone_backup_' + partition_key].poll()

            self.proc['partclone_backup_' + partition_key].stdout.close(
            )  # Allow p1 to receive a SIGPIPE if p2 exits.
            self.proc['gzip_' + partition_key].stdout.close(
            )  # Allow p2 to receive a SIGPIPE if p3 exits.
            output, err = self.proc['partclone_backup_' +
                                    partition_key].communicate()
            self.logger.write("Exit output " + str(output) + "stderr " +
                              str(err))
            if self.proc['partclone_backup_' + partition_key].returncode != 0:
                partition_summary = _(
                    "<b>Failed to backup partition</b> {partition_name}"
                ).format(partition_name=partition_key) + "\n"
                with self.summary_message_lock:
                    self.summary_message += partition_summary
                self.at_least_one_non_fatal_error = True
                extra_info = "\nThe command used internally was:\n\n" + flat_command_string + "\n\n" + "The output of the command was: " + str(
                    output) + "\n\n" + str(err)
                compression_stderr = self.proc['gzip_' + partition_key].stderr
                if compression_stderr is not None and compression_stderr != "":
                    extra_info += "\n\n" + str(
                        gzip_cmd_list) + " stderr: " + compression_stderr

                # TODO: Try to backup again, but using partclone.dd
                GLib.idle_add(
                    ErrorMessageModalPopup.display_nonfatal_warning_message,
                    self.builder, partition_summary + extra_info)

            else:
                with self.summary_message_lock:
                    self.summary_message += _(
                        "Successful backup of partition {partition_name}"
                    ).format(partition_name=partition_key) + "\n"

        # GLib.idle_add(self.update_progress_bar, (i + 1) / len(self.restore_mapping_dict.keys()))
        if self.requested_stop:
            return

        progress_ratio = i / len(self.partitions_to_backup.keys())
        i += 1
        # Display 100% progress for user
        GLib.idle_add(self.update_backup_progress_bar, progress_ratio)
        sleep(1.0)
        """
            partclone_cmd_list = ["partclone", "--logfile", "/tmp/rescuezilla_logfile.txt", "--overwrite", "/dev/"]

              if [ "$fs_p" != "dd" ]; then
    cmd_partclone="partclone.${fs_p} $PARTCLONE_SAVE_OPT -L $partclone_img_info_tmp -c -s $source_dev --output - | $compress_prog_opt"
  else
    # Some parameters for partclone.dd are not required. Here "-c" is not provided by partclone.dd when saving.
    cmd_partclone="partclone.${fs_p} $PARTCLONE_SAVE_OPT --buffer_size ${partclone_dd_bs} -L $partclone_img_info_tmp -s $source_dev --output - | $compress_prog_opt"
  fi
  case "$VOL_LIMIT" in
    [1-9]*)
       # $tgt_dir/${tgt_file}.${fs_pre}-img. is prefix, the last "." is necessary make the output file is like hda1.${fs_pre}-img.aa, hda1.${fs_pre}-img.ab. We do not add -d to make it like hda1.${fs_pre}-img.00, hda1.${fs_pre}-img.01, since it will confuse people that it looks like created by partimage (hda1.${fs_pre}-img.000, hda1.${fs_pre}-img.001)
       cmd_partclone="${cmd_partclone} | split -a $split_suf_len -b ${VOL_LIMIT}MB - $tgt_dir/$(to_filename ${tgt_file}).${fs_pre}-img.${comp_suf}. 2> $split_error"
       ;;
    *)
       cmd_partclone="${cmd_partclone} > $tgt_dir/$(to_filename ${tgt_file}).${fs_pre}-img.${comp_suf} 2> $split_error"
       ;;
  esac
  echo "Run partclone: $cmd_partclone" | tee --append ${OCS_LOGFILE}
  LC_ALL=C eval "(${cmd_partclone} && exit \${PIPESTATUS[0]})"


            cmd_partimage = "partimage $DEFAULT_PARTIMAGE_SAVE_OPT $PARTIMAGE_SAVE_OPT -B gui=no save $source_dev stdout | $compress_prog_opt"
            #case
            #"$VOL_LIMIT" in
            #[1 - 9] *)
            # "$tgt_dir/${tgt_file}." is prefix, the last "." is necessary
            # make the output file is like hda1.aa, hda1.ab.
            # We do not add -d to make it like hda1.00, hda1.01, since it will confuse people that it looks like created by partimage (hda1.000, hda1.001)
            cmd_partimage = "${cmd_partimage} | split -a $split_suf_len -b ${VOL_LIMIT}MB - $tgt_dir/${tgt_file}."
            """

        # Do checksum
        # IMG_ID=$(LC_ALL=C sha512sum $img_dir/clonezilla-img | awk -F" " '{print $1}')" >> $img_dir/Info-img-id.txt

        GLib.idle_add(self.completed_backup, True, "")
Code example #10
    def _do_drive_query(self):
        env_C_locale = Utility.get_env_C_locale()

        drive_query_start_time = datetime.now()
        lsblk_cmd_list = [
            "lsblk", "-o", "KNAME,NAME,SIZE,TYPE,FSTYPE,MOUNTPOINT,MODEL",
            "--paths", "--bytes", "--json"
        ]
        blkid_cmd_list = ["blkid"]
        os_prober_cmd_list = ["os-prober"]

        lsblk_json_dict = {}
        blkid_dict = {}
        os_prober_dict = {}
        parted_dict_dict = collections.OrderedDict([])
        sfdisk_dict_dict = collections.OrderedDict([])

        # Clonezilla combines drive, partition and filesystem from multiple data sources (lsblk, blkid, parted etc)
        # Rescuezilla continues this approach to reach best possible Clonezilla compatibility.
        #
        # However this sequential querying is slow. A parallel approach should be in theory much faster (but might be
        # less reliable if internal commands are creating file locks etc.)
        #
        # In practice, the sequential approach was about 25% faster than a first-cut (polling-based) parallel approach.
        # Parallel mode currently disabled, but kept for further development/analysis.
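        # Switch the value below to "parallel-drive-query" to exercise the experimental parallel path.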
        mode = "sequential-drive-query"
        if mode == "sequential-drive-query":
            print("Running drive query in sequential mode")
            lsblk_stdout, lsblk_stderr, lsblk_return_code = Utility.run_external_command(
                lsblk_cmd_list, self.temp_callback, env_C_locale)
            lsblk_json_dict = json.loads(lsblk_stdout)
            blkid_stdout, blkid_stderr, blkid_return_code = Utility.run_external_command(
                blkid_cmd_list, self.temp_callback, env_C_locale)
            blkid_dict = Blkid.parse_blkid_output(blkid_stdout)

            # Use os-prober to get OS information (running WITH the original locale information)
            os_prober_stdout, os_prober_stderr, os_prober_return_code = Utility.run_external_command(
                os_prober_cmd_list, self.temp_callback, os.environ.copy())
            os_prober_dict = OsProber.parse_os_prober_output(os_prober_stdout)

            for lsblk_dict in lsblk_json_dict['blockdevices']:
                partition_longdevname = lsblk_dict['name']
                print("Going to run parted and sfdisk on " +
                      partition_longdevname)
                try:
                    parted_stdout, parted_stderr, parted_return_code = Utility.run_external_command(
                        self._get_parted_cmd_list(partition_longdevname),
                        self.temp_callback, env_C_locale)
                    if "unrecognized disk label" not in parted_stderr:
                        parted_dict_dict[
                            partition_longdevname] = Parted.parse_parted_output(
                                parted_stdout)
                    else:
                        print("Parted says " + parted_stderr)
                    sfdisk_stdout, sfdisk_stderr, sfdisk_return_code = Utility.run_external_command(
                        self._get_sfdisk_cmd_list(partition_longdevname),
                        self.temp_callback, env_C_locale)
                    sfdisk_dict_dict[
                        partition_longdevname] = Sfdisk.parse_sfdisk_dump_output(
                            sfdisk_stdout)
                except Exception:
                    print("Could run run parted on " + partition_longdevname)
        elif mode == "parallel-drive-query":
            print("Running drive query in parallel mode")
            # Launch drive query in parallel. Parallel Python subprocess.Popen() approach adapted from [1]
            # [1] https://stackoverflow.com/a/636601
            cmd_dict = {
                ('lsblk', ""):
                subprocess.Popen(lsblk_cmd_list,
                                 env=env_C_locale,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 encoding="utf-8",
                                 universal_newlines=True),
                ('blkid', ""):
                subprocess.Popen(blkid_cmd_list,
                                 env=env_C_locale,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 encoding="utf-8",
                                 universal_newlines=True),
                ('os_prober', ""):
                subprocess.Popen(os_prober_cmd_list,
                                 env=env_C_locale,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 encoding="utf-8",
                                 universal_newlines=True),
            }
            while cmd_dict:
                print("drive_query_process is length " + str(len(cmd_dict)) +
                      " with contents " + str(cmd_dict))
                for key in list(cmd_dict.keys()):
                    proc = cmd_dict[key]
                    retcode = proc.poll()
                    if retcode is not None:  # Process finished.
                        cmd_dict.pop(key, None)
                        if key[0] == "lsblk" and retcode == 0:
                            # lsblk is complete, partition information can be used to launch the parted/sfdisk
                            lsblk_json_dict = json.loads(proc.stdout.read())
                            for lsblk_dict in lsblk_json_dict['blockdevices']:
                                partition_longdevname = lsblk_dict['name']
                                print("Launching parted and sfdisk on " +
                                      partition_longdevname)
                                try:
                                    cmd_dict[("parted", partition_longdevname
                                              )] = subprocess.Popen(
                                                  self._get_parted_cmd_list(
                                                      partition_longdevname),
                                                  env=env_C_locale,
                                                  encoding="utf-8",
                                                  universal_newlines=True)
                                    cmd_dict[("sfdisk", partition_longdevname
                                              )] = subprocess.Popen(
                                                  self._get_sfdisk_cmd_list(
                                                      partition_longdevname),
                                                  env=env_C_locale,
                                                  encoding="utf-8",
                                                  universal_newlines=True)
                                except Exception:
                                    print("Could launch sfdisk or parted on " +
                                          partition_longdevname)
                        elif key[0] == "blkid" and retcode == 0:
                            blkid_dict = Blkid.parse_blkid_output(
                                proc.stdout.read())
                        elif key[0] == "osprober" and retcode == 0:
                            os_prober_dict = OsProber.parse_os_prober_output(
                                proc.stdout.read())
                        elif key[0] == "sfdisk" and retcode == 0 and proc.stdout is not None:
                            sfdisk_dict_dict[key[1]] = Sfdisk.parse_sfdisk_dump_output(
                                proc.stdout.read())
                        elif key[0] == "parted" and retcode == 0 and proc.stdout is not None:
                            if proc.stderr is not None:
                                stderr = proc.stderr.read()
                                print("parted with key " + str(key) +
                                      " had stderr " + stderr)
                                if "unrecognized disk label" not in stderr:
                                    parted_dict_dict[key[1]] = Parted.parse_parted_output(
                                        proc.stdout.read())
                        else:
                            print(
                                "COULD NOT PROCESS process launched with key "
                                + str(key) + " return code" + str(retcode))
                            if proc.stdout is not None:
                                print("stdout:" + proc.stdout.read())
                            if proc.stderr is not None:
                                print(" stderr:" + proc.stderr.read())
                    else:  # No process is done, wait a bit and check again.
                        time.sleep(0.1)
                        continue
        else:
            raise Exception("Invalid drive query mode")
        self.drive_state = CombinedDriveState.construct_combined_drive_state_dict(
            lsblk_json_dict, blkid_dict, os_prober_dict, parted_dict_dict,
            sfdisk_dict_dict)
        pp = pprint.PrettyPrinter(indent=4)
        pp.pprint(self.drive_state)

        drive_query_end_time = datetime.now()
        print("Drive query took: " +
              str((drive_query_end_time - drive_query_start_time)))
        GLib.idle_add(self.populate_drive_selection_table)