Code example #1
    def scan_backup_image(dir, prefix, is_needs_decryption):
        image_format_dict = {}
        dd_image_abs_path_glob_list = ClonezillaImage._get_glob_list_of_split_images(
            os.path.join(dir, prefix + "[.]dd-img[.][a-zA-Z][a-zA-Z]"))
        ntfsclone_image_abs_path_glob_list = ClonezillaImage._get_glob_list_of_split_images(
            os.path.join(dir, prefix + "[.]ntfs-img[.][a-zA-Z][a-zA-Z]"))
        partclone_image_abs_path_glob_list = ClonezillaImage._get_glob_list_of_split_images(
            os.path.join(dir, prefix + "[.]*-ptcl-img[.][a-zA-Z]*[.][a-zA-Z][a-zA-Z]"))
        partimage_image_abs_path_glob_list = ClonezillaImage._get_glob_list_of_split_images(
            os.path.join(dir, prefix + "[.][a-zA-Z][a-zA-Z]"))
        swap_partition_info_glob_list = glob.glob(os.path.join(dir, "swappt-" + prefix + ".info"))

        if len(dd_image_abs_path_glob_list) > 0:
            image_format_dict = {'type': "dd",
                                 'absolute_filename_glob_list': dd_image_abs_path_glob_list,
                                 'compression': ClonezillaImage.detect_compression(dd_image_abs_path_glob_list),
                                 'binary': "partclone.dd",
                                 "prefix": prefix,
                                 'is_lvm_logical_volume': False}

        elif len(ntfsclone_image_abs_path_glob_list) > 0:
            image_format_dict = {'type': "ntfsclone",
                                 'absolute_filename_glob_list': ntfsclone_image_abs_path_glob_list,
                                 'compression': ClonezillaImage.detect_compression(ntfsclone_image_abs_path_glob_list),
                                 'binary': "ntfsclone",
                                 "prefix": prefix,
                                 'is_lvm_logical_volume': False}
        elif len(partclone_image_abs_path_glob_list) > 0:
            m = utility.REMatcher(os.path.basename(partclone_image_abs_path_glob_list[0]))
            # sdf1.btrfs-ptcl-img.gz.aa
            if m.match(r"" + prefix + "[.]([a-zA-Z0-9+]+)-ptcl-img[.][a-zA-Z0-9]+[.]aa"):
                # Clonezilla's image format has to be detected from the filename. Only partclone image filenames
                # embed the compression; the other formats do not.
                filesystem = m.group(1)
                image_format_dict = {'type': "partclone",
                                     'absolute_filename_glob_list': partclone_image_abs_path_glob_list,
                                     'compression': ClonezillaImage.detect_compression(
                                         partclone_image_abs_path_glob_list),
                                     'filesystem': filesystem,
                                     'binary': "partclone." + filesystem,
                                     "prefix": prefix,
                                     'is_lvm_logical_volume': False}
        # PartImage images have a much wider wildcard that will match the above formats, so it's important we test it last
        elif len(partimage_image_abs_path_glob_list) > 0:
            image_format_dict = {'type': "partimage",
                                 'absolute_filename_glob_list': partimage_image_abs_path_glob_list,
                                 'compression': ClonezillaImage.detect_compression(partimage_image_abs_path_glob_list),
                                 'binary': 'partimage',
                                 "prefix": prefix,
                                 'is_lvm_logical_volume': False}
        elif len(swap_partition_info_glob_list) > 0:
            if not is_needs_decryption:
                image_format_dict = Swappt.parse_swappt_info(
                    Utility.read_file_into_string(swap_partition_info_glob_list[0]))
                image_format_dict['type'] = "swap"
                image_format_dict['prefix'] = prefix
                image_format_dict['is_lvm_logical_volume'] = False
        else:
            print("Unable to find associated image for " + prefix)
        return image_format_dict
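A minimal usage sketch. The backup directory, prefix and the assumption that this is a static method of ClonezillaImage are illustrative, not verified against the Rescuezilla API:

    # Hypothetical call: scan a Clonezilla backup directory for the "sda1" partition image.
    image_format_dict = ClonezillaImage.scan_backup_image(
        "/mnt/backup/2021-01-01-10-img", "sda1", is_needs_decryption=False)
    print(image_format_dict.get('type'))  # e.g. "partclone" if sda1.*-ptcl-img.*.aa files exist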
Code example #2
File: qemu_image.py Project: xiogun/rescuezilla
    def is_supported_extension(filename):
        # https://fileinfo.com/filetypes/disk_image
        # https://qemu-project.gitlab.io/qemu/system/images.html
        supported_extensions = {
                                # The ".img" extension is not listed because it's also used by FOG Project and
                                # Foxclone, which causes misleading entries when Rescuezilla scans it as a raw
                                # uncompressed image.
                                # The ".iso" extension is also not listed because the value-add for ISO9660 ISOs
                                # is low and the scan takes time.
                                "RAW": [".dd"],
                                "VirtualBox": [".vdi"],
                                "VMWare": [".vhd", ".vmdk"],
                                "Hyper-V": [".vhdx", ".vhd"],
                                "QEMU": [".qcow2", ".qcow", ".qed"],
                                "Apple DMG": [".dmg"],
                                "Parallels": [".hds", ".hdd", ".fdd"]
        }
        # Don't match on Redo Rescue images, which have filenames such as "20210307_sda1_001.img"
        m = utility.REMatcher(filename)
        if m.match(r".*_\d\d\d\.img"):
            return False

        for key in supported_extensions.keys():
            for extension in supported_extensions[key]:
                if filename.lower().endswith(extension):
                    return True
        return False
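A hedged usage sketch, assuming the function is exposed as a static method of QemuImage (the snippet comes from qemu_image.py); filenames are illustrative:

    print(QemuImage.is_supported_extension("win10-disk1.vmdk"))       # True
    print(QemuImage.is_supported_extension("20210307_sda1_001.img"))  # False (Redo Rescue-style name)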
Code example #3
 def get_short_device_from_swappt_info_filename(
         swap_partition_info_abs_path):
     m = utility.REMatcher(os.path.basename(swap_partition_info_abs_path))
     if m.match("swappt-([a-zA-Z0-9-+]+).info"):
         short_device_node = m.group(1)
     else:
         raise Exception("Unable to extract short device node from " +
                         swap_partition_info_abs_path)
     return short_device_node
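For example (the path is hypothetical):

    # "swappt-sda3.info" -> "sda3"; a non-matching filename raises an Exception.
    short_dev = get_short_device_from_swappt_info_filename("/mnt/backup/img/swappt-sda3.info")
    print(short_dev)  # sda3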
Code example #4
 def parse_sfdisk_show_geometry(line):
     temp_dict = {}
     m = utility.REMatcher(line)
     if m.match(
             r"^/dev/.*:\s*([0-9]*)\scylinders,\s([0-9]*)\sheads,\s([0-9]*)\ssectors/track$"
     ):
         temp_dict['cylinders'] = int(m.group(1))
         temp_dict['heads'] = int(m.group(2))
         temp_dict['sectors'] = int(m.group(3))
         return temp_dict
     else:
         print("Could not process: " + line)
         return temp_dict
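A sample line in the format the regular expression expects (the values are illustrative):

    line = "/dev/sda: 2610 cylinders, 255 heads, 63 sectors/track"
    print(parse_sfdisk_show_geometry(line))
    # {'cylinders': 2610, 'heads': 255, 'sectors': 63}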
Code example #5
 def parse_proc_mdstat_string(proc_mdstat_string):
     proc_mdstat_dict = {}
     for line in proc_mdstat_string.splitlines():
         m = utility.REMatcher(line)
         if m.match(r"Personalities : ([.*])"):
             # TODO: Process further. If required
             proc_mdstat_dict['personalities'] = m.group(1)
             continue
         elif m.match(r"([a-zA-Z0-9]+) : active.*"):
             md_device = m.group(1)
             # TODO: Process further. If required
             proc_mdstat_dict[md_device] = line
         else:
             print("Not processing line" + line)
     return proc_mdstat_dict
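A hedged usage sketch with a made-up two-line /proc/mdstat fragment (the expected output assumes the Personalities regex captures the remainder of the line, as written above):

    sample = "Personalities : [raid1]\nmd0 : active raid1 sdb1[1] sda1[0]"
    print(parse_proc_mdstat_string(sample))
    # {'personalities': '[raid1]', 'md0': 'md0 : active raid1 sdb1[1] sda1[0]'}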
Code example #6
 def _parse_filename(apart_gtk_filename):
     m = utility.REMatcher(apart_gtk_filename)
     # Filename in format "(name)-(timestamp).apt.(filesystem).(compression)", with an optional ".inprogress" suffix
     if not m.match(r"^([a-zA-Z0-9-_+]*)-([0-9]+-[0-9]+-[0-9]+T[0-9]+)\.apt\.([a-zA-Z0-9+]+)\.([a-zA-Z0-9]+)([\.inprogress]?)"):
         raise ValueError("Unable to process ApartGTK image: " + apart_gtk_filename)
     else:
         apart_gtk_image_dict = {
             'name': m.group(1),
             'timestamp': m.group(2),
             'filesystem': m.group(3),
             'compression': m.group(4),
             'inprogress': False
         }
     if "inprogress" in apart_gtk_filename:
         apart_gtk_image_dict['inprogress'] = True
     return apart_gtk_image_dict
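A hedged example with an illustrative Apart GTK filename:

    d = _parse_filename("root-2021-03-07T1331.apt.ext4.zst")
    print(d)
    # {'name': 'root', 'timestamp': '2021-03-07T1331', 'filesystem': 'ext4',
    #  'compression': 'zst', 'inprogress': False}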
Code example #7
    def is_supported_extension(filename):
        # https://fileinfo.com/filetypes/disk_image
        # https://qemu-project.gitlab.io/qemu/system/images.html


        # Ignore [/mnt/backup]/sbin/partclone.dd
        if filename.lower().endswith("partclone.dd"):
            return False, ""

        # Don't match on Redo Rescue images, which have filenames such as "20210307_sda1_001.img"
        m = utility.REMatcher(filename)
        if m.match(r".*_\d\d\d\.img"):
            return False, ""

        for key in QemuImage.SUPPORTED_EXTENSIONS.keys():
            for extension in QemuImage.SUPPORTED_EXTENSIONS[key]:
                if filename.lower().endswith(extension):
                    return True, extension
        return False, ""
Code example #8
File: partclone.py Project: xiogun/rescuezilla
 def parse_partclone_output(line):
     temp_dict = {}
     m = utility.REMatcher(line)
     if line.strip() == "":
         # Ignore
         return temp_dict
     elif line.startswith("Device size") or line.startswith(
             "Space in use") or line.startswith(
                 "Free Space") or line.startswith(
                     "Block size") or line.startswith("done!"):
         temp_dict['status'] = line
         return temp_dict
     elif line.startswith("Calculating"):
         temp_dict['status'] = line
         return temp_dict
     # Processing: Elapsed: 00:01:38, Remaining: 00:00:28, Completed:  77.58%,   2.57MB/min,
     elif m.match(
             r"\s*Elapsed:\s*([0-9]*:[0-9]*:[0-9]*),\s*Remaining:\s*([0-9]*:[0-9]*:[0-9]*),\s*Completed:\s*([0-9]*\.[0-9]*)%,\s*[Rate:]*\s*([0-9]*\.[0-9]\w*/min).*"
     ):
         temp_dict['elapsed'] = m.group(1)
         temp_dict['remaining'] = m.group(2)
         temp_dict['completed'] = float(m.group(3))
         temp_dict['rate'] = m.group(4)
         return temp_dict
     # Processing: current block:     121264, total block:     240127, Complete: 100.00%
     elif m.match(
             r"\s*current\sblock:\s*([0-9]*),\s*total\sblock:\s*([0-9]*),\s*Complete:\s*([0-9]*\.[0-9]*)%"
     ):
         temp_dict['current_block'] = m.group(1)
         temp_dict['total_block'] = m.group(2)
         temp_dict['completed_block_percent'] = m.group(3)
         return temp_dict
     elif line.startswith("Cloned"):
         temp_dict['status'] = line
         return temp_dict
     else:
         print("Not yet interpreting partclone output: " + line)
         return temp_dict
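A usage sketch with the progress line quoted in the comment above (the function may be a static method of a Partclone class in partclone.py; it is called directly here for brevity):

    line = "Elapsed: 00:01:38, Remaining: 00:00:28, Completed:  77.58%,   2.57MB/min,"
    print(parse_partclone_output(line))
    # {'elapsed': '00:01:38', 'remaining': '00:00:28', 'completed': 77.58, 'rate': '2.57MB/min'}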
Code example #9
 def are_partitions_listed_in_proc_partitions(proc_partitions_string, short_device_node):
     for line in proc_partitions_string.splitlines():
         m = utility.REMatcher(line)
         if m.match(r".*" + short_device_node + r"[p]+[0-9]+$"):
             return True
     return False
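A sketch with a made-up /proc/partitions fragment; note the regular expression as written expects a "p"-separated partition suffix (NVMe-style names such as nvme0n1p1):

    sample = "major minor  #blocks  name\n 259  0  500107608 nvme0n1\n 259  1     524288 nvme0n1p1"
    print(are_partitions_listed_in_proc_partitions(sample, "nvme0n1"))  # True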
Code example #10
    def _do_mount_command(self, please_wait_popup, callback, image,
                          partition_key, destination_path):
        env = Utility.get_env_C_locale()
        backup_timestart = datetime.now()
        try:
            if not os.path.exists(destination_path) and not os.path.isdir(
                    destination_path):
                os.mkdir(destination_path, 0o755)

            if shutil.which("nbdkit") is None:
                GLib.idle_add(callback, False, "nbdkit not found")
                GLib.idle_add(please_wait_popup.destroy)
                return

            if self._check_stop_and_cleanup(please_wait_popup, callback,
                                            destination_path):
                return

            GLib.idle_add(please_wait_popup.set_secondary_label_text,
                          "Loading Network Block Device driver (step 1/5)")
            # Ensure nbd-kernel module loaded
            process, flat_command_string, failed_message = Utility.interruptable_run(
                "Loading NBD kernel module", ["modprobe", "nbd"],
                use_c_locale=True,
                is_shutdown_fn=self.is_stop_requested)
            if process.returncode != 0:
                print(failed_message)
                GLib.idle_add(callback, False, failed_message)
                GLib.idle_add(please_wait_popup.destroy)
                return

            # Unmount any previous
            is_unmount_success, failed_message = self._do_unmount(
                destination_path)
            if not is_unmount_success:
                print(failed_message)
                GLib.idle_add(callback, False, failed_message)
                GLib.idle_add(please_wait_popup.destroy)
                return

            mount_device = MOUNTABLE_NBD_DEVICE
            compression = ""
            image_file_list = []
            if isinstance(self.selected_image, ClonezillaImage) or isinstance(self.selected_image, RedoBackupLegacyImage) \
                    or isinstance(self.selected_image, FogProjectImage) or isinstance(self.selected_image, RedoRescueImage) \
                    or isinstance(self.selected_image, FoxcloneImage) or isinstance(self.selected_image, ApartGtkImage):
                # Clonezilla images support gzip bzip2 lzo lzma xz lzip lrzip lz4 zstd and uncompressed.
                if 'absolute_filename_glob_list' not in image.image_format_dict_dict[
                        partition_key].keys():
                    GLib.idle_add(callback, False,
                                  "No associated image files found")
                    GLib.idle_add(please_wait_popup.destroy)
                    return
                image_file_list = image.image_format_dict_dict[partition_key][
                    'absolute_filename_glob_list']
                compression = image.image_format_dict_dict[partition_key][
                    'compression']
                incompatible_image_message = ""
                if compression != "gzip" and compression != "xz" and compression != "uncompressed":
                    incompatible_image_message = "Currently only supports gzip, xz and uncompressed images, not: " + compression
                    incompatible_image_message += "\nSupport for more compression types coming in a future release."
                if 'type' in image.image_format_dict_dict[partition_key].keys(
                ):
                    image_type = image.image_format_dict_dict[partition_key][
                        'type']
                    if image_type != 'partclone':
                        incompatible_image_message = "Currently only supports partclone images, not: " + image_type
                        incompatible_image_message += "\nSupport for ntfsclone images coming in a future release."
                if incompatible_image_message != "":
                    GLib.idle_add(
                        callback, False,
                        "Image Explorer (beta) failed to mount image:\n\n" +
                        incompatible_image_message)
                    GLib.idle_add(please_wait_popup.destroy)
                    return
            elif isinstance(image, QemuImage):
                image.associate_nbd(QEMU_NBD_NBD_DEVICE)
                base_device_node, partition_number = Utility.split_device_string(
                    partition_key)
                mount_device = Utility.join_device_string(
                    QEMU_NBD_NBD_DEVICE, partition_number)
            if not isinstance(image, QemuImage):
                # Concatenate the split partclone images into a virtual block device using nbdkit. This step is fast
                # because it's just a logical mapping within the nbdkit process. In other words, "lazy evaluation":
                # no concatenation is actually occurring.
                #
                # nbdkit's "--filter=" arguments can be used to decompress requested blocks on-the-fly in a single nbdkit
                # invocation. However, for flexibility with the older nbdkit version in Ubuntu 20.04 (see below),
                # and the limited number of compression filters even in recent nbdkit versions, Rescuezilla does this in
                # two steps: joining the files is a separate step from decompressing them. This approach provides
                # greater flexibility for alternative decompression utilities such as perhaps `archivemount` or AVFS.

                # Launches nbdkit using the 'split' plugin (`man nbdkit-split-plugin`) to concatenate the files.
                # The concatenated compressed image file is almost certainly not aligned to a 512-byte or 4096-byte block
                # boundary. To force the partially-filled final block to always be served up, the truncate filter
                # (`man nbdkit-truncate-filter`) is used with the "round-up" parameter to pad that final block with zeroes.
                nbdkit_join_cmd_list = [
                    "nbdkit", "--no-fork", "--readonly", "--filter=truncate",
                    "split"
                ] + image_file_list + ["round-up=512"]
                flat_command_string = Utility.print_cli_friendly(
                    "Create nbd server using nbdkit to dynamically concatenate (possibly hundreds) of 4 gigabyte pieces (and zero-pad the final partially-filled block to the 512-byte block boundary)",
                    [nbdkit_join_cmd_list])
                GLib.idle_add(
                    please_wait_popup.set_secondary_label_text,
                    "Joining all the split image files (this may take a while) (step 2/5"
                )
                # Launch the server (long-lived process so PID/exit code/stdout/stderr management is especially important)
                nbdkit_join_process = subprocess.Popen(nbdkit_join_cmd_list,
                                                       stdout=subprocess.PIPE,
                                                       env=env,
                                                       encoding='utf-8')
                print("Adding join process with pid " +
                      str(nbdkit_join_process.pid) + " to queue")
                self.nbdkit_join_process_queue.put(nbdkit_join_process)

                if self._check_stop_and_cleanup(please_wait_popup, callback,
                                                destination_path):
                    return

                # Connect the /dev/nbdN device node to the NBD server launched earlier.
                nbdclient_connect_cmd_list = [
                    "nbd-client", "-block-size", "512", "localhost",
                    JOINED_FILES_NBD_DEVICE
                ]
                is_success, message = Utility.retry_run(
                    short_description=
                    "Associating the nbdkit server process being used for dynamic CONCATENATION with the nbd device node: "
                    + JOINED_FILES_NBD_DEVICE,
                    cmd_list=nbdclient_connect_cmd_list,
                    expected_error_msg=
                    "Error: Socket failed: Connection refused",
                    retry_interval_seconds=1,
                    timeout_seconds=5,
                    is_shutdown_fn=self.is_stop_requested)
                if not is_success:
                    failed_message = message
                    is_unmount_success, unmount_failed_message = self._do_unmount(
                        destination_path, self.nbdkit_join_process_queue,
                        self.nbdkit_decompress_process_queue,
                        self.partclone_nbd_process_queue)
                    if not is_unmount_success:
                        failed_message += "\n\n" + unmount_failed_message
                    GLib.idle_add(callback, False, failed_message)
                    GLib.idle_add(please_wait_popup.destroy)
                    return

                if self._check_stop_and_cleanup(please_wait_popup, callback,
                                                destination_path):
                    return

                nbd_compression_filter_and_plugin = []
                if "xz" == compression:
                    # In the xz case, use the file plugin and the xz filter
                    nbd_compression_filter_and_plugin = ["--filter=xz", "file"]
                elif "gzip" == compression:
                    # In the gzip case, use the gzip plugin. This is because Ubuntu 20.04 (Focal) still use nbdkit v1.16
                    # which had not yet removed the gzip plugin and replaced it with a gzip filter [1] [2]
                    # [1] https://bugs.launchpad.net/ubuntu/+source/nbdkit/+bug/1904554
                    # [2] https://packages.ubuntu.com/focal/nbdkit
                    nbd_compression_filter_and_plugin = ["gzip"]
                elif "uncompressed" == compression:
                    # In the uncompressed case, use the file plugin without any compression filters (passthrough). This
                    # unnecessary extra layer for uncompressed data is inefficient in theory (and possibly in practice) but
                    # this approach makes the code simpler and the uncompressed case is still way faster than compressed.
                    nbd_compression_filter_and_plugin = ["file"]
                else:
                    # Clonezilla still has more compression formats: bzip2 lzo lzma lzip lrzip lz4 zstd
                    # TODO: This codepath shouldn't ever be hit currently due to being dealt with earlier.
                    # TODO: Use the FUSE-based tools archivemount and/or AVFS to provide at least some basic/slow fallback
                    # TODO: support for these compression formats widely used by Expert Mode Clonezilla users.
                    # During testing there was a 'permission denied' error using archivemount with nbd block devices,
                    # even after setting the correct configuration in /etc/fuse.conf and using `-o allow_root`.
                    failed_message = "Image Explorer (beta) doesn't yet support image compression: " + compression
                    is_unmount_success, unmount_failed_message = self._do_unmount(
                        destination_path, self.nbdkit_join_process_queue,
                        self.nbdkit_decompress_process_queue,
                        self.partclone_nbd_process_queue)
                    if not is_unmount_success:
                        failed_message += "\n\n" + unmount_failed_message
                    GLib.idle_add(callback, False, failed_message)
                    GLib.idle_add(please_wait_popup.destroy)
                    return

                # Launches nbdkit using eg, the 'file' plugin (`man nbdkit-file-plugin`) with a specific decompression
                # filter (eg, --filter=xz) to dynamically decompress the concatenated image being served on a /dev/nbdN
                # device node. Again using the truncate filter to zero pad to the nearest 512-byte block boundary.
                # Uses a non-standard port to avoid conflicting with earlier nbdkit usage.
                port = "10810"
                nbdkit_decompress_cmd_list = [
                    "nbdkit", "--no-fork", "--readonly", "--port", "10810",
                    "--filter=truncate"
                ] + nbd_compression_filter_and_plugin + [
                    "file=" + JOINED_FILES_NBD_DEVICE, "round-up=512"
                ]
                flat_command_string = Utility.print_cli_friendly(
                    "Create nbd server using nbdkit to dynamically decompress the concatenated image (and zero-pad the final partially-filled block to the 512-byte block boundary)",
                    [nbdkit_decompress_cmd_list])
                GLib.idle_add(
                    please_wait_popup.set_secondary_label_text,
                    "Decompressing the combined partclone image file (this may take while) (step 3/5)"
                )
                nbdkit_decompress_process = subprocess.Popen(
                    nbdkit_decompress_cmd_list,
                    stdout=subprocess.PIPE,
                    env=env,
                    encoding='utf-8')

                if self._check_stop_and_cleanup(please_wait_popup, callback,
                                                destination_path):
                    return
                print("Adding decompress process with pid " +
                      str(nbdkit_decompress_process.pid) + " to queue")
                self.nbdkit_decompress_process_queue.put(
                    nbdkit_decompress_process)

                nbdclient_connect_cmd_list = [
                    "nbd-client", "-block-size", "512", "localhost", port,
                    DECOMPRESSED_NBD_DEVICE
                ]
                is_success, message = Utility.retry_run(
                    short_description=
                    "Associating the nbdkit server process being used for dynamic DECOMPRESSION with the nbd device node. For gzip data this may take a while (as it effectively decompresses entire archive): "
                    + DECOMPRESSED_NBD_DEVICE,
                    cmd_list=nbdclient_connect_cmd_list,
                    expected_error_msg=
                    "Error: Socket failed: Connection refused",
                    retry_interval_seconds=1,
                    timeout_seconds=5,
                    is_shutdown_fn=self.is_stop_requested)

                if not is_success:
                    failed_message = message
                    is_unmount_success, unmount_failed_message = self._do_unmount(
                        destination_path, self.nbdkit_join_process_queue,
                        self.nbdkit_decompress_process_queue,
                        self.partclone_nbd_process_queue)
                    if not is_unmount_success:
                        failed_message += "\n\n" + unmount_failed_message
                    GLib.idle_add(callback, False, failed_message)
                    GLib.idle_add(please_wait_popup.destroy)
                    return

                if self._check_stop_and_cleanup(please_wait_popup, callback,
                                                destination_path):
                    return

                # TODO: Handle ntfsclone via partclone-utils' imagemount
                # TODO: Some dd images may be accessible using standard `mount` call
                partclone_nbd_mount_cmd_list = [
                    "partclone-nbd", "-d", MOUNTABLE_NBD_DEVICE, "-c",
                    DECOMPRESSED_NBD_DEVICE
                ]
                partclone_nbd_flat_command_string = Utility.print_cli_friendly(
                    "Processing partclone image with partclone-nbd:",
                    [partclone_nbd_mount_cmd_list])

                GLib.idle_add(
                    please_wait_popup.set_secondary_label_text,
                    "Processing image with partclone-nbd (this may take a while) (step 4/5)"
                )
                partclone_nbd_process = subprocess.Popen(
                    partclone_nbd_mount_cmd_list,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    env=env)

                print("Adding partclone-nbd process with pid " +
                      str(partclone_nbd_process.pid) + " to queue")
                self.partclone_nbd_process_queue.put(partclone_nbd_process)
                # Sentinel value for successful partclone-nbd mount. partclone-nbd is launched with C locale env, so this
                # string will match even for non-English locales.
                partclone_nbd_ready_msg = "[ INF ] Waiting for requests ..."

                # Poll the partclone-nbd stdout until the ready message has been received.
                # This relies on partclone-nbd exiting immediately if an error is generated; if it doesn't, the loop
                # spins until the timeout is hit.
                # TODO: A future translation of partclone-nbd may break this on non-English languages
                mounted_successfully = False
                queue = Utility.nonblocking_subprocess_pipe_queue(
                    partclone_nbd_process)
                partclone_nbd_output_list = []
                while True:
                    if self.is_stop_requested():
                        partclone_nbd_process.terminate()
                        # FIXME: Might need longer sleep?
                        sleep(1)
                        partclone_nbd_process.kill()
                        self._check_stop_and_cleanup(please_wait_popup,
                                                     callback,
                                                     destination_path)
                        return
                    try:
                        line = queue.get(timeout=0.1)
                    except Empty:
                        continue
                    else:
                        print(line)
                        line = line.decode("utf-8")
                        partclone_nbd_output_list += [line]
                        line = line.strip()
                        m = utility.REMatcher(line)
                        if m.match(r".*Waiting for requests.*"):
                            print("Detected partclone-nbd mount is ready!")
                            mounted_successfully = True
                            break
                    rc = partclone_nbd_process.poll()
                    if rc is not None:
                        break

                if not mounted_successfully:
                    partclone_nbd_flat_command_string = ""
                    for line in partclone_nbd_output_list:
                        partclone_nbd_flat_command_string += line
                    failed_message = "Unable to process image using partclone-nbd:\n\n" + partclone_nbd_flat_command_string + "\n\n"
                    if self.is_stop_requested():
                        self._do_unmount_wrapper(please_wait_popup, callback,
                                                 destination_path)
                        failed_message += "\n" + _(
                            "User requested operation to stop.")
                    is_unmount_success, unmount_failed_message = self._do_unmount(
                        destination_path, self.nbdkit_join_process_queue,
                        self.nbdkit_decompress_process_queue,
                        self.partclone_nbd_process_queue)
                    if not is_unmount_success:
                        failed_message += "\n\n" + unmount_failed_message
                    GLib.idle_add(callback, False, failed_message)
                    GLib.idle_add(please_wait_popup.destroy)
                    return

                if self._check_stop_and_cleanup(please_wait_popup, callback,
                                                destination_path):
                    return

            mount_cmd_list = [
                "mount", "-o", "ro", mount_device, destination_path
            ]
            GLib.idle_add(please_wait_popup.set_secondary_label_text,
                          "Mounting image (this may take a while) (step 5/5)")
            process, flat_command_string, failed_message = Utility.interruptable_run(
                "Mount ",
                mount_cmd_list,
                use_c_locale=False,
                is_shutdown_fn=self.is_stop_requested)
            if process.returncode != 0:
                is_unmount_success, unmount_failed_message = self._do_unmount(
                    destination_path, self.nbdkit_join_process_queue,
                    self.nbdkit_decompress_process_queue,
                    self.partclone_nbd_process_queue)
                if not is_unmount_success:
                    failed_message += "\n\n" + unmount_failed_message
                GLib.idle_add(callback, False, failed_message)
                GLib.idle_add(please_wait_popup.destroy)
                return

            if self._check_stop_and_cleanup(please_wait_popup, callback,
                                            destination_path):
                return

            backup_timeend = datetime.now()
            duration_minutes = Utility.get_human_readable_minutes_seconds(
                (backup_timeend - backup_timestart).total_seconds())
            duration_message = _("Operation took {num_minutes} minutes."
                                 ).format(num_minutes=duration_minutes)
            GLib.idle_add(callback, True, duration_message)
            GLib.idle_add(please_wait_popup.destroy)
        except Exception as e:
            tb = traceback.format_exc()
            print(tb)
            GLib.idle_add(callback, False, "Error mounting folder: " + tb)
            GLib.idle_add(please_wait_popup.destroy)
            # Cleanup just in case
            is_unmount_success, unmount_failed_message = self._do_unmount(
                destination_path, self.nbdkit_join_process_queue,
                self.nbdkit_decompress_process_queue,
                self.partclone_nbd_process_queue)
            if not is_unmount_success:
                print("Unmount failed " + unmount_failed_message)
Code example #11
File: parted.py Project: rescuezilla/rescuezilla
    def parse_parted_output(parted_output):
        """Converts the STDOUT of `fsarchiver probe detailed` into a dictionary. See unit tests for a complete example"""
        parted_dict = collections.OrderedDict([])
        parted_dict['partitions'] = collections.OrderedDict([])

        initial_split = re.split(r"Number\s*Start\s*End\s*Size.*Flags", parted_output)
        #print("looking at " + str(initial_split) )
        metadata_list = initial_split[0].splitlines()
        for metadata_line in metadata_list:
            #print("looking at line " + metadata_line)
            m = utility.REMatcher(metadata_line)
            if metadata_line.strip() == "":
                # Skip this line
                continue
            # Processing: Model: ATA VBOX HARDDISK (scsi)
            elif m.match(r"Model: (.+)"):
                parted_dict['model'] = m.group(1)
            # Processing: Disk /dev/sdc: 2147483648B
            elif m.match(r"Disk ([a-zA-Z0-9_/]+): ([0-9]+)([a-zA-Z]+)"):
                parted_dict['long_dev_node'] = m.group(1)
                parted_dict['capacity'] = int(m.group(2))
                parted_dict['units'] = m.group(3)
            # Processing: Sector size (logical/physical): 512B/512B
            elif m.match(r"Sector size .logical/physical.: ([0-9]+)[a-zA-Z]+/([0-9]+)[a-zA-Z]+"):
                parted_dict['logical_sector_size'] = int(m.group(1))
                parted_dict['physical_sector_size'] = int(m.group(2))
            # Processing: Partition Table: gpt
            elif m.match(r"Partition Table: ([a-zA-Z]+)"):
                parted_dict['partition_table'] = m.group(1)
            # Processing: Disk Flags:
            elif m.match(r"Disk Flags:(.*)"):
                parted_dict['flags'] = m.group(1)
            else:
                print("Could not process: " + metadata_line)

        if len(initial_split) > 1:
            partition_list = initial_split[1].splitlines()
            for partition_line in partition_list:
                if partition_line.strip() == "":
                    continue
                if parted_dict['partition_table'] == "gpt":
                    # Number  Start       End          Size         File system     Name  Flags
                    #  1      1048576B    65011711B    63963136B    ext4
                    column_title = re.search("Number\s+Start\s+End\s+Size\s+File system\s+Name\s+Flags", parted_output).group(0)
                    partition_line = partition_line.ljust(len(column_title), " ")
                    # Pad the line with spaces to allow splicing.
                    partition_number = int(partition_line[0:column_title.index("Start") - 1].strip())
                    parted_dict['partitions'][partition_number] = collections.OrderedDict()
                    #print("Looking at line " + partition_line)
                    parted_dict['partitions'][partition_number]['start'] = int(
                        partition_line[column_title.index("Start"):column_title.index("End") - 1].strip()[:-1])
                    parted_dict['partitions'][partition_number]['end'] = int(
                        partition_line[column_title.index("End"):column_title.index("Size") - 1].strip()[:-1])
                    parted_dict['partitions'][partition_number]['size'] = int(
                        partition_line[column_title.index("Size"):column_title.index("File system") - 1].strip()[:-1])
                    parted_dict['partitions'][partition_number]['filesystem'] = partition_line[column_title.index(
                        "File system"):column_title.index("Name") - 1].strip()
                    parted_dict['partitions'][partition_number]['name'] = partition_line[
                                                                     column_title.index("Name"):column_title.index(
                                                                         "Flags") - 1].strip()
                    parted_dict['partitions'][partition_number]['flags'] = partition_line[column_title.index("Flags"):len(
                        partition_line)].strip()

                # Number  Start       End          Size         Type      File system     Flags
                #  1      1048576B    91226111B    90177536B    primary   ext4
                elif parted_dict['partition_table'] == "msdos":
                    column_title = re.search("Number\s+Start\s+End\s+Size\s+Type\s+File system\s+Flags", parted_output).group(0)
                    partition_line = partition_line.ljust(len(column_title), " ")
                    # Pad the line with spaces to allow splicing.
                    partition_number = int(partition_line[0:column_title.index("Start") - 1].strip())
                    parted_dict['partitions'][partition_number] = collections.OrderedDict()
                    #print("Looking at line " + partition_line)
                    parted_dict['partitions'][partition_number]['start'] = int(
                        partition_line[column_title.index("Start"):column_title.index("End") - 1].strip()[:-1])
                    parted_dict['partitions'][partition_number]['end'] = int(
                        partition_line[column_title.index("End"):column_title.index("Size") - 1].strip()[:-1])
                    parted_dict['partitions'][partition_number]['size'] = int(
                        partition_line[column_title.index("Size"):column_title.index("Type") - 1].strip()[:-1])
                    parted_dict['partitions'][partition_number]['type'] = partition_line[
                        column_title.index("Type"):column_title.index("File system") - 1].strip()
                    parted_dict['partitions'][partition_number]['filesystem'] = partition_line[
                        column_title.index("File system"):column_title.index("Flags") - 1].strip()
                    parted_dict['partitions'][partition_number]['flags'] = partition_line[
                        column_title.index("Flags"):len(partition_line)].strip()
                else:
                    print("Could not process: " + partition_line)
        return parted_dict
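A hedged usage sketch with a truncated, illustrative `parted` output. The column alignment matters because the parser slices fixed column positions; the assumption that the method hangs off a Parted class follows from the indentation in parted.py:

    sample_parted_output = """Model: ATA VBOX HARDDISK (scsi)
Disk /dev/sdc: 2147483648B
Sector size (logical/physical): 512B/512B
Partition Table: gpt
Disk Flags:

Number  Start       End          Size         File system     Name  Flags
 1      1048576B    65011711B    63963136B    ext4
"""
    parted_dict = Parted.parse_parted_output(sample_parted_output)
    print(parted_dict['partitions'][1]['size'])  # 63963136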
Code example #12
File: partclone.py Project: xiogun/rescuezilla
    def parse_partclone_info_output(partclone_info_output):
        partclone_info_dict = collections.OrderedDict([])
        for line in partclone_info_output.splitlines():
            if line.strip() == "":
                continue
            elif line.startswith("Partclone v"):
                # Ignore the line Partclone v0.3.13 http://partclone.org
                continue
            elif line.startswith("Showing info of image (-)"):
                # Ignore the line
                continue
            else:
                m = utility.REMatcher(line)
                if m.match("File system:\s+([a-zA-Z0-9+]+)"):
                    partclone_info_dict['filesystem'] = m.group(1)
                elif m.match(
                        r"Device size:\s+([a-zA-Z0-9+\s\.]+) = ([0-9]+) Blocks"
                ):
                    partclone_info_dict['size'] = {
                        'enduser_readable': m.group(1),
                        'blocks': int(m.group(2).strip()),
                    }
                elif m.match(
                        r"Space in use:\s+([a-zA-Z0-9+\s\.]+) = ([0-9]+) Blocks"
                ):
                    partclone_info_dict['used_space'] = {
                        'enduser_readable': m.group(1),
                        'blocks': int(m.group(2).strip()),
                    }
                elif m.match(
                        r"Free Space:\s+([a-zA-Z0-9+\s\.]+) = ([0-9]+) Blocks"
                ):
                    partclone_info_dict['free_space'] = {
                        'enduser_readable': m.group(1),
                        'blocks': int(m.group(2).strip()),
                    }
                elif m.match("Block size:\s+([0-9+]+)"):
                    partclone_info_dict['block_size'] = int(m.group(1))
                elif m.match(r"image format:\s+([0-9a-zA-Z/]+)"):
                    partclone_info_dict['image_format'] = m.group(1)
                elif m.match(r"created on a:\s+([a-zA-Z/]+)"):
                    partclone_info_dict['created'] = m.group(1)
                elif m.match(r"with partclone:\s+([a-zA-Z/]+)"):
                    partclone_info_dict['with_partclone'] = m.group(1)
                elif m.match(r"bitmap mode:\s+([a-zA-Z/]+)"):
                    partclone_info_dict['bitmap_mode'] = m.group(1)
                elif m.match(r"checksum algo:\s+([a-zA-Z0-9_/]+)"):
                    partclone_info_dict['checksum_algo'] = m.group(1)
                elif m.match(r"checksum size:\s+([0-9]+)"):
                    partclone_info_dict['checksum_size'] = int(m.group(1))
                elif m.match(r"blocks/checksum:\s+([0-9]+)"):
                    partclone_info_dict['blocks/checksum'] = int(m.group(1))
                elif m.match(r"reseed checksum:\s+([a-zA-Z]+)"):
                    partclone_info_dict['reseed_checksum'] = "False"
                else:
                    print("Not yet interpreting partclone output: " + line)

        # Calculate the byte sizes for partition size, used space and free space.
        if 'block_size' in partclone_info_dict.keys():
            bs = partclone_info_dict['block_size']
            if 'size' in partclone_info_dict.keys():
                partclone_info_dict['size'][
                    'bytes'] = bs * partclone_info_dict['size']['blocks']
            if 'free_space' in partclone_info_dict.keys():
                partclone_info_dict['free_space'][
                    'bytes'] = bs * partclone_info_dict['free_space']['blocks']
            if 'used_space' in partclone_info_dict.keys():
                partclone_info_dict['used_space'][
                    'bytes'] = bs * partclone_info_dict['used_space']['blocks']
        return partclone_info_dict
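A hedged usage sketch with illustrative `partclone.info` output; the byte counts are derived at the end as blocks multiplied by block size (the Partclone class name is an assumption based on partclone.py):

    sample = """Partclone v0.3.13 http://partclone.org
Showing info of image (-)
File system:  EXTFS
Device size:  983.6 MB = 240127 Blocks
Space in use: 496.7 MB = 121264 Blocks
Free Space:   486.9 MB = 118863 Blocks
Block size:   4096 Byte
"""
    info = Partclone.parse_partclone_info_output(sample)
    print(info['filesystem'])     # EXTFS
    print(info['size']['bytes'])  # 983560192 (240127 blocks * 4096 bytes)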
Code example #13
 def extract_bytes_from_string(value):
     m = utility.REMatcher(value)
     if m.match(r"^.*\(([0-9]*) bytes\).*$"):
         return int(m.group(1))
     else:
         return 0
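For example:

    print(extract_bytes_from_string("512 MiB (536870912 bytes)"))  # 536870912
    print(extract_bytes_from_string("no size here"))               # 0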