def test_unpack_registery(self):

        formats = get_unpack_formats()

        def _boo(filename, extract_dir, extra):
            self.assertEquals(extra, 1)
            self.assertEquals(filename, 'stuff.boo')
            self.assertEquals(extract_dir, 'xx')

        register_unpack_format('Boo', ['.boo', '.b2'], _boo, [('extra', 1)])
        unpack_archive('stuff.boo', 'xx')

        # trying to register a .boo unpacker again
        self.assertRaises(RegistryError, register_unpack_format, 'Boo2',
                          ['.boo'], _boo)

        # should work now
        unregister_unpack_format('Boo')
        register_unpack_format('Boo2', ['.boo'], _boo)
        self.assertIn(('Boo2', ['.boo'], ''), get_unpack_formats())
        self.assertNotIn(('Boo', ['.boo'], ''), get_unpack_formats())

        # let's leave a clean state
        unregister_unpack_format('Boo2')
        self.assertEquals(get_unpack_formats(), formats)
Beispiel #2
0
    def test_unpack_registery(self):
        """Round-trip registration of a custom unpack format."""
        initial_formats = get_unpack_formats()

        def _handler(filename, extract_dir, extra):
            self.assertEqual(extra, 1)
            self.assertEqual(filename, 'stuff.boo')
            self.assertEqual(extract_dir, 'xx')

        register_unpack_format('Boo', ['.boo', '.b2'], _handler,
                               [('extra', 1)])
        unpack_archive('stuff.boo', 'xx')

        # A second unpacker for .boo must be refused while 'Boo' exists.
        self.assertRaises(RegistryError, register_unpack_format, 'Boo2',
                          ['.boo'], _handler)

        # After unregistering, the extension becomes available again.
        unregister_unpack_format('Boo')
        register_unpack_format('Boo2', ['.boo'], _handler)
        self.assertIn(('Boo2', ['.boo'], ''), get_unpack_formats())
        self.assertNotIn(('Boo', ['.boo'], ''), get_unpack_formats())

        # Restore the registry exactly as we found it.
        unregister_unpack_format('Boo2')
        self.assertEqual(get_unpack_formats(), initial_formats)
Beispiel #3
0
def test_register_new_archive_formats(prepare_shutil_state):
    """Registering new archive formats should be fine"""
    # Before registration, none of the extra formats may be known.
    before = [fmt[0] for fmt in shutil.get_unpack_formats()]
    for entry in tarball.ADDITIONAL_ARCHIVE_FORMATS:
        assert entry[0] not in before

    # when
    tarball.register_new_archive_formats()

    # then
    after = [fmt[0] for fmt in shutil.get_unpack_formats()]
    for entry in tarball.ADDITIONAL_ARCHIVE_FORMATS:
        assert entry[0] in after
Beispiel #4
0
def _remove_compressed_file_endings(toolchain_uri: str):
    unpack_extensions = ['.tar.bz2', '.bz2', '.tar.gz', '.tgz', '.tar', '.zip'] + \
                        sum(map(lambda info: info[1], shutil.get_unpack_formats()), [])
    for extension in reversed(sorted(unpack_extensions, key=len)):
        if toolchain_uri.endswith(extension):
            return toolchain_uri.replace(extension, '')
    return toolchain_uri
Beispiel #5
0
 def getFileExtension(self):
     """Return the first registered extension for the configured unpack
     format, or '' when the format is unknown."""
     wanted = self.config['format']
     for name, extensions, _description in get_unpack_formats():
         if name == wanted:
             ## TODO: Refactor to allow check on any possible format
             return extensions[0]
     return ''
Beispiel #6
0
def is_compressed(file):
    """Check if *file* is compressed in a format shutil can unpack.

    Matches the full set of registered unpack extensions against the
    end of the name; os.path.splitext() would report only '.gz' for a
    '.tar.gz' file and therefore miss multi-part extensions.
    """
    extensions = tuple(
        ext
        for unpack_format in shutil.get_unpack_formats()
        for ext in unpack_format[1]
    )
    return file.endswith(extensions)
def shutil_get_unpack_formats():
    """Print the unpack formats supported on the current system."""
    for name, extensions, desc in shutil.get_unpack_formats():
        # `extensions` holds the filename suffixes for this archive format.
        line = "{:5}: {}, names ending in {}".format(name, desc, extensions)
        print(line)
    r"""
Beispiel #8
0
    async def import_(self, ctx: commands.Context, source: str = None):
        """
        Imports sounds from an archive. Sounds are named the name of the file in the archive.
        Supports .zip, .tar, .tar.gz, .tgz, .tar.bz2, .tbz2, .tar.xz, and .txz archives.

        :param source: Download link to an archive. Can be omitted if archive is uploaded as an attachment.
        """
        # Fall back to the first message attachment when no URL was given.
        if source is None:
            try:
                source = ctx.message.attachments[0].url
            except (IndexError, KeyError):
                raise exceptions.NoDownload()

        with tempfile.NamedTemporaryFile('wb+') as f, tempfile.TemporaryDirectory() as d:

            # Stream the archive from the URL into the temporary file.
            async with aiohttp.ClientSession() as session:
                async with session.get(source) as resp:
                    filename = resp.url.name

                    # this might block but i don't care.
                    while True:
                        chunk = await resp.content.read(1 << 20)  # 1 MB
                        if not chunk:
                            break
                        f.write(chunk)

            f.seek(0)  # this probably doesn't matter

            # Map the download's file extension to a shutil format name.
            format = None
            for fmt in shutil.get_unpack_formats():
                for extension in fmt[1]:
                    if filename.endswith(extension):
                        format = fmt[0]
                        break
                # this weird stuff is because python has no syntax for
                # breaking out of multiple loops.
                else:
                    continue
                break

            # NOTE(review): format stays None for unknown extensions, in
            # which case unpack_archive guesses from f.name — likely to
            # fail for a suffix-less NamedTemporaryFile; confirm intended.
            shutil.unpack_archive(f.name, d, format=format)
            f.close()

            # Register each extracted file as a sound; collect name clashes.
            succeeded = []
            failed = []
            for path in Path(d).glob('**/*'):
                if not path.is_file():
                    continue
                try:
                    await self._add(ctx, path.name, source, path, unlink=False)
                    succeeded.append(path.name)
                except FileExistsError:
                    failed.append(path.name)
            msg = f'{len(succeeded)} imported. {len(failed)} failed.'
            if failed:
                msg += '\nFailed imports:\n'
                msg += '\n'.join(failed)
            await ctx.send(msg)
Beispiel #9
0
def register_new_archive_formats():
    """Register with shutil every entry of ADDITIONAL_ARCHIVE_FORMATS
    that is not already known."""
    already_known = {fmt[0] for fmt in shutil.get_unpack_formats()}
    for name, extensions, function in ADDITIONAL_ARCHIVE_FORMATS:
        if name not in already_known:
            shutil.register_unpack_format(name, extensions, function)
Beispiel #10
0
 def __init__(self, *args, **kwargs):
     """Set up WebDAV endpoints and the list of unpackable extensions.

     The first positional argument is the target path under which
     extracted archives will be written.
     """
     super(UnarchiveActivity, self).__init__(*args, **kwargs)
     targetPath = args[0]
     print("Extracting archives into %s" % targetPath)
     self.__threads = []
     base_url = self.doors('https', ['dcache-view'])
     self.__download_url = base_url
     self.__target_url = urljoin(base_url, targetPath + '/')
     # Every extension shutil can unpack, flattened across formats.
     self.__extensions = [
         ext
         for fmt in shutil.get_unpack_formats()
         for ext in fmt[1]
     ]
Beispiel #11
0
def get_sdist_extensions():
    """Return (and lazily populate) the cached list of sdist archive
    extensions known to shutil."""
    if not SDIST_EXTENSIONS:
        SDIST_EXTENSIONS.extend(
            ext
            for fmt in shutil.get_unpack_formats()
            for ext in fmt[1]
        )
    return SDIST_EXTENSIONS
Beispiel #12
0
def get_format(format):
    """Resolve *format* — a shutil format name, an extension, or an
    extension without the leading dot — to a shutil unpack-format name.

    Raises ValueError when nothing matches.
    """
    for fmt, extensions, _desc in shutil.get_unpack_formats():
        matches = (format == fmt
                   or format in extensions
                   or "." + format in extensions)
        if matches:
            return fmt
    raise ValueError(f"Unrecognized format {format}")
Beispiel #13
0
 def check_is_file_arc(name_arc):
     """Return True when *name_arc* ends with a known archive extension.

     Uses str.endswith rather than a substring test, so names that merely
     contain an extension in the middle (e.g. 'my.tar.backup') no longer
     count as archives.
     """
     extensions = tuple(
         ext for fmt in shutil.get_unpack_formats() for ext in fmt[1]
     )
     return name_arc.endswith(extensions)
Beispiel #14
0
def prepare_shutil_state():
    """Unregister any of tarball's additional archive formats that are
    currently registered, then hand back the shutil module."""
    import shutil

    known = [fmt[0] for fmt in shutil.get_unpack_formats()]
    for entry in tarball.ADDITIONAL_ARCHIVE_FORMATS:
        name = entry[0]
        if name in known:
            shutil.unregister_unpack_format(name)

    return shutil
Beispiel #15
0
 def extract_arhcive(name_arc, dir_name_out):
     """Unpack archive *name_arc* into *dir_name_out*.

     Returns (True, "ok") on success and (False, exception) on failure.
     The format is chosen by matching the archive name's *suffix*, so a
     name like 'my.tar.backup' is no longer mistaken for a tar file, and
     the archive is unpacked at most once.
     """
     try:
         for fmt_name, extensions, _desc in shutil.get_unpack_formats():
             if name_arc.endswith(tuple(extensions)):
                 shutil.unpack_archive(name_arc, dir_name_out, fmt_name)
                 break
     except Exception as e:
         return False, e
     else:
         return True, "ok"
Beispiel #16
0
    def unpack_archive(self, archive, chdir=True):
        """ An archive is unpacked in the current directory.  If requested its
        top level directory becomes the current directory.  The name of the
        directory (not it's pathname) is returned.

        :param archive: pathname of the archive to unpack.
        :param chdir: when True, change into the extracted directory.
        :return: the name (not pathname) of the extracted top-level directory.
        """

        # Windows (maybe just 32-bits) has a problem extracting the Qt source
        # archive (maybe the long pathnames).  As a work around we extract it
        # to the directory containing the archive and then move it later.
        archive_dir, archive_name = os.path.split(archive)
        original_cwd = os.getcwd()
        os.chdir(archive_dir)

        # Unpack the archive.
        try:
            shutil.unpack_archive(archive_name)
        except Exception as e:
            self.error("unable to unpack {0}".format(archive), detail=str(e))

        # Assume that the name of the extracted directory is the same as the
        # archive without the extension.
        archive_root = None
        for _, extensions, _ in shutil.get_unpack_formats():
            for ext in extensions:
                if archive_name.endswith(ext):
                    archive_root = archive_name[:-len(ext)]
                    break

            if archive_root:
                break
        else:
            # The for/else arm runs only when no extension matched at all.
            # This should never happen if we have got this far.
            self.error("'{0}' has an unknown extension".format(archive))

        # Validate the assumption by checking the expected directory exists.
        if not os.path.isdir(archive_root):
            self.error(
                "unpacking {0} did not create a directory called '{1}' as expected"
                .format(archive, archive_root))

        # Move the extracted archive.
        archive_root_path = os.path.join(original_cwd, archive_root)
        self.delete_dir(archive_root_path)
        os.rename(archive_root, archive_root_path)
        os.chdir(original_cwd)

        # Change to the extracted directory if required.
        if chdir:
            os.chdir(archive_root)

        # Return the directory name which the component plugin will often use
        # to extract version information.
        return archive_root
Beispiel #17
0
    def unpack_archive(self, archive, chdir=True):
        """ An archive is unpacked in the current directory.  If requested its
        top level directory becomes the current directory.  The name of the
        directory (not it's pathname) is returned.

        :param archive: pathname of the archive to unpack.
        :param chdir: when True, change into the extracted directory.
        :return: the name (not pathname) of the extracted top-level directory.
        """

        # Windows has a problem extracting the Qt source archive (probably the
        # long pathnames).  As a work around we copy it to the current
        # directory and extract it from there.
        self.copy_file(archive, '.')
        archive_name = os.path.basename(archive)

        # Unpack the archive.
        self.verbose("Unpacking '{}'".format(archive_name))

        try:
            shutil.unpack_archive(archive_name)
        except Exception as e:
            self.error("unable to unpack {0}".format(archive_name),
                       detail=str(e))

        # Assume that the name of the extracted directory is the same as the
        # archive without the extension.
        archive_root = None
        for _, extensions, _ in shutil.get_unpack_formats():
            for ext in extensions:
                if archive_name.endswith(ext):
                    archive_root = archive_name[:-len(ext)]
                    break

            if archive_root:
                break
        else:
            # The for/else arm runs only when no extension matched at all.
            # This should never happen if we have got this far.
            self.error("'{0}' has an unknown extension".format(archive))

        # Validate the assumption by checking the expected directory exists.
        if not os.path.isdir(archive_root):
            self.error(
                "unpacking {0} did not create a directory called '{1}' as expected"
                .format(archive_name, archive_root))

        # Delete the copied archive.
        os.remove(archive_name)

        # Change to the extracted directory if required.
        if chdir:
            os.chdir(archive_root)

        # Return the directory name which the component plugin will often use
        # to extract version information.
        return archive_root
Beispiel #18
0
def expandArchives(fs_root):
    """Walk *fs_root* and unpack every archive found into a sibling
    '<name>.extracted' directory.

    Extraction directories are appended to the walk's dirs list so that
    archives nested inside archives are expanded as well.
    """
    archive_extensions = []
    for _, file_types, _ in shutil.get_unpack_formats():
        archive_extensions.extend(file_types)
    # Longest first so '.tar.gz' is preferred over a shorter suffix.
    archive_extensions.sort(key=len, reverse=True)
    for root, dirs, files in os.walk(fs_root):
        for file in files:
            # Match the full suffix instead of os.path.splitext(), which
            # reports only '.gz' for 'x.tar.gz' and misses such archives.
            for ext in archive_extensions:
                if file.endswith(ext):
                    name = file[:-len(ext)]
                    archive_file = os.path.join(root, file)
                    extract_dir = os.path.join(root, name + ".extracted")
                    os.makedirs(extract_dir, exist_ok=True)
                    shutil.unpack_archive(archive_file, extract_dir=extract_dir)
                    dirs.append(extract_dir)
                    break
Beispiel #19
0
    def test_unpack_registery(self) -> None:
        """Verify registration, rejection, and unregistration of a
        custom unpack format."""

        snapshot = get_unpack_formats()

        def _handler(filename: str, extract_dir: str, extra: int) -> None:
            self.assertEqual(extra, 1)
            self.assertEqual(filename, "stuff.boo")
            self.assertEqual(extract_dir, "xx")

        register_unpack_format("Boo", [".boo", ".b2"], _handler, [("extra", 1)])
        unpack_archive("stuff.boo", "xx")

        # A second handler for .boo is rejected while "Boo" is registered.
        self.assertRaises(RegistryError, register_unpack_format, "Boo2", [".boo"], _handler)

        # Re-registration succeeds once the original is gone.
        unregister_unpack_format("Boo")
        register_unpack_format("Boo2", [".boo"], _handler)
        self.assertIn(("Boo2", [".boo"], ""), get_unpack_formats())
        self.assertNotIn(("Boo", [".boo"], ""), get_unpack_formats())

        # Leave the global registry exactly as we found it.
        unregister_unpack_format("Boo2")
        self.assertEqual(get_unpack_formats(), snapshot)
Beispiel #20
0
def unarchive_if_possible(dlto, year_dir):
    '''Unarchives a file if possible, returns True if it did.'''
    unzip_exts = []
    for fmt in shutil.get_unpack_formats():
        unzip_exts += fmt[1]

    # Match the filename suffix directly: os.path.splitext() reports only
    # '.gz' for 'x.tar.gz', so multi-part extensions would be missed.
    if os.path.exists(dlto) and dlto.endswith(tuple(unzip_exts)):
        shutil.unpack_archive(dlto, year_dir)
        os.remove(dlto)
        print("Unarchived", dlto)
        return True

    return False
Beispiel #21
0
def _extract_sdist(pypi_metadata: Dict[str, Any]) -> Dict:
    """Get sdist file path from the meta-data"""
    sdist_extensions = tuple(extension
                             for (name, extensions,
                                  description) in shutil.get_unpack_formats()
                             for extension in extensions)

    # The first one we can use. Usually a .tar.gz
    for entry in pypi_metadata["urls"]:
        if entry["filename"].endswith(sdist_extensions):
            return entry

    raise Exception("No sdist URL found for package %s (%s)" % (
        pypi_metadata["info"].get("name"),
        pypi_metadata["info"].get("package_url"),
    ))
Beispiel #22
0
def init_additional_unpackers():
  """Register external unpackers (7z or unrar) when available on the
  host system, then publish the flat list of supported extensions on
  ``download.unpack_formats``."""
  if try_cmd('7z'):
    register_unpack_format('7zip', [
      '.zipx', '.gz', '.z', '.cab',
      '.rar', '.lzh', '.7z', '.xz'
    ], un7z)
  elif try_cmd('unrar'):
    register_unpack_format('unrar', ['.rar'], unrar)

  # Flatten [(name, extensions, desc), ...] into one extension list.
  download.unpack_formats = [
    ext for fmt in get_unpack_formats() for ext in fmt[1]
  ]
Beispiel #23
0
    def download_and_install(link, font, fontdir):
        """Download a font payload from *link* and install it under *fontdir*.

        The payload type is sniffed with file(1); archives are unpacked to a
        temporary directory first.  Relies on module-level helpers printv /
        printq / prompt and the fccachetry flag — TODO confirm their contract.
        """
        # might need to spoof the user agent here
        with urllib.request.urlopen(link) as response:
            with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
                shutil.copyfileobj(response, tmp_file)

            fname = font.replace(" ", "_")
            if "Content-Disposition" in response.info():
                hh = cgi.parse_header(response.info()["Content-Disposition"])
                fname = os.path.splitext(hh[1]["filename"])[0]

            # Ask file(1) what the download actually is (zip, tar, font, ...).
            magic = subprocess.run(
                ["file", tmp_file.name], stdout=subprocess.PIPE, universal_newlines=True
            ).stdout.strip()
            printv(magic)

            # Match a shutil format name against the magic description.
            for fmt in shutil.get_unpack_formats():
                printv(fmt)
                if fmt[0].lower() in magic.lower():
                    # think we'll need to unpack in place and then sudo mv
                    with tempfile.TemporaryDirectory() as tmpdir:
                        dirname = os.path.join(tmpdir, fname)
                        shutil.unpack_archive(tmp_file.name, dirname, fmt[0])
                        if fccachetry:
                            prompt(
                                "sudo mv "
                                + shlex.quote(dirname)
                                + " "
                                + shlex.quote(os.path.join(fontdir, fname))
                            )
                        elif sys.platform.startswith("darwin"):
                            os.system("open " + shlex.quote(dirname) + "/*.ttf")
                            os.system("open " + shlex.quote(dirname) + "/*.otf")
                        break
            # for/else: runs only when no unpack format matched the magic.
            else:
                if "font" in magic:
                    prompt(
                        "sudo mv "
                        + shlex.quote(tmp_file.name)
                        + " "
                        + shlex.quote(os.path.join(fontdir, fname))
                    )
                else:
                    printq("Could not figure out what to do! Skipping")
                    printq(link)
Beispiel #24
0
    def recursive_uncompress(self,
                             output_path=None,
                             remove_source=False,
                             remove_sub_sources=True,
                             clean_before_uncompress=False):
        """Uncompress this file, then recursively uncompress any archives
        found anywhere inside the output folder."""
        if output_path is None:
            output_path = self.file_output_folder
        else:
            self.file_output_folder = output_path

        self.uncompress(output_path=output_path,
                        remove_source=remove_source,
                        clean_before_uncompress=clean_before_uncompress)

        # Look for nested archives of every unpackable extension and
        # expand each of them in place.
        for _fmt_name, extensions, _desc in shutil.get_unpack_formats():
            for extension in extensions:
                pattern = output_path + "/**/*" + extension
                for nested in glob.glob(pattern, recursive=True):
                    CompressedFile(nested).recursive_uncompress(
                        remove_source=remove_sub_sources)
Beispiel #25
0
import urllib.error
import urllib.request
import warnings
from pathlib import Path
from typing import Any, Literal

from ruamel.yaml import YAML


class MkpkgFailedException(Exception):
    """Raised when package creation (mkpkg) cannot be completed."""
    pass


# Every filename suffix shutil can unpack (e.g. '.tar.gz', '.zip'),
# used to recognize sdist archives among PyPI download entries.
SDIST_EXTENSIONS = tuple(
    ext
    for _name, extensions, _desc in shutil.get_unpack_formats()
    for ext in extensions
)


def _find_sdist(pypi_metadata: dict[str, Any]) -> dict[str, Any] | None:
    """Return the first sdist entry from the PyPI metadata, or None."""
    # PyPI lists several distributions per release; take the first sdist
    # whose filename ends with a known archive extension (usually .tar.gz).
    for entry in pypi_metadata["urls"]:
        is_sdist = entry["packagetype"] == "sdist"
        if is_sdist and entry["filename"].endswith(SDIST_EXTENSIONS):
            return entry
    return None


def _find_wheel(pypi_metadata: dict[str, Any]) -> dict[str, Any] | None:
    """Get wheel file path from the metadata"""
Beispiel #26
0
import argparse
import copy
import logging
import pathlib
import shutil
import sys
import tarfile
import unittest

from .base import Profile, Target, Scope
from .build import Build
from .config import ConfigDict
from .tests import Skip, TestCase
from . import compilers, targets

# shutil has no handler for a bare '.xz' suffix (only '.tar.xz'/'.txz');
# register one that untars the file via the tarfile module, which
# auto-detects xz compression.
if not any('.xz' in fmt[1] for fmt in shutil.get_unpack_formats()):
	def _extract_xz(filename, extract_dir):
		"""Unpack *filename* (a tar file with an .xz suffix) into *extract_dir*.

		Raises shutil.ReadError when the file is not a tar archive.
		"""
		try:
			tarobj = tarfile.open(filename)
		except tarfile.TarError as e:
			# Bug fix: the original raised a bare `ReadError`, which is a
			# NameError at runtime — use the exception shutil callers expect.
			raise shutil.ReadError('{} is not a tar file'.format(filename)) from e

		try:
			tarobj.extractall(extract_dir)
		finally:
			tarobj.close()

	shutil.register_unpack_format('XZ file', ['.xz'], _extract_xz, [], 'Tar file compressed with XZ (LZMA) algorithm')
Beispiel #27
0
def stripPackExt(name):
    """Strip a known archive extension (e.g. '.tar.gz') from *name*.

    Returns *name* unchanged when it carries no recognized extension.
    """
    for fmt in shutil.get_unpack_formats():
        for ext in fmt[1]:
            if name.endswith(ext):
                # Slice off the suffix: str.rstrip() strips a *character
                # set*, so 'data.tar'.rstrip('.tar') would yield 'd'.
                return name[:-len(ext)]
    return name
Beispiel #28
0
print(shutil.which("io_example.py"))
print(shutil.which("python3"))

path = os.pathsep.join([".", os.path.expanduser("~")])
mode = os.F_OK | os.R_OK
# path 参数默认是 os.environ('PATH'),但是可以是任何由 os.pathsep 分隔的字符串
filename = shutil.which("jmeter.log", mode=mode, path=path)
print(path)
print(filename)
print()

print("四、 压缩文件")
for format, des in shutil.get_archive_formats():
    print(f"{format:>5}  {des}")

for format, ends, des in shutil.get_unpack_formats():
    print(f"{format :>5} {str(ends): <25} {des}")

logging.basicConfig(format="%(message)s",
                    stream=sys.stdout,
                    level=logging.DEBUG)
logger = logging.getLogger("pymotw")
print("Creating archive:")
shutil.make_archive("test_files/example",
                    "gztar",
                    root_dir=".",
                    base_dir="test_files",
                    logger=logger)
print("\nArchive contents:")
with tarfile.open("test_files/example.tar.gz", "r") as t:
    for n in t.getnames():
Beispiel #29
0
def getSupportedExtensions():
    """Return every archive extension shutil knows how to unpack."""
    return [
        extension
        for fmt in shutil.get_unpack_formats()
        for extension in fmt[1]
    ]
Beispiel #30
0
# shutil module demo: copying and (un)compressing files.

import shutil

# shutil.copy('test.csv', 'test_copy.csv')    # copy test.csv to test_copy.csv
# shutil.copyfile('test.csv', 'test_copy_2.csv')
#
# shutil.copytree('../file', '../file2')  # copy directory `file` to `file2`
# # Copy directory `file` to `file2_ignore`, skipping files starting with test or os
# shutil.copytree('../file', '../file2_ignore', ignore=shutil.ignore_patterns('test*', 'os*'))

# Compress the parent directory's `class` folder into test.zip in the current
# directory.  'test' may also be an absolute path; here it is relative.
shutil.make_archive('test', 'zip', '../class')

shutil.unpack_archive('test.zip', 'extracted',
                      'zip')  # unpack test.zip (zip format) into ./extracted

print(shutil.get_archive_formats())  # archive formats available for creation
print(shutil.get_unpack_formats())  # formats available for unpacking
Beispiel #31
0
    async def download_profile(self, delete=False, chunk_size=4096) -> None:
        """Download all necessary profile files from the internet and extract them.

        :param delete: when True, wipe the download cache before starting.
        :param chunk_size: size in bytes of streamed download chunks.
        """
        self.download_status = []

        output_dir = Path(self.profile.write_path())
        download_dir = Path(
            self.profile.write_path(
                self.profile.get("download.cache_dir", "download")))

        if delete and download_dir.exists():
            self._logger.debug("Deleting download cache at %s", download_dir)
            shutil.rmtree(download_dir)

        download_dir.mkdir(parents=True, exist_ok=True)

        # Load configuration
        conditions = self.profile.get("download.conditions", {})
        all_files = self.profile.get("download.files", {})
        files_to_copy = {}
        files_to_extract: Dict[str, List[Tuple[str, str]]] = defaultdict(list)
        files_to_download: Set[str] = set()

        # NOTE(review): the status f-strings below contain the literal text
        # "(unknown)" — this looks like a lost interpolation field; confirm
        # against the upstream source.
        async def download_file(url, filename):
            try:
                status = f"Downloading {url} to (unknown)"
                self.download_status.append(status)
                self._logger.debug(status)
                os.makedirs(os.path.dirname(filename), exist_ok=True)

                async with self.session.get(url) as response:
                    with open(filename, "wb") as out_file:
                        async for chunk in response.content.iter_chunked(
                                chunk_size):
                            out_file.write(chunk)

                status = f"Downloaded (unknown)"
                self.download_status.append(status)
                self._logger.debug(status)
            except Exception:
                self._logger.exception(url)

                # Try to delete partially downloaded file
                try:
                    status = f"Failed to download (unknown)"
                    self.download_status.append(status)
                    self._logger.debug(status)
                    os.unlink(filename)
                except Exception:
                    pass

        # Check conditions
        machine_type = platform.machine()
        download_tasks = []
        for setting_name in conditions:
            real_value = self.profile.get(setting_name, None)

            # Compare setting values
            for setting_value, files_dict in conditions[setting_name].items():
                compare_func = self._get_compare_func(setting_value)

                if compare_func(real_value):
                    # Check if file needs to be downloaded
                    for dest_name, src_name in files_dict.items():
                        dest_path = os.path.join(output_dir, dest_name)
                        if ":" in src_name:
                            # File is an archive
                            src_name, src_extract = src_name.split(":",
                                                                   maxsplit=1)
                            src_path = os.path.join(download_dir, src_name)
                            files_to_extract[src_path].append(
                                (dest_path, src_extract))
                        else:
                            # Just a regular file
                            src_path = os.path.join(download_dir, src_name)
                            files_to_copy[src_path] = dest_path

                        # Get download/cache info for file
                        src_info = all_files.get(src_name, None)
                        if src_info is None:
                            self._logger.error("No entry for download file %s",
                                               src_name)
                            continue

                        if not src_info.get("cache", True):
                            # File will be downloaded in-place
                            files_to_copy.pop(src_path)
                            src_path = dest_path

                        # Check if file is already in cache
                        if os.path.exists(src_path) and (
                                os.path.getsize(src_path) > 0):
                            self._logger.debug("Using cached %s for %s",
                                               src_path, dest_name)
                        else:
                            # File needs to be downloaded
                            src_url = src_info.get("url", None)
                            if src_url is None:
                                # Try with machine type
                                if machine_type in src_info:
                                    src_url = src_info[machine_type]["url"]
                                else:
                                    self._logger.error(
                                        "No entry for download file %s with machine type %s",
                                        src_url,
                                        machine_type,
                                    )
                                    continue

                            # Schedule file for download
                            if src_url not in files_to_download:
                                download_tasks.append(
                                    self.loop.create_task(
                                        download_file(src_url, src_path)))
                                files_to_download.add(src_url)

        # Wait for downloads to complete
        await asyncio.gather(*download_tasks)

        # Copy files
        for src_path, dest_path in files_to_copy.items():
            # Remove existing file/directory
            if os.path.isdir(dest_path):
                self._logger.debug("Removing %s", dest_path)
                shutil.rmtree(dest_path)
            elif os.path.isfile(dest_path):
                self._logger.debug("Removing %s", dest_path)
                os.unlink(dest_path)

            # Create necessary directories
            os.makedirs(os.path.dirname(dest_path), exist_ok=True)

            # Copy file/directory as is
            status = f"Copying {src_path} to {dest_path}"
            self.download_status.append(status)
            self._logger.debug(status)
            if os.path.isdir(src_path):
                shutil.copytree(src_path, dest_path)
            else:
                shutil.copy2(src_path, dest_path)

        # Extract/install files
        unpack_extensions = [
            ext for fmt in shutil.get_unpack_formats() for ext in fmt[1]
        ]

        for src_path, extract_paths in files_to_extract.items():
            # Check if the file extension will be understood by shutil.unpack_archive
            known_format = False
            for ext in unpack_extensions:
                if src_path.endswith(ext):
                    known_format = True

            # NOTE(review): these closures capture the loop variable
            # src_path late; they are only called inside this iteration,
            # so the binding is correct, but confirm before refactoring.
            def unpack_default(temp_dir):
                return shutil.unpack_archive(src_path, temp_dir)

            def unpack_gz(temp_dir):
                return self._unpack_gz(src_path, temp_dir)

            unpack = unpack_default

            if not known_format:
                # Handle special archives
                if src_path.endswith(".gz"):
                    # Single file compressed with gzip
                    unpack = unpack_gz
                else:
                    # Very bad situation
                    self._logger.warning(
                        "Unknown archive extension %s. This is probably going to fail.",
                        src_path,
                    )

            # Cached file is an archive. Unpack first.
            with tempfile.TemporaryDirectory() as temp_dir:
                unpack(temp_dir)

                for dest_path, src_extract in extract_paths:
                    src_exclude: Dict[str, List[str]] = {}
                    if "!" in src_extract:
                        extract_parts = src_extract.split("!")
                        src_extract = extract_parts[0]
                        src_exclude = defaultdict(list)
                        for exclude_path in extract_parts[1:]:
                            exclude_path = os.path.join(temp_dir, exclude_path)
                            exclude_dir, exclude_name = os.path.split(
                                exclude_path)
                            src_exclude[exclude_dir].append(exclude_name)

                    # Remove existing file/directory
                    if os.path.isdir(dest_path):
                        self._logger.debug("Removing %s", dest_path)
                        shutil.rmtree(dest_path)
                    elif os.path.isfile(dest_path):
                        self._logger.debug("Removing %s", dest_path)
                        os.unlink(dest_path)

                    # Create necessary directories
                    os.makedirs(os.path.dirname(dest_path), exist_ok=True)

                    if src_extract.endswith(":"):
                        # Unpack .gz inside archive
                        src_path = os.path.join(temp_dir, src_extract[:-1])
                        extract_path = self._unpack_gz(src_path, temp_dir)
                    else:
                        # Regular file
                        extract_path = os.path.join(temp_dir, src_extract)

                    # Copy specific file/directory
                    status = f"Copying {extract_path} to {dest_path}"
                    self.download_status.append(status)
                    self._logger.debug(status)
                    if os.path.isdir(extract_path):
                        if src_exclude:
                            # Ignore some files
                            # pylint: disable=W0640
                            shutil.copytree(
                                extract_path,
                                dest_path,
                                ignore=lambda d, fs: src_exclude[d],
                            )
                        else:
                            # Copy everything
                            shutil.copytree(extract_path, dest_path)
                    else:
                        shutil.copy2(extract_path, dest_path)
def shutil_get_unpack_formats():
    """Print each supported unpack format with its extensions."""
    for name, extensions, description in shutil.get_unpack_formats():
        line = '{:<5}: {}, names ending in {}'.format(name, description,
                                                      extensions)
        print(line)
import shutil
import os

#arc_formats = shutil.get_archive_formats()
# Show every unpack format shutil supports on this system.
unarc_formats = shutil.get_unpack_formats()
print(unarc_formats)

# Source File location
# NOTE(review): hard-coded user path — adjust before running elsewhere.
source_file = '/home/arnamaity/Downloads/expense_tracker_api-master.zip'

# Constructing the path of the extraction location.
dir_path = os.getcwd()
path = os.path.join(dir_path, 'expense_tracker_api/')
# os.mkdir raises FileExistsError if the directory already exists.
os.mkdir(path)
dest_dir = path

print(path)

# Extract the files
shutil.unpack_archive(source_file, dest_dir, 'zip')

# Print the Extracted Contents.
dir_list = os.listdir(path)
print('The final directory list: ')
print(dir_list)
Beispiel #34
0
#
# def remove_readonly(func, path, _):
#     "Clear the readonly bit and reattempt the removal"
#     os.chmod(path, stat.S_IWRITE)
#     func(path)
#
# shutil.rmtree(directory, onerror=remove_readonly)

# shutil.make_archive(base_name, format[, root_dir[, base_dir[, verbose[, dry_run[, owner[, group[, logger]]]]]]])
# Creates an archive file (e.g. zip or tar) and returns its name.
# print(shutil.make_archive("archive_name", 'zip',"./"))
# Beware: archiving the current directory this way recursively includes
# the archive being written!

# Example: create a gzip-compressed tar archive containing every file in
# the user's .ssh directory:
# from shutil import make_archive
# import os
# archive_name = os.path.expanduser(os.path.join('~', 'myarchive'))
# root_dir = os.path.expanduser(os.path.join('~', '.ssh'))
# print(make_archive(archive_name, 'gztar', root_dir))
# '/Users/tarek/myarchive.tar.gz'

# shutil.unpack_archive(filename[, extract_dir[, format]])
# Unpack an archive; `filename` is the full path of the archive file.

print(shutil.get_archive_formats())
# [('bztar', "bzip2'ed tar-file"), ('gztar', "gzip'ed tar-file"), ('tar', 'uncompressed tar file'), ('xztar', "xz'ed tar-file"), ('zip', 'ZIP file')]
print(shutil.get_unpack_formats())
# [('bztar', ['.tar.bz2', '.tbz2'], "bzip2'ed tar-file"), ('gztar', ['.tar.gz', '.tgz'], "gzip'ed tar-file"), ('tar', ['.tar'], 'uncompressed tar file'), ('xztar', ['.tar.xz', '.txz'], "xz'ed tar-file"), ('zip', ['.zip'], 'ZIP file')]

print(shutil.get_terminal_size())