def test_file_time_setters(archfmt, timefmt, tmpdir):
    has_birthtime = archfmt != 'zip'

    # Create an archive of our libarchive/ directory
    archive_path = tmpdir.join('/test.{0}'.format(archfmt)).strpath
    archive2_path = tmpdir.join('/test2.{0}'.format(archfmt)).strpath
    with file_writer(archive_path, archfmt) as archive1:
        archive1.add_files('libarchive/')

    atimestamp = (1482144741, 495628118)
    mtimestamp = (1482155417, 659017086)
    ctimestamp = (1482145211, 536858081)
    btimestamp = (1482144740, 495628118)
    with file_reader(archive_path) as archive1:
        with file_writer(archive2_path, archfmt) as archive2:
            for entry in archive1:
                entry.set_atime(*atimestamp)
                entry.set_mtime(*mtimestamp)
                entry.set_ctime(*ctimestamp)
                if has_birthtime:
                    entry.set_birthtime(*btimestamp)
                archive2.add_entries([entry])

    with file_reader(archive2_path) as archive2:
        for entry in archive2:
            assert entry.atime == time_check(atimestamp, timefmt)
            assert entry.mtime == time_check(mtimestamp, timefmt)
            assert entry.ctime == time_check(ctimestamp, timefmt)
            if has_birthtime:
                assert entry.birthtime == time_check(btimestamp, timefmt)
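
The assertions above rely on a time_check helper that is not shown. A minimal sketch, assuming timefmt merely selects between integer seconds and float seconds-plus-nanoseconds (the actual test fixture may differ):

def time_check(time_tuple, timefmt):
    # Hypothetical helper: convert a (seconds, nanoseconds) pair into the
    # representation the entry time properties are expected to return.
    seconds, nanos = time_tuple
    if timefmt == 'float':
        return float(seconds) + float(nanos) / 1e9
    return seconds  # integer seconds; sub-second precision is dropped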
Example #3
    def package_files(self,
                      files_list,
                      archive_format="zip",
                      archive_format2=""):
        if archive_format == "zip":
            with libarchive.file_writer(self.file_path,
                                        archive_format) as package:
                for every in files_list:
                    package.add_files(every)
        elif (archive_format == "ustar" and
                archive_format2 == "gzip"):
            with libarchive.file_writer(self.file_path,
                                        archive_format,
                                        archive_format2) as package:
                for every in files_list:
                    package.add_files(every)
def test_file_atime_ctime(archfmt, timefmt, tmpdir):
    archive_path = "{0}/test.{1}".format(tmpdir.strpath, archfmt)

    # Collect information on what should be in the archive
    tree = treestat('libarchive', stat_dict)

    # Create an archive of our libarchive/ directory
    with file_writer(archive_path, archfmt) as archive:
        archive.add_files('libarchive/')

    # Read the archive and check that the data is correct
    with file_reader(archive_path) as archive:
        check_atime_ctime(archive, tree, timefmt=timefmt)
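
The atime/ctime tests reference treestat and stat_dict helpers that are not shown. A minimal sketch, assuming they walk the directory tree and record a few stat fields per path (the exact fields kept here are an assumption):

import os

def stat_dict(path):
    # Hypothetical helper: keep a small, comparable subset of stat fields.
    st = os.lstat(path)
    return {key: getattr(st, 'st_' + key) for key in ('uid', 'gid', 'mtime')}

def treestat(d, stat_func=stat_dict):
    # Hypothetical helper: map every path under d to its stat summary.
    r = {}
    for dirpath, dirnames, filenames in os.walk(d):
        r[dirpath] = stat_func(dirpath)
        for fname in filenames:
            fpath = os.path.join(dirpath, fname)
            r[fpath] = stat_func(fpath)
    return r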
Example #5
def test_file_atime_ctime(tmpdir):
    archive_path = tmpdir.strpath + '/test.zip'

    # Collect information on what should be in the archive
    tree = treestat('libarchive', stat_dict)

    # Create an archive of our libarchive/ directory
    with file_writer(archive_path, 'zip') as archive:
        archive.add_files('libarchive/')

    # Read the archive and check that the data is correct
    with file_reader(archive_path) as archive:
        check_atime_ctime(archive, tree)
Example #6
def compress_libarchive(format_name, filter_name, input_files: list,
                        output_archive):
    """Write input_files into output_archive with libarchive.

    On macOS, point the bindings at the Homebrew library first, e.g.:
    export LIBARCHIVE=/usr/local/Cellar/libarchive/3.4.3/lib/libarchive.dylib

    format_name: e.g. '7zip', 'v7tar'
    filter_name: e.g. 'bzip2' (or empty for no filter)
    """
    with libarchive.file_writer(output_archive, format_name, filter_name) as f:
        # add_files() takes individual paths, so unpack the list
        f.add_files(*input_files)
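
A hedged usage sketch for the helper above, using a format/filter pair from its docstring (file and archive names are placeholders):

# Write two local files into a bzip2-compressed v7tar archive.
compress_libarchive('v7tar', 'bzip2', ['a.txt', 'b.txt'], 'out.tar.bz2')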
Example #7
def test_file_time_setters(tmpdir):
    # Create an archive of our libarchive/ directory
    archive_path = tmpdir.join('/test.zip').strpath
    archive2_path = tmpdir.join('/test2.zip').strpath

    atimestamp = (1482144741, 495628118)
    mtimestamp = (1482155417, 659017086)
    ctimestamp = (1482145211, 536858081)
    with file_writer(archive_path, "zip") as archive1:
        archive1.add_files('libarchive/')

    with file_reader(archive_path) as archive1:
        with file_writer(archive2_path, "zip") as archive2:
            for entry in archive1:
                entry.set_atime(*atimestamp)
                entry.set_mtime(*mtimestamp)
                entry.set_ctime(*ctimestamp)
                archive2.add_entries([entry])

    with file_reader(archive2_path) as archive2:
        for entry in archive2:
            assert entry.atime == atimestamp[0]
            assert entry.mtime == mtimestamp[0]
            assert entry.ctime == ctimestamp[0]
Example #9
File: test_rpm.py  Project: 3v1n0/snapcraft
    def test_pull_rpm_file_must_extract(self):
        rpm_file_name = 'test.rpm'
        dest_dir = 'src'
        os.makedirs(dest_dir)

        test_file_path = os.path.join(self.path, 'test.txt')
        open(test_file_path, 'w').close()
        rpm_file_path = os.path.join(self.path, rpm_file_name)
        os.chdir(self.path)
        with libarchive.file_writer(rpm_file_path, 'cpio', 'gzip') as rpm:
            rpm.add_files('test.txt')

        rpm_source = sources.Rpm(rpm_file_path, dest_dir)
        rpm_source.pull()

        self.assertEqual(os.listdir(dest_dir), ['test.txt'])
Example #10
File: test_rpm.py  Project: 3v1n0/snapcraft
    def test_extract_and_keep_rpmfile(self):
        rpm_file_name = 'test.rpm'
        dest_dir = 'src'
        os.makedirs(dest_dir)

        test_file_path = os.path.join(self.path, 'test.txt')
        open(test_file_path, 'w').close()
        rpm_file_path = os.path.join(self.path, rpm_file_name)
        os.chdir(self.path)
        with libarchive.file_writer(rpm_file_path, 'cpio', 'gzip') as rpm:
            rpm.add_files('test.txt')

        rpm_source = sources.Rpm(rpm_file_path, dest_dir)
        # This is the first step done by pull. We don't call pull here so that
        # we can run the second step with a different keep_rpm value.
        shutil.copy2(rpm_source.source, rpm_source.source_dir)
        rpm_source.provision(dst=dest_dir, keep_rpm=True)

        test_output_files = ['test.txt', rpm_file_name]
        self.assertCountEqual(os.listdir(dest_dir), test_output_files)
Example #12
def test_files(tmpdir):
    archive_path = tmpdir.strpath+'/test.tar.gz'

    # Collect information on what should be in the archive
    tree = treestat('libarchive')

    # Create an archive of our libarchive/ directory
    with libarchive.file_writer(archive_path, 'ustar', 'gzip') as archive:
        archive.add_files('libarchive/')

    # Read the archive and check that the data is correct
    with libarchive.file_reader(archive_path) as archive:
        check_archive(archive, tree)

    # Extract the archive in tmpdir and check that the data is intact
    with in_dir(tmpdir.strpath):
        flags = EXTRACT_OWNER | EXTRACT_PERM | EXTRACT_TIME
        libarchive.extract_file(archive_path, flags)
        tree2 = treestat('libarchive')
        assert tree2 == tree
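
The extraction step above uses an in_dir helper that is not shown. A minimal sketch, assuming it is a simple change-directory context manager (not the actual fixture):

import os
from contextlib import contextmanager

@contextmanager
def in_dir(dirpath):
    # Hypothetical helper: chdir into dirpath, restore the old cwd on exit.
    prev = os.getcwd()
    os.chdir(dirpath)
    try:
        yield
    finally:
        os.chdir(prev)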
Example #14
def create_compressed_tarball(prefix,
                              files,
                              tmpdir,
                              basename,
                              ext,
                              compression_filter,
                              filter_opts=''):
    tmp_path = os.path.join(tmpdir, basename)
    files = _sort_file_order(prefix, files)

    # add files in order of a) in info directory, b) increasing size so
    # we can access small manifest or json files without decompressing
    # possible large binary or data files
    fullpath = tmp_path + ext
    with utils.tmp_chdir(prefix):
        with libarchive.file_writer(fullpath,
                                    'gnutar',
                                    filter_name=compression_filter,
                                    options=filter_opts) as archive:
            archive.add_files(*files)
    return fullpath
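
_sort_file_order is not shown above. One possible implementation matching the comment (an assumption about the helper, not its actual code) puts info/ entries first and orders the rest by increasing size:

import os

def _sort_file_order(prefix, files):
    # Hypothetical helper: info/ files first, then by file size ascending.
    def sort_key(f):
        in_info = 0 if f.startswith('info/') or f.startswith('info' + os.sep) else 1
        try:
            size = os.lstat(os.path.join(prefix, f)).st_size
        except OSError:
            size = 0
        return (in_info, size)
    return sorted(files, key=sort_key)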
Example #15
    def archive_files(self, arch_file_bytes, arch_info, include_dirs=False):
        try:
            archive_type = ArchiveType(arch_info["type"])
        except Exception as ex:
            return arch_file_bytes

        if len(arch_file_bytes) > archive_type.maxSize:
            return arch_file_bytes

        tmp_dir = tempfile.mkdtemp()

        try:
            with in_dir(tmp_dir):
                flags = (
                    libarchive.extract.EXTRACT_OWNER | libarchive.extract.EXTRACT_PERM | libarchive.extract.EXTRACT_TIME
                )
                libarchive.extract_memory(arch_file_bytes, flags)
        except Exception as exce:
            return arch_file_bytes

        files_list = list()
        for dirname, dirnames, filenames in os.walk(tmp_dir):
            dirz = dirname.replace(tmp_dir, ".")
            if include_dirs:
                files_list.append(dirz)
            for f in filenames:
                fn = os.path.join(dirz, f)
                files_list.append(fn)

        patch_count = 0
        patched = False
        tmp_archive = tempfile.NamedTemporaryFile()

        try:
            with libarchive.file_writer(tmp_archive.name, arch_info["format"], arch_info["filter"]) as archive:
                for filename in files_list:
                    full_path = os.path.join(tmp_dir, filename)
                    if os.path.islink(full_path) or not os.path.isfile(full_path):
                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                        continue

                    if os.lstat(full_path).st_size >= long(self.file_size_max):
                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                        continue

                    # Check against keywords
                    keyword_check = False

                    if type(archive_type.blacklist) is str:
                        if archive_type.blacklist.lower() in filename.lower():
                            keyword_check = True
                    else:
                        for keyword in archive_type.blacklist:
                            if keyword.lower() in filename.lower():
                                keyword_check = True
                                continue

                    if keyword_check is True:
                        continue

                    if patch_count >= archive_type.patchCount:
                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                    else:
                        # create the file on disk temporarily for binaryGrinder to run on it
                        tmp = tempfile.NamedTemporaryFile()
                        shutil.copyfile(full_path, tmp.name)
                        tmp.flush()

                        with stdout_redirect(StringIO.StringIO()) as new_stdout:
                            patch_result = self.binary_injector(tmp.name)
                        if patch_result:
                            patch_count += 1
                            file2 = os.path.join(self.staging_folder, os.path.basename(tmp.name))
                            # let's move the backdoored file to the final location
                            shutil.copyfile(file2, full_path)
                            os.remove(file2)
                            patched = True
                            self.context.log("Patching {}: done".format(filename))
                        else:
                            self.context.log("Patching {}: failed".format(filename), level="error")

                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                        tmp.close()

        except Exception as exc:
            shutil.rmtree(tmp_dir, ignore_errors=True)
            tmp_archive.close()
            return arch_file_bytes

        if patched is False:
            shutil.rmtree(tmp_dir, ignore_errors=True)
            tmp_archive.close()
            return arch_file_bytes

        with open(tmp_archive.name, "r+b") as f:
            ret = f.read()

        # cleanup
        shutil.rmtree(tmp_dir, ignore_errors=True)
        tmp_archive.close()

        return ret
Example #16
    def archive_files(self, arch_file_bytes, arch_info, include_dirs=False):
        try:
            archive_type = ArchiveType(arch_info['type'])
        except Exception as ex:
            return arch_file_bytes

        if len(arch_file_bytes) > archive_type.maxSize:
            return arch_file_bytes

        tmp_dir = tempfile.mkdtemp()

        try:
            with in_dir(tmp_dir):
                flags = libarchive.extract.EXTRACT_OWNER | libarchive.extract.EXTRACT_PERM | libarchive.extract.EXTRACT_TIME
                libarchive.extract_memory(arch_file_bytes, flags)
        except Exception as exce:
            return arch_file_bytes

        files_list = list()
        for dirname, dirnames, filenames in os.walk(tmp_dir):
            dirz = dirname.replace(tmp_dir, ".")
            if include_dirs:
                files_list.append(dirz)
            for f in filenames:
                fn = os.path.join(dirz, f)
                files_list.append(fn)

        patch_count = 0
        patched = False
        tmp_archive = tempfile.NamedTemporaryFile()

        try:
            with libarchive.file_writer(tmp_archive.name, arch_info['format'],
                                        arch_info['filter']) as archive:
                for filename in files_list:
                    full_path = os.path.join(tmp_dir, filename)
                    if os.path.islink(
                            full_path) or not os.path.isfile(full_path):
                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                        continue

                    if os.lstat(full_path).st_size >= long(self.file_size_max):
                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                        continue

                    # Check against keywords
                    keyword_check = False

                    if type(archive_type.blacklist) is str:
                        if archive_type.blacklist.lower() in filename.lower():
                            keyword_check = True
                    else:
                        for keyword in archive_type.blacklist:
                            if keyword.lower() in filename.lower():
                                keyword_check = True
                                continue

                    if keyword_check is True:
                        continue

                    if patch_count >= archive_type.patchCount:
                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                    else:
                        # create the file on disk temporarily for binaryGrinder to run on it
                        tmp = tempfile.NamedTemporaryFile()
                        shutil.copyfile(full_path, tmp.name)
                        tmp.flush()

                        with stdout_redirect(
                                StringIO.StringIO()) as new_stdout:
                            patch_result = self.binary_injector(tmp.name)
                        if patch_result:
                            patch_count += 1
                            file2 = os.path.join(self.staging_folder,
                                                 os.path.basename(tmp.name))
                            # let's move the backdoored file to the final location
                            shutil.copyfile(file2, full_path)
                            os.remove(file2)
                            patched = True
                            self.context.log(
                                "Patching {}: done".format(filename))
                        else:
                            self.context.log(
                                "Patching {}: failed".format(filename),
                                level="error")

                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                        tmp.close()

        except Exception as exc:
            shutil.rmtree(tmp_dir, ignore_errors=True)
            tmp_archive.close()
            return arch_file_bytes

        if patched is False:
            shutil.rmtree(tmp_dir, ignore_errors=True)
            tmp_archive.close()
            return arch_file_bytes

        with open(tmp_archive.name, 'r+b') as f:
            ret = f.read()

        # cleanup
        shutil.rmtree(tmp_dir, ignore_errors=True)
        tmp_archive.close()

        return ret
Example #17
    def archive_files(self, arch_file_bytes, arch_info, include_dirs=False):
        try:
            archive_type = ArchiveType(arch_info['type'])
        except Exception as ex:
            EnhancedOutput.print_error(
                "Missing fields in the config file: {}".format(ex))
            EnhancedOutput.print_warning("Returning original file.")
            EnhancedOutput.logging_error(
                "Error setting archive type: {}. Returning original file.".
                format(ex))
            return arch_file_bytes

        EnhancedOutput.print_size(arch_file_bytes)

        if len(arch_file_bytes) > archive_type.maxSize:
            EnhancedOutput.print_error("{} over allowed size".format(
                arch_info['type']))
            EnhancedOutput.logging_info("{} maxSize met {}".format(
                arch_info['type'], len(arch_file_bytes)))
            return arch_file_bytes

        tmp_dir = tempfile.mkdtemp()

        try:
            with in_dir(tmp_dir):
                flags = libarchive.extract.EXTRACT_OWNER | libarchive.extract.EXTRACT_PERM | libarchive.extract.EXTRACT_TIME
                libarchive.extract_memory(arch_file_bytes, flags)
        except Exception as exce:
            EnhancedOutput.print_error(
                "Can't extract file. Returning original one.")
            EnhancedOutput.logging_error(
                "Can't extract file: {}. Returning original one.".format(exce))
            return arch_file_bytes

        EnhancedOutput.print_info("{} file contents and info".format(
            arch_info['type']))
        EnhancedOutput.print_info("Compression: {}".format(
            arch_info['filter']))

        files_list = list()
        for dirname, dirnames, filenames in os.walk(tmp_dir):
            dirz = dirname.replace(tmp_dir, ".")
            print "\t{0}".format(dirz)
            if include_dirs:
                files_list.append(dirz)
            for f in filenames:
                fn = os.path.join(dirz, f)
                files_list.append(fn)
                print "\t{} {}".format(
                    fn,
                    os.lstat(os.path.join(dirname, f)).st_size)

        patch_count = 0
        patched = False
        tmp_archive = tempfile.NamedTemporaryFile()

        try:
            with libarchive.file_writer(tmp_archive.name, arch_info['format'],
                                        arch_info['filter']) as archive:
                for filename in files_list:
                    full_path = os.path.join(tmp_dir, filename)
                    EnhancedOutput.print_info(
                        ">>> Next file in archive: {}".format(filename))

                    if os.path.islink(
                            full_path) or not os.path.isfile(full_path):
                        EnhancedOutput.print_warning(
                            "{} is not a file, skipping.".format(filename))
                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                        continue

                    if os.lstat(full_path).st_size >= long(self.file_size_max):
                        EnhancedOutput.print_warning(
                            "{} is too big, skipping.".format(filename))
                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                        continue

                    # Check against keywords
                    keyword_check = False

                    if type(archive_type.blacklist) is str:
                        if archive_type.blacklist.lower() in filename.lower():
                            keyword_check = True
                    else:
                        for keyword in archive_type.blacklist:
                            if keyword.lower() in filename.lower():
                                keyword_check = True
                                continue

                    if keyword_check is True:
                        EnhancedOutput.print_warning(
                            "Archive blacklist enforced!")
                        EnhancedOutput.logging_info(
                            "Archive blacklist enforced on {}".format(
                                filename))
                        continue

                    if patch_count >= archive_type.patchCount:
                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                        EnhancedOutput.logging_info(
                            "Met archive config patch count limit. Adding original file."
                        )
                    else:
                        # create the file on disk temporarily for binaryGrinder to run on it
                        tmp = tempfile.NamedTemporaryFile()
                        shutil.copyfile(full_path, tmp.name)
                        tmp.flush()
                        patch_result = self.binary_injector(tmp.name)
                        if patch_result:
                            patch_count += 1
                            file2 = os.path.join(self.staging_folder,
                                                 os.path.basename(tmp.name))
                            EnhancedOutput.print_info(
                                "Patching complete, adding to archive file.")
                            # let's move the backdoored file to the final location
                            shutil.copyfile(file2, full_path)
                            EnhancedOutput.logging_info(
                                "{} in archive patched, adding to final archive"
                                .format(filename))
                            os.remove(file2)
                            patched = True
                        else:
                            EnhancedOutput.print_error("Patching failed")
                            EnhancedOutput.logging_error(
                                "{} patching failed. Keeping original file.".
                                format(filename))

                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                        tmp.close()

        except Exception as exc:
            EnhancedOutput.print_error(
                "Error while creating the archive: {}. Returning the original file."
                .format(exc))
            EnhancedOutput.logging_error(
                "Error while creating the archive: {}. Returning original file."
                .format(exc))
            shutil.rmtree(tmp_dir, ignore_errors=True)
            tmp_archive.close()
            return arch_file_bytes

        if patched is False:
            EnhancedOutput.print_info(
                "No files were patched. Forwarding original file")
            shutil.rmtree(tmp_dir, ignore_errors=True)
            tmp_archive.close()
            return arch_file_bytes

        with open(tmp_archive.name, 'r+b') as f:
            ret = f.read()

        # cleanup
        shutil.rmtree(tmp_dir, ignore_errors=True)
        tmp_archive.close()

        EnhancedOutput.logging_info(
            "Patching complete for HOST: {} ({}), PATH: {}".format(
                self.flow.request.host, self.host_domain,
                self.flow.request.path))
        return ret
Example #18
def fix_package_deps(pkgs_dir: Path, filename: str, channel: str, tmpdir: str) -> Path:
    """Possibly fix package dependencies in pkgs/main .conda file.

    For the motivating issue, see:
    https://github.com/ContinuumIO/anaconda-issues/issues/11920

    :param pkgs_dir: directory with downloaded conda packages (e.g. ~/miniconda3/pkgs)
    :param filename: filename of package (e.g. 'numpy-1.18.5-py37h1da2735_0.conda')
    :param channel: channel URL the package came from
    :param tmpdir: temporary directory for doing conda package munging
    :returns pkg_file: path to the existing or fixed conda package
    """
    # Check if the package file (*.tar.bz2 or *.conda) is a conda zip archive
    # and that it comes from pkgs/main.  Only those might need fixing.
    pkg_file = pkgs_dir / filename
    if (pkg_file.suffix != '.conda'
            or not channel.startswith('https://repo.anaconda.com/pkgs/main')):
        return pkg_file

    # Unzip pkg_file in a temp dir
    tmp_pkgs_dir = Path(tmpdir)
    pkg_dir = tmp_pkgs_dir / pkg_file.with_suffix('').name
    if pkg_dir.exists():
        shutil.rmtree(pkg_dir)
    pkg_dir.mkdir()

    with zipfile.ZipFile(pkg_file, 'r') as pkg_zip:
        pkg_zip.extractall(pkg_dir)

    info_tar = pkg_dir / f'info-{pkg_dir.name}.tar.zst'
    with chdir(pkg_dir):
        libarchive.extract_file(info_tar.name)

    pkg_info_file = pkg_dir / 'info' / 'index.json'
    with open(pkg_info_file) as fh:
        pkg_info = json.load(fh)
    pkg_depends = pkg_info['depends']

    # If the package dependencies are the same as upstream then no change req'd
    upstream_repodata_url = f'{channel}/repodata.json.bz2'
    upstream_repodata = get_upstream_repodata(upstream_repodata_url)
    try:
        upstream_depends = upstream_repodata['packages.conda'][filename]['depends']
    except KeyError:
        print(f' WARNING: package {filename} apparently came from defaults but '
              f'no entry in upstream_repodata was found => assuming dependencies OK')
        upstream_depends = pkg_depends

    if pkg_depends == upstream_depends:
        return pkg_file

    print('Fixing depends for the following diffs')
    print('\n'.join(line.strip() for line in difflib.ndiff(pkg_depends, upstream_depends)
                    if re.match(r'\S', line)))
    pkg_info['depends'] = upstream_depends
    with open(pkg_info_file, 'w') as fh:
        json.dump(pkg_info, fh, indent=4)

    print(f'Unlinking {info_tar} and making new version')
    info_tar.unlink()

    with chdir(pkg_dir):
        with libarchive.file_writer(info_tar.name, 'ustar', 'zstd') as archive:
            archive.add_files('info')
    try:
        shutil.rmtree(pkg_dir / 'info')
    except OSError as exc:
        # Happened on Windows, just wait a bit and try again
        print(f'Failed, trying again: {exc}')
        import time
        time.sleep(1)
        shutil.rmtree(pkg_dir / 'info')

    print(f'Making new zip file {pkg_file}')
    pkg_file = tmp_pkgs_dir / filename
    shutil.make_archive(str(pkg_file), format='zip', root_dir=pkg_dir, base_dir='.')
    pkg_file.with_suffix('.conda.zip').rename(pkg_file)

    return pkg_file
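
A hedged usage sketch for fix_package_deps, reusing the example values from its docstring (the channel subdir 'linux-64' is an assumption):

import tempfile
from pathlib import Path

fixed_pkg = fix_package_deps(
    Path.home() / 'miniconda3' / 'pkgs',
    'numpy-1.18.5-py37h1da2735_0.conda',
    'https://repo.anaconda.com/pkgs/main/linux-64',
    tempfile.mkdtemp(),
)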
Example #19
    def package(self, output=None, keep_box_file=False):
        if not output:
            output = "buildserver.box"
            logging.debug(
                "no output name set for packaging '%s', "
                "defaulting to %s", self.srvname, output)
        storagePool = self.conn.storagePoolLookupByName('default')
        domainInfo = self.conn.lookupByName(self.srvname).info()
        if storagePool:

            if isfile('metadata.json'):
                os.remove('metadata.json')
            if isfile('Vagrantfile'):
                os.remove('Vagrantfile')
            if isfile('box.img'):
                os.remove('box.img')

            logging.debug('preparing box.img for box %s', output)
            vol = storagePool.storageVolLookupByName(self.srvname + '.img')
            imagepath = vol.path()
            # TODO use a libvirt storage pool to ensure the img file is readable
            if not os.access(imagepath, os.R_OK):
                logging.warning(
                    _('Cannot read "{path}"!').format(path=imagepath))
                _check_call([
                    'sudo', '/bin/chmod', '-R', 'a+rX',
                    '/var/lib/libvirt/images'
                ])
            shutil.copy2(imagepath, 'box.img')
            _check_call(['qemu-img', 'rebase', '-p', '-b', '', 'box.img'])
            img_info_raw = _check_output(
                ['qemu-img', 'info', '--output=json', 'box.img'])
            img_info = json.loads(img_info_raw.decode('utf-8'))
            metadata = {
                "provider": "libvirt",
                "format": img_info['format'],
                "virtual_size":
                math.ceil(img_info['virtual-size'] / (1024.**3)),
            }

            logging.debug('preparing metadata.json for box %s', output)
            with open('metadata.json', 'w') as fp:
                fp.write(json.dumps(metadata))
            logging.debug('preparing Vagrantfile for box %s', output)
            vagrantfile = textwrap.dedent("""\
                  Vagrant.configure("2") do |config|
                    config.ssh.username = "******"
                    config.ssh.password = "******"

                    config.vm.provider :libvirt do |libvirt|

                      libvirt.driver = "kvm"
                      libvirt.host = ""
                      libvirt.connect_via_ssh = false
                      libvirt.storage_pool_name = "default"
                      libvirt.cpus = {cpus}
                      libvirt.memory = {memory}

                    end
                  end""".format_map({
                'memory': str(int(domainInfo[1] / 1024)),
                'cpus': str(domainInfo[3])
            }))
            with open('Vagrantfile', 'w') as fp:
                fp.write(vagrantfile)
            try:
                import libarchive
                with libarchive.file_writer(output, 'gnutar', 'gzip') as tar:
                    logging.debug('adding files to box %s ...', output)
                    tar.add_files('metadata.json', 'Vagrantfile', 'box.img')
            except (ImportError, AttributeError):
                with tarfile.open(output, 'w:gz') as tar:
                    logging.debug('adding metadata.json to box %s ...', output)
                    tar.add('metadata.json')
                    logging.debug('adding Vagrantfile to box %s ...', output)
                    tar.add('Vagrantfile')
                    logging.debug('adding box.img to box %s ...', output)
                    tar.add('box.img')

            if not keep_box_file:
                logging.debug(
                    'box packaging complete, removing temporary files.')
                os.remove('metadata.json')
                os.remove('Vagrantfile')
                os.remove('box.img')

        else:
            logging.warning("could not connect to storage-pool 'default', "
                            "skip packaging buildserver box")
Example #20
def _create_libarchive(fullpath, files, compression_filter, filter_opts):
    with libarchive.file_writer(fullpath,
                                'gnutar',
                                filter_name=compression_filter,
                                options=filter_opts) as archive:
        archive.add_files(*files)
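
An illustrative call for the wrapper above (paths and the option string are placeholders; the zstd compression-level option assumes libarchive's filter-option syntax):

_create_libarchive('pkg.tar.zst', ['info/index.json', 'lib/module.py'],
                   'zstd', 'zstd:compression-level=19')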
Example #21
    def archive_files(self, arch_file_bytes, arch_info, include_dirs=False):
        try:
            archive_type = ArchiveType(arch_info['type'])
        except Exception as ex:
            EnhancedOutput.print_error("Missing fields in the config file: {}".format(ex))
            EnhancedOutput.print_warning("Returning original file.")
            EnhancedOutput.logging_error("Error setting archive type: {}. Returning original file.".format(ex))
            return arch_file_bytes

        EnhancedOutput.print_size(arch_file_bytes)

        if len(arch_file_bytes) > archive_type.maxSize:
            EnhancedOutput.print_error("{} over allowed size".format(arch_info['type']))
            EnhancedOutput.logging_info("{} maxSize met {}".format(arch_info['type'], len(arch_file_bytes)))
            return arch_file_bytes

        tmp_dir = tempfile.mkdtemp()

        try:
            with in_dir(tmp_dir):
                flags = libarchive.extract.EXTRACT_OWNER | libarchive.extract.EXTRACT_PERM | libarchive.extract.EXTRACT_TIME
                libarchive.extract_memory(arch_file_bytes, flags)
        except Exception as exce:
            EnhancedOutput.print_error("Can't extract file. Returning original one.")
            EnhancedOutput.logging_error("Can't extract file: {}. Returning original one.".format(exce))
            return arch_file_bytes

        EnhancedOutput.print_info("{} file contents and info".format(arch_info['type']))
        EnhancedOutput.print_info("Compression: {}".format(arch_info['filter']))

        files_list = list()
        for dirname, dirnames, filenames in os.walk(tmp_dir):
            dirz = dirname.replace(tmp_dir, ".")
            print "\t{0}".format(dirz)
            if include_dirs:
                files_list.append(dirz)
            for f in filenames:
                fn = os.path.join(dirz, f)
                files_list.append(fn)
                print "\t{} {}".format(fn, os.lstat(os.path.join(dirname, f)).st_size)

        patch_count = 0
        patched = False
        tmp_archive = tempfile.NamedTemporaryFile()

        try:
            with libarchive.file_writer(tmp_archive.name, arch_info['format'], arch_info['filter']) as archive:
                for filename in files_list:
                    full_path = os.path.join(tmp_dir, filename)
                    EnhancedOutput.print_info(">>> Next file in archive: {}".format(filename))

                    if os.path.islink(full_path) or not os.path.isfile(full_path):
                        EnhancedOutput.print_warning("{} is not a file, skipping.".format(filename))
                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                        continue

                    if os.lstat(full_path).st_size >= long(self.file_size_max):
                        EnhancedOutput.print_warning("{} is too big, skipping.".format(filename))
                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                        continue

                    # Check against keywords
                    keyword_check = False

                    if type(archive_type.blacklist) is str:
                        if archive_type.blacklist.lower() in filename.lower():
                            keyword_check = True
                    else:
                        for keyword in archive_type.blacklist:
                            if keyword.lower() in filename.lower():
                                keyword_check = True
                                continue

                    if keyword_check is True:
                        EnhancedOutput.print_warning("Archive blacklist enforced!")
                        EnhancedOutput.logging_info("Archive blacklist enforced on {}".format(filename))
                        continue

                    if patch_count >= archive_type.patchCount:
                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                        EnhancedOutput.logging_info("Met archive config patch count limit. Adding original file.")
                    else:
                        # create the file on disk temporarily for binaryGrinder to run on it
                        tmp = tempfile.NamedTemporaryFile()
                        shutil.copyfile(full_path, tmp.name)
                        tmp.flush()
                        patch_result = self.binary_injector(tmp.name)
                        if patch_result:
                            patch_count += 1
                            file2 = os.path.join(self.staging_folder, os.path.basename(tmp.name))
                            EnhancedOutput.print_info("Patching complete, adding to archive file.")
                            # let's move the backdoored file to the final location
                            shutil.copyfile(file2, full_path)
                            EnhancedOutput.logging_info(
                                "{} in archive patched, adding to final archive".format(filename))
                            os.remove(file2)
                            patched = True
                        else:
                            EnhancedOutput.print_error("Patching failed")
                            EnhancedOutput.logging_error("{} patching failed. Keeping original file.".format(filename))

                        with in_dir(tmp_dir):
                            archive.add_files(filename)
                        tmp.close()

        except Exception as exc:
            EnhancedOutput.print_error(
                "Error while creating the archive: {}. Returning the original file.".format(exc))
            EnhancedOutput.logging_error("Error while creating the archive: {}. Returning original file.".format(exc))
            shutil.rmtree(tmp_dir, ignore_errors=True)
            tmp_archive.close()
            return arch_file_bytes

        if patched is False:
            EnhancedOutput.print_info("No files were patched. Forwarding original file")
            shutil.rmtree(tmp_dir, ignore_errors=True)
            tmp_archive.close()
            return arch_file_bytes

        with open(tmp_archive.name, 'r+b') as f:
            ret = f.read()

        # cleanup
        shutil.rmtree(tmp_dir, ignore_errors=True)
        tmp_archive.close()

        EnhancedOutput.logging_info(
            "Patching complete for HOST: {} ({}), PATH: {}".format(self.flow.request.host, self.host_domain,
                                                                   self.flow.request.path))
        return ret