def test_sh_rm_broken_symlink():
    with sh.tmpdir():
        os.symlink('afile-notexist', 'alink')
        assert not path.exists('alink')
        assert path.lexists('alink')
        sh.rm('alink')
        assert not path.lexists('alink')

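# The tests in this listing assume sh.rm() transparently removes plain
# files, symlinks (working or broken, unlinked without being followed),
# and whole directory trees. A minimal sketch of that contract using only
# the stdlib -- an assumption about sh.rm's semantics, not pypm's code:

import os
import shutil


def rm_sketch(p):
    """Remove ``p`` whether it is a file, a (possibly broken) symlink,
    or a directory tree."""
    if os.path.islink(p) or os.path.isfile(p):
        os.remove(p)          # unlinks the entry itself; never follows links
    elif os.path.isdir(p):
        shutil.rmtree(p)      # recursive directory removal
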
def create(path, data_root, bpkg):
    """Create a PyPM package at ``path``

    The created package will contain:

    - data.tar.gz -- contents of ``data_root``
    - info.json   -- metadata of ``bpkg``

    Return the contents of info.json that was added to the package
    """
    assert type(bpkg) is BinaryPackage
    pkgroot = tempfile.mkdtemp('-pkgroot', 'pypm-')

    # generate info.json
    info_json = bpkg.to_json()

    def pack_contents(fn, parentdir):
        """Pack the contents of directory ``parentdir`` into ``fn``"""
        sh.pack_archive(
            fn,
            [xjoin(parentdir, f) for f in os.listdir(parentdir)],
            parentdir)

    # create the .tar.gz file (.pypm)
    pack_contents(xjoin(pkgroot, 'data.tar.gz'), data_root)
    with open(xjoin(pkgroot, 'info.json'), 'w') as f:
        f.write(info_json)
    pack_contents(path, pkgroot)
    sh.rm(pkgroot)
    return info_json

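# The resulting .pypm is an ordinary gzipped tarball with exactly two
# members:
#
#   <name>.pypm
#   |-- data.tar.gz   (contents of data_root)
#   `-- info.json     (bpkg metadata)
#
# so it can be inspected with the stdlib alone. A sketch that assumes only
# the layout documented above (read_info_json is a hypothetical helper,
# not part of pypm):

import json
import tarfile


def read_info_json(pypm_path):
    """Extract and parse the info.json member of a .pypm archive."""
    with tarfile.open(pypm_path, 'r:gz') as tf:
        member = tf.extractfile('info.json')
        return json.loads(member.read().decode('utf-8'))
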
def test_sh_rm_symlink():
    with sh.tmpdir():
        with open('afile', 'w') as f:
            f.close()
        assert path.exists('afile')
        os.symlink('afile', 'alink')
        assert path.lexists('alink')
        sh.rm('alink')
        assert not path.lexists('alink')

def test_sh_rm_dir():
    with sh.tmpdir():
        sh.mkdirs('adir')
        with sh.cd('adir'):
            with open('afile', 'w') as f:
                f.close()
            assert path.exists('afile')
        assert path.exists('adir')
        sh.rm('adir')
        assert not path.exists('adir')

def test_pkg(pkgpath):
    testdir = tempfile.mkdtemp('-test', 'pypm-')
    extracted_dir, _ = sh.unpack_archive(pkgpath, testdir)

    # check if we have read access on the directory
    for child in os.listdir(extracted_dir):
        p = path.join(extracted_dir, child)
        if path.isdir(p):
            os.listdir(p)

    sh.rm(testdir)

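# sh.unpack_archive is pypm's own helper; from its use above it returns a
# (extracted_dir, ...) tuple. A rough stdlib equivalent for .tar.gz input
# (an illustrative assumption, not pypm's implementation):

import os
import tarfile


def unpack_tgz(archive_path, dest_dir):
    """Extract archive_path under dest_dir; return the top-level directory."""
    with tarfile.open(archive_path, 'r:gz') as tf:
        tf.extractall(dest_dir)
        top = tf.getnames()[0].split('/')[0]
    return os.path.join(dest_dir, top)
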
def download_index(self, target_file, force, verbose=True, interactive=True):
    """Download repository index unless it was downloaded recently (Etag)

    - force:   Do not use cache; always download
    - verbose: If False, try not to print (LOG.info) anything to console
               unless an actual download happens.

    Return True if download actually happened.
    """
    def start_info(status):
        if status == 'Hit' and not verbose:
            return None
        return '%s: [%s] :repository-index:' % (
            status, six.moves.urlparse(self.url).netloc)

    index_url = url_join(self.url, [self.REMOTE_INDEX_FILENAME])

    try:
        idxgz_file, downloaded = net.download_file(
            index_url, P.dirname(target_file), {
                'use_cache': not force,
                'save_properties': True,
                'start_info': start_info,
            }, interactive=interactive)
        if not downloaded:
            # index was not updated
            return False
    except six.moves.HTTPError as e:
        if e.code == 404:  # Not Found
            raise ValueError(
                '{0.url} does not appear to be a valid repository '
                'because {1} is missing.'.format(self, index_url))
        raise

    # extract index.gz (REMOTE_INDEX_FILENAME) to index (target_file)
    try:
        with closing(gzip.open(idxgz_file, 'rb')) as f:
            with open(target_file, 'wb') as f2:
                f2.write(f.read())
    except:
        # If an error occurs during extraction, simply delete the index
        # file (so that it will get properly synced during the next sync)
        sh.rm(target_file)
        sh.rm(idxgz_file)
        raise

    return True

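# net.download_file is pypm's helper; the 'use_cache'/'save_properties'
# options above drive an ETag-based conditional GET. A minimal sketch of
# that mechanism using six's urllib moves -- an illustration of the idea,
# not net.download_file's code:

import six


def fetch_if_modified(url, etag=None):
    """Return (body, new_etag), or (None, etag) if the server says 304."""
    request = six.moves.urllib.request.Request(url)
    if etag:
        request.add_header('If-None-Match', etag)
    try:
        response = six.moves.urllib.request.urlopen(request)
    except six.moves.urllib.error.HTTPError as e:
        if e.code == 304:  # Not Modified: the cached copy is still fresh
            return None, etag
        raise
    return response.read(), response.headers.get('ETag')
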
def undo_extract(self, files_list):
    """Undo whatever self.extract_package did"""
    # sort in descending order so that children of a directory
    # get removed before the directory itself
    files_list.sort()
    files_list.reverse()

    for path in files_list:
        path = self.pypmenv.pyenv.get_abspath(path)
        if not os.path.lexists(path):
            LOG.warn('no longer exists: %s', path)
        elif os.path.isfile(path) or os.path.islink(path):
            sh.rm(path)
            # remove the corresponding .pyc and .pyo files
            if path.endswith('.py'):
                sh.rm(path + 'c')
                sh.rm(path + 'o')
        elif os.path.isdir(path):
            if len(os.listdir(path)) > 0:
                # cannot delete a directory with files added
                # after the installation
                LOG.debug('non-empty directory: %s - hence skipping', path)
            else:
                # directory `path` is empty
                sh.rm(path)
        else:
            raise TypeError(
                "don't know what to do with this type of file: " + path)

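# Why sort-then-reverse works: reverse-lexicographic order always lists a
# directory's children before the directory itself, e.g.
#
#   sorted:   ['a', 'a/b', 'a/b/x.py']
#   reversed: ['a/b/x.py', 'a/b', 'a']
#
# so files go first and their parent directories, by then empty, follow.
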
def generate_index(self):
    """Generate the repository index file (`index.gz`)

    index.gz is the compressed sqlite index containing all of the
    succeeded packages in the repository pool.

    Return the number of packages added to the repository index.
    """
    assert exists(self.repository.path)
    idx_path = xjoin(self.repository.path, 'index')
    idx_gz_path = idx_path + '.gz'
    sh.rm(idx_path)
    db = RepoPackageDatabase(idx_path, touch=True)

    # Tag BE packages; so the client may use it to determine if a package
    # is available only to BE customers or not.
    # See also: RepoPackage.requires_be_license property
    pkgtags = 'be' if self.repository.name == 'be' else ''

    with closing(db):
        LOG.debug('finding packages in %s', self.repository.path)
        packages = self.repository.find_packages()
        LOG.debug('processing %d packages', len(packages))
        rpkg_list = [
            RepoPackage.create_from(
                BinaryPackage(**self._read_info_json(pkgfile)),
                relpath=relpath(pkgfile, self.repository.path),
                tags=pkgtags)
            for pkgfile in textui.ProgressBar.iterate(packages,
                                                      note="Package")
        ]

        # Optimize index size by removing the "description" field.
        # PyPI's descriptions are typically very long - see
        # http://pypi.python.org/pypi/zc.buildout for example - hence we
        # must remove them from the index.
        for rpkg in rpkg_list:
            rpkg.description = ''

        # keep only the latest pkg_version in the index
        LOG.debug("pruning older pkg_version's")
        rpkg_list = _prune_older_binary_releases(rpkg_list)
        LOG.debug('.. resulting in %d packages', len(rpkg_list))

        LOG.info(' writing index (please wait) ...')
        with db.transaction() as session:
            session.add_all(rpkg_list)
            session.commit()
            session.close()

    LOG.info(' compressing index: ...%s%s',
             os.path.basename(idx_gz_path),
             (' (%d)' % len(rpkg_list)) if rpkg_list else '')
    sh.rm(idx_gz_path)
    with closing(gzip.open(idx_gz_path, 'wb')) as f:
        with open(idx_path, 'rb') as f2:
            f.write(f2.read())
    sh.rm(idx_path)

    return len(rpkg_list)

def generate_index(self):
    """Generate the repository index file (`index.gz`)

    index.gz is the compressed sqlite index containing all of the
    succeeded packages in the repository pool.

    Return the number of packages added to the repository index.
    """
    from pypm.grail.package import PackageShare

    assert P.exists(self.repository.path)
    idx_path = xjoin(self.repository.path, 'index')
    idx_gz_path = idx_path + '.gz'
    sh.rm(idx_path)
    db = RepoPackageDatabase(idx_path, touch=True)

    # Tag BE packages; so the client may use it to determine if a package
    # is available only to BE customers or not.
    # See also: RepoPackage.requires_be_license property
    pkgtags = 'be' if self.repository.name == 'be' else ''

    # Load package-specific data from share/p/*
    pkgdata = dict((s.name, s) for s in PackageShare.all())

    with closing(db):
        LOG.debug('finding packages in %s', self.repository.path)
        packages = self.repository.find_packages()
        LOG.debug('processing %d packages', len(packages))
        rpkg_list = [
            RepoPackage.create_from(
                BinaryPackage(**self._read_info_json(pkgfile)),
                relpath=P.relpath(pkgfile, self.repository.path),
                tags=pkgtags)
            for pkgfile in textui.ProgressBar.iterate(packages,
                                                      note="Package")
        ]

        for rpkg in rpkg_list:
            # Optimize index size by removing the "description" field.
            # PyPI's descriptions are typically very long - see
            # http://pypi.python.org/pypi/zc.buildout for example - hence
            # we must remove them from the index.
            rpkg.description = ''

            if rpkg.name in pkgdata:
                # Add package notes to the description^Wextra field.
                # See pypm.common.package.RepoPackage.FIELDS to understand
                # why we are abusing this field.
                notes = list(pkgdata[rpkg.name].get_notes_for(
                    pyver=rpkg.pyver, osarch=rpkg.osarch))
                rpkg.description = json.dumps({'notes': notes})
                LOG.debug('Patching "description" field for %s', rpkg)

        # keep only the latest pkg_version in the index
        LOG.debug("pruning older pkg_version's")
        rpkg_list = _prune_older_binary_releases(rpkg_list)
        LOG.debug('.. resulting in %d packages', len(rpkg_list))

        LOG.info(' writing index (please wait) ...')
        with db.transaction() as session:
            session.add_all(rpkg_list)
            session.commit()
            session.close()

    LOG.info(' compressing index: ...%s%s',
             os.path.basename(idx_gz_path),
             (' (%d)' % len(rpkg_list)) if rpkg_list else '')
    sh.rm(idx_gz_path)
    with closing(gzip.open(idx_gz_path, 'wb')) as f:
        with open(idx_path, 'rb') as f2:
            f.write(f2.read())
    sh.rm(idx_path)

    return len(rpkg_list)

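# Reading the notes back on the consumer side mirrors the json.dumps call
# above. A sketch that assumes only the {'notes': [...]} layout written by
# generate_index (get_notes is a hypothetical helper, not a pypm API):

import json


def get_notes(rpkg):
    """Return the notes list stowed in rpkg.description, if any."""
    if not rpkg.description:
        return []
    return json.loads(rpkg.description).get('notes', [])
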
def test_sh_rm_file():
    with sh.tmpdir():
        with open('afile', 'w') as f:
            f.close()
        assert path.exists('afile')
        sh.rm('afile')
        assert not path.exists('afile')
