예제 #1
0
    def create(path, data_root, bpkg):
        """Create a PyPM package at ``path``

        The created package will contain:

            - data.tar.gz -- contents of ``data_root``
            - info.json   -- metadata of ``bpkg``

        Return the contents of info.json that was added to the package
        """
        assert isinstance(bpkg, BinaryPackage)
        pkgroot = tempfile.mkdtemp('-pkgroot', 'pypm-')

        # generate info.json
        info_json = bpkg.to_json()

        def pack_contents(fn, parentdir):
            """Pack the contents of directory ``parentdir`` into ``fn``"""
            sh.pack_archive(
                fn,
                [xjoin(parentdir, f) for f in os.listdir(parentdir)],
                parentdir)

        # create the .tar.gz file (.pypm)
        pack_contents(xjoin(pkgroot, 'data.tar.gz'), data_root)
        # Use a context manager so the handle is closed deterministically
        # (the original bare open().write() leaked it until GC).
        with open(xjoin(pkgroot, 'info.json'), 'w') as f:
            f.write(info_json)
        pack_contents(path, pkgroot)

        sh.rm(pkgroot)
        return info_json
예제 #2
0
    def create(path, data_root, bpkg):
        """Create a PyPM package at ``path``

        The created package will contain:

            - data.tar.gz -- contents of ``data_root``
            - info.json   -- metadata of ``bpkg``

        Return the contents of info.json that was added to the package
        """
        assert isinstance(bpkg, BinaryPackage)
        pkgroot = tempfile.mkdtemp('-pkgroot', 'pypm-')

        # generate info.json
        info_json = bpkg.to_json()

        def pack_contents(fn, parentdir):
            """Pack the contents of directory ``parentdir`` into ``fn``"""
            sh.pack_archive(
                fn, [xjoin(parentdir, f) for f in os.listdir(parentdir)],
                parentdir)

        # create the .tar.gz file (.pypm)
        pack_contents(xjoin(pkgroot, 'data.tar.gz'), data_root)
        # Close the file handle promptly instead of leaking it until GC.
        with open(xjoin(pkgroot, 'info.json'), 'w') as f:
            f.write(info_json)
        pack_contents(path, pkgroot)

        sh.rm(pkgroot)
        return info_json
예제 #3
0
    def _transform_path(self, p):
        """Transform original path using our image_root

        Rewrite ``p`` so it points under ``self.root_dir`` instead of under
        the target Python installation's directory tree.  Paths already under
        ``self.root_dir`` are returned unchanged.
        """
        # Find the "base directory" of target Python executable
        # And then replace that with our image root in `p`
        if sys.platform.startswith('win'):
            # On Windows the executable sits directly in the install root.
            original_root = xjoin(P.dirname(self.target_python_exe))
        else:
            # Elsewhere the executable lives in <root>/bin, so go one level up.
            original_root = xjoin(P.dirname(self.target_python_exe), '..')

        # Use `lower()` for case-insensitive filesystems like NTFS/FAT
        if p.lower().startswith(self.root_dir.lower()):
            # Already under our image root; nothing to transform.
            return p
        else:
            assert p.lower().startswith(original_root.lower()), \
                '"%s" is different from "%s"' % (p, original_root)
            # Swap the original prefix for root_dir; the tail of `p`
            # (including its leading separator) is preserved as-is.
            return xjoin(self.root_dir + p[len(original_root):])
예제 #4
0
    def generate_index(self):
        """Generate the repository index file (`index.gz`)

        index.gz is the compressed sqlite index containing all of the succeeded
        packages in the repository pool.

        Return the number of packages added to the repository index.
        """
        assert exists(self.repository.path)
        idx_path = xjoin(self.repository.path, 'index')
        idx_gz_path = idx_path + '.gz'

        # Start from a clean slate: remove any stale uncompressed index.
        sh.rm(idx_path)
        db = RepoPackageDatabase(idx_path, touch=True)

        # Tag BE packages; so client may use it to determine if a package is
        # available only to BE customers or not.
        # See also: RepoPackage.requires_be_license property
        pkgtags = 'be' if self.repository.name == 'be' else ''

        with closing(db):
            LOG.debug('finding packages in %s', self.repository.path)
            packages = self.repository.find_packages()

            LOG.debug('processing %d packages', len(packages))
            # One RepoPackage row per package file found in the pool.
            rpkg_list = [
                RepoPackage.create_from(
                    BinaryPackage(**self._read_info_json(pkgfile)),
                    relpath=relpath(pkgfile, self.repository.path),
                    tags=pkgtags)
                for pkgfile in textui.ProgressBar.iterate(packages, note="Package")
            ]

            # Optimize index size by removing the "description" field.
            # PyPI's descriptions are typically very long - see
            # http://pypi.python.org/pypi/zc.buildout for example - hence we
            # must remove them from the index.
            for rpkg in rpkg_list:
                rpkg.description = ''

            # keep only the latest pkg_version in index
            LOG.debug("pruning older pkg_version's")
            rpkg_list = _prune_older_binary_releases(rpkg_list)
            LOG.debug('.. resulting in %d packages', len(rpkg_list))

            LOG.info('  writing index (please wait) ...')
            with db.transaction() as session:
                session.add_all(rpkg_list)
                session.commit()
                session.close()

        LOG.info('  compressing index: ...%s%s',
                 os.path.basename(idx_gz_path),
                 (' (%d)' % len(rpkg_list)) if rpkg_list else '')
        sh.rm(idx_gz_path)
        # Close both handles deterministically; the original leaked the
        # read handle on idx_path until garbage collection.
        with closing(gzip.open(idx_gz_path, 'wb')) as gz:
            with open(idx_path, 'rb') as raw:
                gz.write(raw.read())
        sh.rm(idx_path)

        return len(rpkg_list)
예제 #5
0
    def _read_info_json(self, pypm_file):
        """Read cached info.json (as dict) from the .d/ directory

        If cached version is missing, read from the package file itself, which
        would be an expensive operation.
        """
        info_json_loc = xjoin(pypm_file + '.d', 'info.json')

        try:
            s = self.repository.uptree.open_and_read(info_json_loc)
        except IOError:
            # There seems to be no .d/info.json file; perhaps this is a
            # 'custom' that is not managed by pypm-builder. So let's extract
            # info.json from the package file (.pypm) even though that is
            # expensive (so we will also warn the user)
            # `warning` replaces the deprecated `Logger.warn` alias.
            LOG.warning(
                'Cache file (.d/info.json) missing; retrieving from %s', pypm_file)
            s = PackageFile(pypm_file).retrieve_info_json()

        d = json.loads(s)

        # It is not clear whether info.json's "name" field is canonical (i.e.,
        # lower case safe version of name, that is guaranteed to be same).
        # Therefore, we do one final conversion here.
        d['name'] = pkg_resources.safe_name(d['name']).lower()
        return d
예제 #6
0
파일: base.py 프로젝트: wyj5230/xunlei_vip
    def _transform_path(self, p):
        """Transform original path using our image_root"""
        # Locate the target Python installation's base directory so it can
        # be substituted with our image root inside `p`.
        exe_dir = P.dirname(self.target_python_exe)
        if sys.platform.startswith('win'):
            original_root = xjoin(exe_dir)
        else:
            original_root = xjoin(exe_dir, '..')

        # Compare case-insensitively for filesystems like NTFS/FAT.
        lowered = p.lower()
        if lowered.startswith(self.root_dir.lower()):
            return p

        assert lowered.startswith(original_root.lower()), \
            '"%s" is different from "%s"' % (p, original_root)
        return xjoin(self.root_dir + p[len(original_root):])
예제 #7
0
 def genidx_mreposet(self, filter):
     """Regenerate the index of every repository in the multi-repository set

     ``filter``: when truthy, only repositories whose path fnmatches
     ``*<filter>*`` are processed; the rest are counted and reported as
     skipped.  (Note: the parameter shadows the ``filter`` builtin.)
     """
     logsdir = xjoin(self.options.multi_repository_set_path, '_logs')
     # Archive this run's pypm log output under <mreposet>/_logs.
     with log.archivedby(logging.getLogger('pypm'),
                         logsdir,
                         'repository_genidx',
                         level=logging.INFO,
                         formatter=logging.Formatter('%(asctime)s %(message)s')):
         mreposet = MultiRepositorySet(
             self.options.multi_repository_set_path,
             self.options.configfile
         )

         skipped = 0
         LOG.info('Generating indices for repositories in: %s', mreposet.path)
         with textui.longrun(LOG):
             for repo in mreposet:
                 # Skip repositories that don't match the user-given filter.
                 if filter and not fnmatch(repo.path, '*'+filter+'*'):
                     skipped += 1
                     continue
                 LOG.info('')
                 LOG.info('-> {0.name:6}{0.pyver:6}{0.osarch:15}'.format(repo))
                 LOG.info('  %s', repo.path)
                 idx = RepositoryIndex(repo)
                 idx.generate_index()
         if skipped:
             LOG.warn('skipped %d repositories', skipped)
예제 #8
0
    def genidx_mreposet(self, filters):
        """Regenerate the index of each repository in the multi-repo set

        Repositories whose path matches none of the ``*<f>*`` patterns in
        ``filters`` (when ``filters`` is truthy) are skipped and counted.
        """
        logsdir = xjoin(self.options.multi_repository_set_path, '_logs')
        with log.archivedby(
                logging.getLogger('pypm'),
                logsdir,
                'repository_genidx',
                level=logging.INFO,
                formatter=logging.Formatter('%(asctime)s %(message)s')):
            mreposet = MultiRepositorySet(
                self.options.multi_repository_set_path,
                self.options.configfile)

            skipped = 0
            LOG.info('Generating indices for repositories in: %s',
                     mreposet.path)
            with textui.longrun(LOG):
                for repo in mreposet:
                    # A generator is enough for any(); no list needed.
                    if filters and not any(
                            fnmatch(repo.path, '*' + f + '*')
                            for f in filters):
                        skipped += 1
                        continue
                    LOG.info('')
                    LOG.info(
                        '-> {0.name:6}{0.pyver:6}{0.osarch:15}'.format(repo))
                    LOG.info('  %s', repo.path)
                    RepositoryIndex(repo).generate_index()
            if skipped:
                LOG.info('ALERT: skipped %d repositories', skipped)
예제 #9
0
    def _read_info_json(self, pypm_file):
        """Read cached info.json (as dict) from the .d/ directory

        If cached version is missing, read from the package file itself, which
        would be an expensive operation.

        Return the parsed metadata dict with a normalized ``name`` field.
        """
        info_json_loc = xjoin(pypm_file + '.d', 'info.json')

        try:
            s = self.repository.uptree.open_and_read(info_json_loc)
        except IOError as e:
            # There seems to be no .d/info.json file; perhaps this is a
            # 'custom' that is not managed by pypm-builder. So let's extract
            # info.json from the package file (.pypm) even though that is
            # expensive (so we will also warn the user)
            LOG.info(
                'ALERT: Cache file (.d/info.json) missing; extracting from %s',
                pypm_file)
            s = PackageFile(pypm_file).retrieve_info_json()

        d = json.loads(s)

        # It is not clear whether info.json's "name" field is canonical (i.e.,
        # lower case safe version of name, that is guaranteed to be same).
        # Therefore, we do one final conversion here.
        d['name'] = pkg_resources.safe_name(d['name']).lower()
        return d
예제 #10
0
    def get_repository(self, pyver, osarch, autocreate=False):
        """Return the ``Repository`` for the given Python version and platform.

        When ``autocreate`` is true, the repository directory is created on
        disk if it does not exist yet.
        """
        repo_path = xjoin(self.path, pyver, osarch)
        repo_url = '/'.join([self.url, pyver, osarch])
        if autocreate:
            # Idempotent: creates the directory tree only when missing.
            sh.mkdirs(repo_path)

        return Repository(repo_path, self.name, pyver, osarch, repo_url)
예제 #11
0
 def get_repository(self, pyver, osarch, autocreate=False):
     """Return a ``Repository`` object for the given Python version/platform

     If ``autocreate`` is true, create the repository directory on disk
     when it is missing.
     """
     path = xjoin(self.path, pyver, osarch)
     url = '/'.join([self.url, pyver, osarch])
     if autocreate:
         # create, if does not already exist
         sh.mkdirs(path)

     return Repository(path, self.name, pyver, osarch, url)
예제 #12
0
    def get_index(self):
        """Return an existing index as ``RepoPackageDatabase``

        Returned index database corresponds to a temporary file (as the index
        file is originally compressed; it needs to be extracted to a temporary
        location) .. hence any attempts to "write" on the returned index
        database will be futile.
        """
        index_gz = xjoin(self.repository.path, 'index.gz')
        return RepoPackageDatabase(_ungzip(index_gz))
예제 #13
0
    def python_exe(self):
        """Heuristically locate the Python executable under ``self.root_dir``.

        Raises ``IOError`` when no candidate executable can be found.
        """
        # NOTE: we CANNOT use get_install_scheme_path() in `script_dirs`, as
        # that would lead to infinite loop (stack overflow). This is why the
        # method call is commented out below; and we must resort to heuristics.
        if WIN:
            executable = 'Python.exe'
            executable_alt = [
                'Python[23456789].[0123456789].exe',  # eg: python3.1.exe
                'Python[23456789].exe',  # eg: python3.exe
                'Python[23456789][0123456789].exe',  # eg: python31.exe (old apy)
            ]
            script_dirs = [
                xjoin(self.root_dir),
                xjoin(self.root_dir, 'Scripts'),  # virtualenv
                xjoin(self.root_dir, 'bin'),  # some, like VanL, do this
                # self.get_install_scheme_path('scripts')
            ]
        else:
            executable = 'python'
            executable_alt = [
                'python[23456789].[0123456789]',  # eg: python3.1
                'python[23456789]',  # eg: python3
            ]
            script_dirs = [
                xjoin(self.root_dir, 'bin'),
                # self.get_install_scheme_path('scripts')
            ]

        # First preference: the canonical executable name in any script dir.
        for directory in script_dirs:
            candidate = os.path.join(directory, executable)
            if os.path.exists(candidate):
                return candidate

        # In one last attempt, try finding executables with pyver in them. eg:
        # python31.exe or python3.1
        for directory in script_dirs:
            for pattern in executable_alt:
                matches = glob(os.path.join(directory, pattern))
                if matches:
                    return matches[0]

        raise IOError(
            'cannot find the Python executable; '
            'searched in these directories:\n\t%s'
            % '\n\t'.join(script_dirs))
예제 #14
0
    def python_exe(self):
        """Heuristically locate the Python executable under ``self.root_dir``

        Tries the canonical executable name in each known scripts directory
        first, then falls back to versioned names (eg: python3.1, python31.exe).
        Raises ``IOError`` when nothing is found.
        """
        # NOTE: we CANNOT use get_install_scheme_path() in `script_dirs`, as
        # that would lead to infinite loop (stack overflow). This is why the
        # method call is commented out below; and we must resort to heuristics.
        if WIN:
            executable = 'Python.exe'
            executable_alt = [
                'Python[23456789].[0123456789].exe', # eg: python3.1.exe
                'Python[23456789].exe',              # eg: python3.exe
                'Python[23456789][0123456789].exe',  # eg: python31.exe (old apy)
                ]
            script_dirs = [
                xjoin(self.root_dir),
                xjoin(self.root_dir, 'Scripts'),  # virtualenv
                xjoin(self.root_dir, 'bin'),      # some, like VanL, do this
                # self.get_install_scheme_path('scripts')
            ]
        else:
            executable = 'python'
            executable_alt = [
                'python[23456789].[0123456789]', # eg: python3.1 
                'python[23456789]',              # eg: python3
            ]
            script_dirs = [
                xjoin(self.root_dir, 'bin'),
                # self.get_install_scheme_path('scripts')
            ]

        # First preference: the exact canonical name in any script dir.
        for script_dir in script_dirs:
            python_exe = os.path.join(script_dir, executable)
            if os.path.exists(python_exe):
                return python_exe

        # In one last attempt, try finding executables with pyver in them. eg:
        # python31.exe or python3.1
        for script_dir in script_dirs:
            for pat in executable_alt:
                for python_exe in glob(os.path.join(script_dir, pat)):
                    # Return the very first glob match.
                    return python_exe

        raise IOError(
            'cannot find the Python executable; ' + \
            'searched in these directories:\n\t%s' % \
                '\n\t'.join(script_dirs))
예제 #15
0
    def get_index(self):
        """Return an existing index as ``RepoPackageDatabase``

        Returned index database corresponds to a temporary file (as the index
        file is originally compressed; it needs to be extracted to a temporary
        location) .. hence any attempts to "write" on the returned index
        database will be futile.
        """
        # _ungzip presumably extracts index.gz to a temp file and returns
        # its path -- TODO confirm against its definition.
        return RepoPackageDatabase(
            _ungzip(xjoin(self.repository.path, 'index.gz')))
예제 #16
0
 def __init__(self, cmd, timeout, stdout, stderr):
     """Compose the timeout error message from command and captured output."""
     details = [
         'timed out; ergo process is terminated',
         'seconds elapsed: {0}'.format(timeout),
         'command: {0}'.format(cmd),
         'pwd: {0}'.format(xjoin(os.getcwd())),
         'stderr:\n{0}'.format(stderr),
         'stdout:\n{0}'.format(stdout),
     ]
     super(RunTimedout, self).__init__('\n'.join(details))
예제 #17
0
 def __init__(self, p, cmd, stdout, stderr):
     """Keep the captured streams and build the non-zero-exit message."""
     self.stdout = stdout
     self.stderr = stderr

     details = [
         'non-zero returncode: {0}'.format(p.returncode),
         'command: {0}'.format(cmd),
         'pwd: {0}'.format(xjoin(os.getcwd())),
         'stderr:\n{0}'.format(stderr),
         'stdout:\n{0}'.format(stdout),
     ]
     super(RunNonZeroReturn, self).__init__('\n'.join(details))
예제 #18
0
파일: _proc.py 프로젝트: ActiveState/applib
    def __init__(self, cmd, stdout, stderr, errors):
        """Compose a detailed error message for a failed command run

        ``errors`` is a list of message lines describing the failure; the
        command, working directory and captured output are appended to it.
        """
        self.stdout = stdout
        self.stderr = stderr

        # Copy so the caller's ``errors`` list is not mutated below.
        msg = errors[:]
        msg.extend([
            'command: {0}'.format(safe_unicode(cmd)),
            'pwd: {0}'.format(xjoin(os.getcwd()))])

        if stderr is None:
            # stderr was merged into stdout; report one combined section.
            msg.append(
                'OUTPUT:\n{0}'.format(_limit_str(safe_unicode(stdout))))
        else:
            msg.extend([
                'STDERR:\n{0}'.format(_limit_str(safe_unicode(stderr))),
                'STDOUT:\n{0}'.format(_limit_str(safe_unicode(stdout)))])

        super(RunError, self).__init__('\n'.join(msg))
예제 #19
0
    def __init__(self, cmd, stdout, stderr, errors):
        """Record the streams and assemble a multi-line failure message."""
        self.stdout = stdout
        self.stderr = stderr

        # Work on a copy; the caller's list must stay untouched.
        lines = list(errors)
        lines.append('command: {0}'.format(safe_unicode(cmd)))
        lines.append('pwd: {0}'.format(xjoin(os.getcwd())))

        if stderr is None:
            # stderr was captured together with stdout.
            lines.append('OUTPUT:\n{0}'.format(_limit_str(safe_unicode(stdout))))
        else:
            lines.append('STDERR:\n{0}'.format(_limit_str(safe_unicode(stderr))))
            lines.append('STDOUT:\n{0}'.format(_limit_str(safe_unicode(stdout))))

        super(RunError, self).__init__('\n'.join(lines))
예제 #20
0
 def pack_contents(fn, parentdir):
     """Pack the contents of directory ``parentdir`` into ``fn``"""
     entries = [xjoin(parentdir, name) for name in os.listdir(parentdir)]
     sh.pack_archive(fn, entries, parentdir)
예제 #21
0
import six.moves
from applib import sh
from applib.misc import xjoin

from pypm.common import net, licensing
from pypm.common.util import wrapped, concise_path
from pypm.common.package import PackageFile
from pypm.common.repository import RepoPackage
from pypm.common.supported import PLATNAME
from pypm.client.base import application
from pypm.client import error

LOG = logging.getLogger(__name__)

# TODO: we are not actually utilizing the download "cache" yet.
DOWNLOAD_CACHE = xjoin(application.locations.user_cache_dir, 'download-cache')


class Downloader:
    def __init__(self, pypmenv):
        # The PyPM environment this downloader operates in.
        self.pypmenv = pypmenv

    def download_packages(self, packages):
        """Download the given list of packages
        
        We first download the BE packages first in order to catch license
        related errors early. This does not, however, prevent late errors
        occuring due to missing/expired license.
        
        Return a dictionary of location to downloaded packages.
        """
예제 #22
0
 def get_local_index_path(self, remote_repository):
     """Return the path of the locally cached index for ``remote_repository``."""
     return xjoin(self.path, remote_repository.get_unique_id(), 'index')
예제 #23
0
 def pack_contents(fn, parentdir):
     """Pack the contents of directory ``parentdir`` into the archive ``fn``"""
     sh.pack_archive(
         fn,
         [xjoin(parentdir, f) for f in os.listdir(parentdir)],
         parentdir)
예제 #24
0
    def generate_index(self):
        """Generate the repository index file (`index.gz`)

        index.gz is the compressed sqlite index containing all of the succeeded
        packages in the repository pool.

        Return the number of packages added to the repository index.
        """
        from pypm.grail.package import PackageShare
        assert P.exists(self.repository.path)
        idx_path = xjoin(self.repository.path, 'index')
        idx_gz_path = idx_path + '.gz'

        # Remove any stale uncompressed index before (re)creating it.
        sh.rm(idx_path)
        db = RepoPackageDatabase(idx_path, touch=True)

        # Tag BE packages; so client may use it to determine if a package is
        # available only to BE customers or not.
        # See also: RepoPackage.requires_be_license property
        pkgtags = 'be' if self.repository.name == 'be' else ''

        # Load package-specific data from share/p/*
        pkgdata = dict([(s.name, s) for s in PackageShare.all()])

        with closing(db):
            LOG.debug('finding packages in %s', self.repository.path)
            packages = self.repository.find_packages()

            LOG.debug('processing %d packages', len(packages))
            # Build one RepoPackage row per package file found in the pool.
            rpkg_list = [
                RepoPackage.create_from(
                    BinaryPackage(**self._read_info_json(pkgfile)),
                    relpath=P.relpath(pkgfile, self.repository.path),
                    tags=pkgtags)
                for pkgfile in textui.ProgressBar.iterate(packages, note="Package")
            ]

            for rpkg in rpkg_list:
                # Optimize index size by removing the "description" field.
                # PyPI's descriptions are typically very long - see
                # http://pypi.python.org/pypi/zc.buildout for example - hence we
                # must remove them from the index.
                rpkg.description = ''
                if rpkg.name in pkgdata:
                    # Add package notes to the description^Wextra field
                    # See pypm.common.package.RepoPackage.FIELDS to understand
                    # why we are abusing this field.
                    notes = list(pkgdata[rpkg.name].get_notes_for(
                        pyver=rpkg.pyver, osarch=rpkg.osarch))

                    rpkg.description = json.dumps({
                        'notes': notes
                    })
                    LOG.debug('Patching "description" field for %s', rpkg)

            # keep only the latest pkg_version in index
            LOG.debug("pruning older pkg_version's")
            rpkg_list = _prune_older_binary_releases(rpkg_list)
            LOG.debug('.. resulting in %d packages', len(rpkg_list))

            LOG.info('  writing index (please wait) ...')
            with db.transaction() as session:
                session.add_all(rpkg_list)
                session.commit()
                session.close()

        LOG.info('  compressing index: ...%s%s',
                 os.path.basename(idx_gz_path),
                 (' (%d)' % len(rpkg_list)) if rpkg_list else '')
        sh.rm(idx_gz_path)
        # NOTE(review): the inner open() handle is never closed explicitly;
        # consider a `with open(idx_path, 'rb')` here.
        with closing(gzip.open(idx_gz_path, 'wb')) as f:
            f.write(open(idx_path, 'rb').read())
        sh.rm(idx_path)

        return len(rpkg_list)
예제 #25
0
    def get_abspath(self, relpath):
        """Get absolute path to a file described by `relpath`

        `relpath` is supposed to be inside the Python installation.
        """
        # base_dir is presumably the installation root -- TODO confirm
        return xjoin(self.base_dir, relpath)
예제 #26
0
 def pack_contents(fn, parentdir):
     """Pack the contents of directory ``parentdir`` into ``fn``"""
     members = [xjoin(parentdir, entry) for entry in os.listdir(parentdir)]
     sh.pack_archive(fn, members, parentdir)
예제 #27
0
    def generate_index(self):
        """Generate the repository index file (`index.gz`)

        index.gz is the compressed sqlite index containing all of the succeeded
        packages in the repository pool.

        Return the number of packages added to the repository index.
        """
        from pypm.grail.package import PackageShare
        assert P.exists(self.repository.path)
        idx_path = xjoin(self.repository.path, 'index')
        idx_gz_path = idx_path + '.gz'

        # Remove any stale uncompressed index before (re)creating it.
        sh.rm(idx_path)
        db = RepoPackageDatabase(idx_path, touch=True)

        # Tag BE packages; so client may use it to determine if a package is
        # available only to BE customers or not.
        # See also: RepoPackage.requires_be_license property
        pkgtags = 'be' if self.repository.name == 'be' else ''

        # Load package-specific data from share/p/*
        pkgdata = dict([(s.name, s) for s in PackageShare.all()])

        with closing(db):
            LOG.debug('finding packages in %s', self.repository.path)
            packages = self.repository.find_packages()

            LOG.debug('processing %d packages', len(packages))
            # One RepoPackage row per package file found in the pool.
            rpkg_list = [
                RepoPackage.create_from(
                    BinaryPackage(**self._read_info_json(pkgfile)),
                    relpath=P.relpath(pkgfile, self.repository.path),
                    tags=pkgtags)
                for pkgfile in textui.ProgressBar.iterate(packages,
                                                          note="Package")
            ]

            for rpkg in rpkg_list:
                # Optimize index size by removing the "description" field.
                # PyPI's descriptions are typically very long - see
                # http://pypi.python.org/pypi/zc.buildout for example - hence we
                # must remove them from the index.
                rpkg.description = ''
                if rpkg.name in pkgdata:
                    # Add package notes to the description^Wextra field
                    # See pypm.common.package.RepoPackage.FIELDS to understand
                    # why we are abusing this field.
                    notes = list(pkgdata[rpkg.name].get_notes_for(
                        pyver=rpkg.pyver, osarch=rpkg.osarch))

                    rpkg.description = json.dumps({'notes': notes})
                    LOG.debug('Patching "description" field for %s', rpkg)

            # keep only the latest pkg_version in index
            LOG.debug("pruning older pkg_version's")
            rpkg_list = _prune_older_binary_releases(rpkg_list)
            LOG.debug('.. resulting in %d packages', len(rpkg_list))

            LOG.info('  writing index (please wait) ...')
            with db.transaction() as session:
                session.add_all(rpkg_list)
                session.commit()
                session.close()

        LOG.info('  compressing index: ...%s%s', os.path.basename(idx_gz_path),
                 (' (%d)' % len(rpkg_list)) if rpkg_list else '')
        sh.rm(idx_gz_path)
        # Close both handles deterministically; the original leaked the
        # uncompressed-index read handle until garbage collection.
        with closing(gzip.open(idx_gz_path, 'wb')) as gz:
            with open(idx_path, 'rb') as raw:
                gz.write(raw.read())
        sh.rm(idx_path)

        return len(rpkg_list)
예제 #28
0
    def get_abspath(self, relpath):
        """Return the absolute path for ``relpath``

        ``relpath`` is expected to point inside the Python installation.
        """
        # Anchor the relative path at the installation's base directory.
        absolute = xjoin(self.base_dir, relpath)
        return absolute
예제 #29
0
import six.moves
from applib import sh
from applib.misc import xjoin

from pypm.common import net, python, licensing
from pypm.common.util import existing, wrapped, concise_path
from pypm.common.package import PackageFile
from pypm.common.repository import RepoPackage
from pypm.common.supported import PLATNAME
from pypm.client.base import application
from pypm.client import error

LOG = logging.getLogger(__name__)

# TODO: we are not actually utilizing the download "cache" yet.
DOWNLOAD_CACHE = xjoin(application.locations.user_cache_dir, 'download-cache')


class Downloader:
    
    def __init__(self, pypmenv):
        # PyPM environment (installation + repositories) this downloader serves.
        self.pypmenv = pypmenv
    
    def download_packages(self, packages):
        """Download the given list of packages
        
        We first download the BE packages first in order to catch license
        related errors early. This does not, however, prevent late errors
        occuring due to missing/expired license.
        
        Return a dictionary of location to downloaded packages.
예제 #30
0
 def get_local_index_path(self, remote_repository):
     """Return where ``remote_repository``'s index is cached locally."""
     unique_id = remote_repository.get_unique_id()
     return xjoin(self.path, unique_id, 'index')