Example #1
    def _build_session(self, options, retries=None, timeout=None):
        session = PipSession(
            cache=(
                normalize_path(os.path.join(options.cache_dir, "http"))
                if options.cache_dir else None
            ),
            retries=retries if retries is not None else options.retries,
            insecure_hosts=options.trusted_hosts,
        )

        # Handle custom ca-bundles from the user
        if options.cert:
            session.verify = options.cert

        # Handle SSL client certificate
        if options.client_cert:
            session.cert = options.client_cert

        # Handle timeouts
        if options.timeout or timeout:
            session.timeout = (
                timeout if timeout is not None else options.timeout
            )

        # Handle configured proxies
        if options.proxy:
            session.proxies = {
                "http": options.proxy,
                "https": options.proxy,
            }

        # Determine if we can prompt the user for authentication or not
        session.auth.prompting = not options.no_input

        return session
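For context, a minimal sketch of how this method might be driven. The options namespace, its values, and the command object below are hypothetical stand-ins for pip's parsed command-line options and a pip Command instance:

from argparse import Namespace

# Hypothetical options object; the attribute names mirror what _build_session reads.
options = Namespace(
    cache_dir='/tmp/pip-http-cache',  # hypothetical cache location
    retries=3,
    trusted_hosts=[],
    cert=None,
    client_cert=None,
    timeout=30,
    proxy=None,
    no_input=False,
)
session = command._build_session(options)  # 'command' stands for a pip Command instance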
Example #2
    def _build_session(self, options):
        session = PipSession(retries=options.retries)

        # Handle custom ca-bundles from the user
        if options.cert:
            session.verify = options.cert

        # Handle SSL client certificate
        if options.client_cert:
            session.cert = options.client_cert

        # Handle timeouts
        if options.timeout:
            session.timeout = options.timeout

        # Handle configured proxies
        if options.proxy:
            session.proxies = {
                "http": options.proxy,
                "https": options.proxy,
            }

        # Determine if we can prompt the user for authentication or not
        session.auth.prompting = not options.no_input

        return session
Example #3
    def _build_session(options, retries=None, timeout=None):
        session = PipSession(
            cache=(
                normalize_path(os.path.join(options.get('cache_dir'), 'http'))
                if options.get('cache_dir') else None
            ),
            retries=retries if retries is not None else options.get('retries'),
            insecure_hosts=options.get('trusted_hosts'),
        )

        # Handle custom ca-bundles from the user
        if options.get('cert'):
            session.verify = options.get('cert')

        # Handle SSL client certificate
        if options.get('client_cert'):
            session.cert = options.get('client_cert')

        # Handle timeouts
        if options.get('timeout') or timeout:
            session.timeout = (
                timeout if timeout is not None else options.get('timeout')
            )

        # Handle configured proxies
        if options.get('proxy'):
            session.proxies = {
                'http': options.get('proxy'),
                'https': options.get('proxy'),
            }

        # Determine if we can prompt the user for authentication or not
        session.auth.prompting = not options.get('no_input')

        return session
Example #4
import urlparse  # Python 2 stdlib; on Python 3 use urllib.parse

from pip.download import PipSession
from pip.index import Link, PackageFinder
from pip.req import InstallRequirement


def get_versions(package):
    host = "https://pypi.python.org/simple/"
    url = urlparse.urljoin(host, package) + '/'
    session = PipSession()
    session.timeout = 15
    session.auth.prompting = True
    pf = PackageFinder(
        find_links=[],
        index_urls=[host],  # PackageFinder expects a list of index URLs
        use_wheel=True,
        allow_external=[],
        allow_unverified=[],
        allow_all_external=False,
        allow_all_prereleases=False,
        process_dependency_links=False,
        session=session,
    )

    location = [Link(url, trusted=True)]
    req = InstallRequirement.from_line(package, None)
    versions = []
    for page in pf._get_pages(location, req):
        versions.extend(
            version
            for _, _, version in pf._package_versions(page.links, package)
        )
    return versions
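A hypothetical invocation; the result depends on what the index serves at call time:

versions = get_versions('requests')  # hypothetical package name
print(versions)  # a list of version values extracted from the index pages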
Example #5
    def _get_content_type(url, session=None):
        """Get the Content-Type of the given url, using a HEAD request"""
        if session is None:
            session = PipSession()

        scheme, netloc, path, query, fragment = urlparse.urlsplit(url)
        if scheme not in ('http', 'https', 'ftp', 'ftps'):
            # FIXME: some warning or something?
            # assertion error?
            return ''

        resp = session.head(url, allow_redirects=True)
        resp.raise_for_status()

        return resp.headers.get("Content-Type", "")
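A hypothetical call, treating the helper as a standalone function; unsupported schemes return an empty string by design:

content_type = _get_content_type('https://pypi.python.org/simple/pip/')
# typically something like 'text/html; charset=utf-8' for an index page
assert _get_content_type('ssh://example.com/repo') == ''  # scheme not handled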
Example #6
    def _build_session(self, options):
        session = PipSession(
            cache=normalize_path(os.path.join(options.cache_dir, "http")),
            retries=options.retries,
        )

        # Handle custom ca-bundles from the user
        if options.cert:
            session.verify = options.cert
        elif options.no_check_certificate:
            session.verify = False

        # Handle SSL client certificate
        if options.client_cert:
            session.cert = options.client_cert

        # Handle timeouts
        if options.timeout:
            session.timeout = options.timeout

        # Handle configured proxies
        if options.proxy:
            session.proxies = {
                "http": options.proxy,
                "https": options.proxy,
            }

        # Determine if we can prompt the user for authentication or not
        session.auth.prompting = not options.no_input

        return session
Example #7
    def basic_reqset(self, **kwargs):
        return RequirementSet(
            build_dir=os.path.join(self.tempdir, 'build'),
            src_dir=os.path.join(self.tempdir, 'src'),
            download_dir=None,
            session=PipSession(),
            **kwargs
        )
Example #8
# -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
from pip.req import parse_requirements
from pip.download import PipSession

VERSION = '{{version}}'

REQUIREMENTS = parse_requirements('requirements.txt', session=PipSession())
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
    README = readme.read()

setup(
    name='{{project_name}}',
    version=VERSION,
    description='{{description}}',
    long_description=README,
    include_package_data=True,
    packages=find_packages('{{project_name}}'),
    install_requires=[str(r.req) for r in REQUIREMENTS],
    entry_points={
        'console_scripts': [
            'manage = {{project_name}}.manage:do_manage',
        ],
    },
)
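One caveat worth keeping in mind: parse_requirements() returns a one-shot generator (several examples below note this explicitly), so a requirement list that is needed more than once should be materialized immediately. A minimal sketch using the same imports as above:

REQUIREMENTS = [
    str(r.req)
    for r in parse_requirements('requirements.txt', session=PipSession())
]  # now a plain list, safe to iterate repeatedly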
Example #9
import re, ast, os
from os.path import relpath, join
from pip.req import parse_requirements
from setuptools import setup, find_packages


def get_reqs(reqs):
    return [str(ir.req) for ir in reqs]


try:
    install_reqs = get_reqs(parse_requirements("requirements.txt"))
except TypeError:
    from pip.download import PipSession
    install_reqs = get_reqs(
        parse_requirements("requirements.txt", session=PipSession()))


def find_package_data(data_root, package_root):
    files = []
    for root, dirnames, filenames in os.walk(data_root):
        for fn in filenames:
            files.append(relpath(join(root, fn), package_root))
    return files


def get_version():
    _version_re = re.compile(r'__version__\s+=\s+(.*)')
    with open('OpenMMCubes/__init__.py', 'rb') as f:
        version = str(
            ast.literal_eval(
                _version_re.search(f.read().decode('utf-8')).group(1)))
Example #10
def test_find_all_versions_find_links(data):
    finder = PackageFinder(
        [data.find_links], [], session=PipSession())
    versions = finder._find_all_versions('simple')
    assert [str(v.version) for v in versions] == ['3.0', '2.0', '1.0']
Example #11
def test_find_all_versions_find_links_and_index(data):
    finder = PackageFinder(
        [data.find_links], [data.index_url('simple')], session=PipSession())
    versions = finder._find_all_versions('simple')
    # first the find-links versions then the page versions
    assert [str(v.version) for v in versions] == ['3.0', '2.0', '1.0', '1.0']
Example #12
def test_tilde(data):
    """Finder can accept a path with ~ in it and will normalize it."""
    finder = PackageFinder(['~/python-pkgs'], [], session=PipSession())
    req = InstallRequirement.from_line("gmpy")
    with pytest.raises(DistributionNotFound):
        finder.find_requirement(req, False)
Example #13
def test_incorrect_case_file_index(data):
    """Test PackageFinder detects latest using wrong case"""
    req = InstallRequirement.from_line('dinner', None)
    finder = PackageFinder([], [data.find_links3], session=PipSession())
    link = finder.find_requirement(req, False)
    assert link.url.endswith("Dinner-2.0.tar.gz")
Example #14
    def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
        if session is None:
            session = PipSession()

        url = link.url
        url = url.split('#', 1)[0]
        if cache is not None and cache.too_many_failures(url):
            return None

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug('Cannot look at %(scheme)s URL %(link)s' %
                             locals())
                return None

        if cache is not None:
            inst = cache.get_page(url)
            if inst is not None:
                return inst
        try:
            if skip_archives:
                if cache is not None:
                    if cache.is_archive(url):
                        return None
                filename = link.filename
                for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(
                            url,
                            session=session,
                        )
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug(
                                'Skipping page %s because of Content-Type: %s'
                                % (link, content_type))
                            if cache is not None:
                                cache.set_is_archive(url)
                            return None
            logger.debug('Getting page %s' % url)

            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query,
             fragment) = urlparse.urlparse(url)
            if scheme == 'file' and os.path.isdir(url2pathname(path)):
                # add trailing slash if not present so urljoin doesn't trim final segment
                if not url.endswith('/'):
                    url += '/'
                url = urlparse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s' % url)

            resp = session.get(url, headers={"Accept": "text/html"})
            resp.raise_for_status()

            # The check for archives above only works if the url ends with
            #   something that looks like an archive. However that is not a
            #   requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
            #   redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
            #   Unless we issue a HEAD request on every url we cannot know
            #   ahead of time for sure if something is HTML or not. However we
            #   can check after we've downloaded it.
            content_type = resp.headers.get('Content-Type', 'unknown')
            if not content_type.lower().startswith("text/html"):
                logger.debug('Skipping page %s because of Content-Type: %s' %
                             (link, content_type))
                if cache is not None:
                    cache.set_is_archive(url)
                return None

            inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
        except requests.HTTPError as exc:
            level = 2 if exc.response.status_code == 404 else 1
            cls._handle_fail(req, link, exc, url, cache=cache, level=level)
        except requests.ConnectionError as exc:
            cls._handle_fail(
                req,
                link,
                "connection error: %s" % exc,
                url,
                cache=cache,
            )
        except requests.Timeout:
            cls._handle_fail(req, link, "timed out", url, cache=cache)
        except SSLError as exc:
            reason = ("There was a problem confirming the ssl certificate: "
                      "%s" % exc)
            cls._handle_fail(
                req,
                link,
                reason,
                url,
                cache=cache,
                level=2,
                meth=logger.notify,
            )
        else:
            if cache is not None:
                cache.add_page([url, resp.url], inst)
            return inst
Example #15
import setuptools

from os import path
from pip.req import parse_requirements
from pip.download import PipSession

here = path.dirname(path.abspath(__file__))

requirements_txt = path.join(here, "requirements.txt")
requirements = parse_requirements(requirements_txt, session=PipSession())
requirements = [str(ir.req) for ir in requirements]

# Ugly hack to get the version prior to installation, without having the amcat
# package depend on setup.py.
version = open(path.join(here, "amcat", "_version.py"),
               mode="r",
               encoding="ascii")
version = next(
    filter(lambda s: s.startswith("__version__"), version.readlines()))
version = version.split("=")[-1].strip().strip("'").strip('"')

# Package anything you can find, except for tests
packages = setuptools.find_packages(here, exclude=["*.tests"])

description = """
System for document management and analysis. The purpose of AmCAT is to
make it easier to conduct manual or automatic analyses of texts for (social)
scientific purposes. AmCAT can improve the use and standard of content
analysis in the social sciences and stimulate sharing data and analyses.
"""
Example #16
import os
import setuptools
from pip.download import PipSession
from pip.req import parse_requirements

PACKAGE_PATH = os.path.dirname(os.path.realpath(__file__))

# parse_requirements() returns generator of pip.req.InstallRequirement objects
PACKAGE_REQS = parse_requirements("requirements.txt", session=PipSession())

# reqs is a list of requirement
# e.g. ['tornado==3.2.2', '...']
REQS = [str(ir.req) for ir in PACKAGE_REQS]

if __name__ == "__main__":
    setuptools.setup(
        name="pubtrans",
        version="0.0.0",
        description="Media Location Information Service",
        author="Luciano Afranllie",
        namespace_packages=['pubtrans'],
        packages=setuptools.find_packages(PACKAGE_PATH, exclude=["*.test",
                                                                 "*.test.*",
                                                                 "test.*",
                                                                 "test"]),
        keywords="pubtrans",
        install_requires=REQS,
        include_package_data=True,
        entry_points={
            'console_scripts': [
                'pubtrans-runservice = pubtrans.common.runservice:main'
Example #17
# -*- coding: utf-8 -*-

from __future__ import absolute_import, division, print_function

from pip.req import parse_requirements
from pip.download import PipSession
from setuptools import setup, find_packages
from tornado_graphql_example.version import __version__

requirements = [
    str(r.req)
    for r in parse_requirements('requirements.in', session=PipSession())
]

setup(
    name='tornado_graphql_example',
    version=__version__,
    description='An example GraphQL API Server implemented with Tornado',
    long_description='An example GraphQL API Server implemented with Tornado',
    author='',
    author_email='',
    packages=find_packages(),
    install_requires=requirements,
    entry_points={
        'console_scripts': [
            'tornado-graphql-example = tornado_graphql_example:main',
            'tornado-graphql-example-jobserver = tornado_graphql_example.jobserver:main',
            'tornado-graphql-example-jobserverapp = tornado_graphql_example.jobserverapp:main'
        ]
    })
Example #18
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import os
from os import path
import inspect
from pip.req import parse_requirements
from pip.download import PipSession

here = path.abspath(path.dirname(__file__))

# Get path from current file location
dirPath = os.path.dirname(
    os.path.abspath(inspect.getfile(inspect.currentframe())))
requirementsPath = os.path.join(dirPath, "requirements.txt")
# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements(requirementsPath, session=PipSession())

# Create the list of requirements
reqs = [str(ir.req) for ir in install_reqs]

setup(
    name='dataactbrokerbackend',
    version='0.0.1',
    description='DATA Act Broker Backend',
    long_description=
    'The DATA Act Broker API powers the DATA Act\'s data submission process.',
    url=
    'https://github.com/fedspendingtransparency/data-act-broker-backend.git',
    author='US Treasury',
    author_email='*****@*****.**',
    license='CC0 1.0 Universal (CC0 1.0) Public Domain Dedication',
Example #19
from setuptools import setup, find_packages
from pip.req import parse_requirements
from pip.download import PipSession
from insightful.version import __version__

req_file = 'requirements.txt'
reqs = [str(r.req) for r in parse_requirements(req_file, session=PipSession())]

setup(name='diagnostic',
      version=__version__,
      install_requires=reqs,
      packages=find_packages(),
      test_suite='tests')
Example #20
import os
from pip.download import PipSession
from pip.req import parse_requirements
from setuptools import setup

BASE_DIR = os.path.dirname(os.path.realpath(__file__))
reqs_file = os.path.join(BASE_DIR, 'requirements.txt')
install_reqs = parse_requirements(reqs_file, session=PipSession())

setup(
    name='aws-portknock',
    version='0.2',
    py_modules=['aws_portknock'],
    description='Port knocking for AWS security groups, updated from Michel Alexandre Salim',
    author='Hunter Tom',
    author_email='TBD',
    url='https://github.com/hunttom/aws-portknock',
    download_url='https://github.com/hunttom/aws-portknock/tarball/0.2',
    keywords=['aws'],
    classifiers=[],
    install_requires=[str(r.req)
                      for r in install_reqs],
    entry_points='''
        [console_scripts]
        aws-portknock=aws_portknock:cli
    ''',
)
Example #21
from setuptools import find_packages, setup
from pip.req import parse_requirements
from pip.download import PipSession

with open('README.rst', 'r') as f:
    long_description = f.read()

setup(
    name="Ion",
    description="The next-generation Intranet platform for TJHSST",
    long_description=long_description,
    author="The TJHSST Computer Systems Lab",
    author_email="*****@*****.**",
    url="https://github.com/tjcsl/ion",
    version="1.0",
    license="GPL",
    test_suite='intranet.test.test_suite.run_tests',
    setup_requires=['pip>=6.0', 'setuptools_git'],  # pip >= 6.0 provides the session= parameter used below
    install_requires=[
        str(dep.req)
        for dep in parse_requirements('requirements.txt', session=PipSession())
    ],  # FIXME: preserve markers
    packages=find_packages(),
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: GNU General Public License v2 or later (GPLv2+)',
        'Operating System :: POSIX :: Linux',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Framework :: Django :: 1.9',
    ],
)
Example #22
import os

from pip.download import PipSession
from pip.req import parse_requirements
from setuptools import setup, find_packages


def get_requirements():
    requirements = parse_requirements(
        os.path.join(os.path.dirname(__file__), "requirements.txt"),
        session=PipSession())
    return [str(req.req) for req in requirements]


def get_version():
    __version__ = None
    with open('kyber/_version.py') as version_src:
        exec(version_src.read())
    return __version__

setup(
    name='kyber-k8s',
    version=get_version(),
    description='Deploy and manage simple apps in kubernetes.',
    url='https://github.com/TakumiHQ/kyber',
    author='Steinn Eldjarn Sigurdarson',
    author_email='*****@*****.**',
    keywords=['aws', 'kubernetes', 'deployments', 'app', 'paas'],
    install_requires=[
        str(req.req)
        for req in parse_requirements("requirements.txt", session=PipSession())
    ],
    packages=find_packages(),
    package_data={'kyber': ['templates/*.yaml', 'templates/*.sh']},
    entry_points='''
        [console_scripts]
        kb=kyber:cli
    '''
)
Example #23
def find_requirements(f='requirements.txt'):
    # parse_requirements() returns a generator of pip.req.InstallRequirement objects
    reqs = parse_requirements(f, session=PipSession())  # use the parameter, not a hard-coded name
    return [str(ir.req) for ir in reqs]
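A hypothetical use inside a setup() call, assuming the usual setuptools import:

from setuptools import setup

setup(
    name='example-package',  # hypothetical project name
    install_requires=find_requirements(),
)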
Example #24
def get_requirements():
    requirements = parse_requirements(
        os.path.join(os.path.dirname(__file__), "requirements.txt"),
        session=PipSession())
    return [str(req.req) for req in requirements]
Example #25
def test_finder_detects_latest_find_links(data):
    """Test PackageFinder detects latest using find-links"""
    req = InstallRequirement.from_line('simple', None)
    finder = PackageFinder([data.find_links], [], session=PipSession())
    link = finder.find_requirement(req, False)
    assert link.url.endswith("simple-3.0.tar.gz")
Example #26

version = load_version()
long_description = load_readme()

# Build description from README and build metadata from Go pipeline.
long_description += "\n"
long_description += "build_revision: {}\n".format(os.getenv('GO_REVISION'))
long_description += "build_pipeline: {}\n".format(
    os.getenv('GO_PIPELINE_NAME'))
long_description += "build_label:    {}\n".format(
    os.getenv('GO_PIPELINE_LABEL'))

requirements = ['Django'] + [
    str(ir.req)
    for ir in parse_requirements('./requirements.txt', session=PipSession())
]

test_requirements = [
    str(ir.req) for ir in parse_requirements('./requirements-dev.txt',
                                             session=PipSession())
]

setup(
    name='baya',
    version=version,
    description="Nested LDAP Groups authorization.",
    long_description=long_description,
    author='Steven Buss',
    author_email='*****@*****.**',
    maintainer='Counsyl Platform Team',
Example #27
def test_find_all_versions_nothing(data):
    """Find nothing without anything"""
    finder = PackageFinder([], [], session=PipSession())
    assert not finder._find_all_versions('pip')
Example #28
def read_requirements():
    '''parses requirements from requirements.txt'''
    reqs_path = os.path.join('.', 'requirements.txt')
    install_reqs = parse_requirements(reqs_path, session=PipSession())
    reqs = [str(ir.req) for ir in install_reqs]
    return reqs
Example #29
def test_find_all_versions_index(data):
    finder = PackageFinder(
        [], [data.index_url('simple')], session=PipSession())
    versions = finder._find_all_versions('simple')
    assert [str(v.version) for v in versions] == ['1.0']
Example #30
from setuptools import setup, find_packages
from pip.download import PipSession
from pip.req import parse_requirements

install_reqs = parse_requirements('requirements.txt', session=PipSession())

reqs = [str(ir.req) for ir in install_reqs]

setup(name="cloudbrain",
      version="0.0.1",
      description="CloudBrain",
      author="Marion Le Borgne",
      url="https://github.com/cloudbrain/cloudbrain",
      package_dir={'': 'src'},
      packages=find_packages('src'),
      install_requires=reqs,
      license=open('LICENSE.txt').read(),
      long_description=open('README.md').read(),
      test_suite='nose.collector',
      tests_require=['mock==1.0.1', 'nose'])
Example #31
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################

try:  # for pip >= 10
    from pip._internal.req import parse_requirements
    pip_session = 'hack'
except ImportError:  # for pip <= 9.0.3
    from pip.req import parse_requirements
    from pip.download import PipSession
    pip_session = PipSession()
from distutils.core import setup

from setuptools import find_packages

# Parse requirements.txt to get the list of dependencies
inst_req = parse_requirements('requirements.txt', session=pip_session)
REQUIREMENTS = [str(r.req) for r in inst_req]

setup(
    name='GeoNode',
    version=__import__('geonode').get_version(),
    description="Application for serving and sharing geospatial data",
    long_description=open('README.md').read(),
    classifiers=["Development Status :: 5 - Production/Stable"],
    python_requires='>=3',
Example #32
INFO_FILE = 'pyrosalind/info.py'
with open(INFO_FILE) as fd:
    code = compile(fd.read(), INFO_FILE, 'exec')
    local_vars = {}
    exec(code, {}, local_vars)  # don't use global vars, save local_vars
    __pkg_name__ = local_vars['__name__']  # save the interesting data
    __version__ = local_vars['__version__']


# access to the file at the package top level (like README)
def path_to(filename):
    return os.path.join(os.path.dirname(__file__), filename)

# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements(path_to('requirements.txt'),
                                  session=PipSession())
reqs = [str(ir.req) for ir in install_reqs]


#########################
# SETUP                 #
#########################
setup(
    name = __pkg_name__,
    version = __version__,
    packages = find_packages(),
    include_package_data = True,  # read the MANIFEST.in file
    install_requires = reqs,

    author = "lucas bourneuf",
    author_email = "*****@*****.**",
Example #33
from codecs import open
from os import path
from setuptools import setup
from pip.download import PipSession
from pip.req import parse_requirements

here = path.abspath(path.dirname(__file__))

with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

lista_objetos_dependencias = parse_requirements('requirements.txt', session=PipSession())
lista_dependencias = [str(objeto.req) for objeto in lista_objetos_dependencias]

setup(
    name='toggl-client',
    packages=["client"],
    version='0.1.1',
    description='Cliente para a toggl API',
    long_description=long_description,
    url='https://github.com/lramosduarte/toggl-client',
    author='Leonardo Ramos Duarte',
    author_email='*****@*****.**',
    license='MIT',

    entry_points={
        'console_scripts': [
            'tgc = client.main:main',
        ]
    },
Example #34
    def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
        if session is None:
            session = PipSession()

        url = link.url
        url = url.split("#", 1)[0]
        if cache is not None and cache.too_many_failures(url):
            return None

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport

        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in "+:":
                logger.debug("Cannot look at %(scheme)s URL %(link)s" % locals())
                return None

        if cache is not None:
            inst = cache.get_page(url)
            if inst is not None:
                return inst
        try:
            if skip_archives:
                if cache is not None:
                    if cache.is_archive(url):
                        return None
                filename = link.filename
                for bad_ext in [".tar", ".tar.gz", ".tar.bz2", ".tgz", ".zip"]:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(url, session=session)
                        if content_type.lower().startswith("text/html"):
                            break
                        else:
                            logger.debug("Skipping page %s because of Content-Type: %s" % (link, content_type))
                            if cache is not None:
                                cache.set_is_archive(url)
                            return None
            logger.debug("Getting page %s" % url)

            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
            if scheme == "file" and os.path.isdir(url2pathname(path)):
                # add trailing slash if not present so urljoin doesn't trim final segment
                if not url.endswith("/"):
                    url += "/"
                url = urlparse.urljoin(url, "index.html")
                logger.debug(" file: URL is directory, getting %s" % url)

            resp = session.get(url)
            resp.raise_for_status()

            # The check for archives above only works if the url ends with
            #   something that looks like an archive. However that is not a
            #   requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
            #   redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
            #   Unless we issue a HEAD request on every url we cannot know
            #   ahead of time for sure if something is HTML or not. However we
            #   can check after we've downloaded it.
            content_type = resp.headers.get("Content-Type", "unknown")
            if not content_type.lower().startswith("text/html"):
                logger.debug("Skipping page %s because of Content-Type: %s" % (link, content_type))
                if cache is not None:
                    cache.set_is_archive(url)
                return None

            inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
        except requests.HTTPError as exc:
            level = 2 if exc.response.status_code == 404 else 1
            cls._handle_fail(req, link, exc, url, cache=cache, level=level)
        except requests.Timeout:
            cls._handle_fail(req, link, "timed out", url, cache=cache)
        except SSLError as exc:
            reason = "There was a problem confirming the ssl certificate: " "%s" % exc
            cls._handle_fail(req, link, reason, url, cache=cache, level=2, meth=logger.notify)
        else:
            if cache is not None:
                cache.add_page([url, resp.url], inst)
            return inst
Example #35
    def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
        if session is None:
            session = PipSession()

        url = link.url
        url = url.split('#', 1)[0]
        if cache is not None and cache.too_many_failures(url):
            return None

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug(
                    'Cannot look at %(scheme)s URL %(link)s' % locals()
                )
                return None

        if cache is not None:
            inst = cache.get_page(url)
            if inst is not None:
                return inst
        try:
            if skip_archives:
                if cache is not None:
                    if cache.is_archive(url):
                        return None
                filename = link.filename
                for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(
                            url, session=session,
                        )
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug(
                                'Skipping page %s because of Content-Type: '
                                '%s' % (link, content_type)
                            )
                            if cache is not None:
                                cache.set_is_archive(url)
                            return None
            logger.debug('Getting page %s' % url)

            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query, fragment) = \
                urlparse.urlparse(url)
            if scheme == 'file' and os.path.isdir(url2pathname(path)):
                # add trailing slash if not present so urljoin doesn't trim
                # final segment
                if not url.endswith('/'):
                    url += '/'
                url = urlparse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s' % url)

            resp = session.get(url, headers={"Accept": "text/html"})
            resp.raise_for_status()

            # The check for archives above only works if the url ends with
            #   something that looks like an archive. However that is not a
            #   requirement of an url. Unless we issue a HEAD request on every
            #   url we cannot know ahead of time for sure if something is HTML
            #   or not. However we can check after we've downloaded it.
            content_type = resp.headers.get('Content-Type', 'unknown')
            if not content_type.lower().startswith("text/html"):
                logger.debug(
                    'Skipping page %s because of Content-Type: %s' %
                    (link, content_type)
                )
                if cache is not None:
                    cache.set_is_archive(url)
                return None

            inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
        except requests.HTTPError as exc:
            level = 2 if exc.response.status_code == 404 else 1
            cls._handle_fail(req, link, exc, url, cache=cache, level=level)
        except requests.ConnectionError as exc:
            cls._handle_fail(
                req, link, "connection error: %s" % exc, url,
                cache=cache,
            )
        except requests.Timeout:
            cls._handle_fail(req, link, "timed out", url, cache=cache)
        except SSLError as exc:
            reason = ("There was a problem confirming the ssl certificate: "
                      "%s" % exc)
            cls._handle_fail(
                req, link, reason, url,
                cache=cache,
                level=2,
                meth=logger.notify,
            )
        else:
            if cache is not None:
                cache.add_page([url, resp.url], inst)
            return inst
Example #36
    def test_req_file_parse_no_only_binary(self, data, finder):
        list(parse_requirements(
            data.reqfiles.join("supported_options2.txt"), finder,
            session=PipSession()))
        expected = pip.index.FormatControl(set(['fred']), set(['wilma']))
        assert finder.format_control == expected
Example #37
def session():
    return PipSession()
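This reads like a pytest fixture from pip's test suite. A minimal sketch of how it would plausibly be declared and consumed; the decorator and the test below are assumptions, not part of the original snippet:

import pytest

from pip.download import PipSession


@pytest.fixture
def session():
    return PipSession()


def test_session_has_auth_handler(session):
    # PipSession carries the auth handler whose prompting flag the
    # _build_session examples above toggle
    assert session.auth is not None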
Example #38
def parse(directory):
    # Parse the requirements.txt
    requirement_packages = []
    if os.path.isfile(directory + '/requirements.txt'):
        try:
            requirements = pip.req.req_file.parse_requirements(
                directory + '/requirements.txt', session=PipSession())
            for install_req in requirements:
                if install_req.original_link:
                    continue
                if install_req.is_pinned:
                    version = next(iter(install_req.specifier)).version
                else:
                    version = None

                pattern = r"-[cr] (.*) \(line \d+\)"
                abs_path = re.search(pattern, install_req.comes_from).group(1)
                rel_path = os.path.relpath(abs_path, directory)

                requirement_packages.append({
                    "name": install_req.req.name,
                    "version": version,
                    "file": rel_path,
                    "requirement": str(install_req.specifier) or None,
                })
        except Exception as e:
            print(json.dumps({"error": repr(e)}))
            exit(1)

    # Parse the setup.py
    setup_packages = []
    if os.path.isfile(directory + '/setup.py'):

        def setup(*args, **kwargs):
            for arg in ['install_requires', 'tests_require']:
                if not kwargs.get(arg):
                    continue
                for req in kwargs.get(arg):
                    install_req = InstallRequirement.from_line(req)
                    if install_req.original_link:
                        continue
                    if install_req.is_pinned:
                        version = next(iter(install_req.specifier)).version
                    else:
                        version = None
                    setup_packages.append({
                        "name": install_req.req.name,
                        "version": version,
                        "file": "setup.py",
                        "requirement": str(install_req.specifier) or None,
                    })

        setuptools.setup = setup

        def noop(*args, **kwargs):
            pass

        global fake_open

        def fake_open(*args, **kwargs):
            content = ("VERSION = (0, 0, 1)\n"
                       "__version__ = '0.0.1'\n"
                       "__author__ = 'someone'\n"
                       "__title__ = 'something'\n"
                       "__description__ = 'something'\n"
                       "__author_email__ = 'something'\n"
                       "__license__ = 'something'\n"
                       "__url__ = 'something'\n")
            return io.StringIO(content)

        try:
            content = open(directory + '/setup.py', 'r').read()

            # Remove `print`, `open` and import statements
            content = content.replace("print(", "noop(")
            content = re.sub(r"\b(\w+\.)*(open|file)\(", "fake_open(", content)
            version_re = re.compile(r"^.*import.*__version__.*$", re.MULTILINE)
            content = re.sub(version_re, "", content)

            # Set variables likely to be imported
            __version__ = '0.0.1'
            __author__ = 'someone'
            __title__ = 'something'
            __description__ = 'something'
            __author_email__ = 'something'
            __license__ = 'something'
            __url__ = 'something'

            # Exec the setup.py
            exec(content)
        except Exception as e:
            pass

    return json.dumps({"result": requirement_packages + setup_packages})
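A hypothetical invocation: on success parse() returns a JSON string; on a requirements parsing error it prints a JSON error object and exits:

result = parse('/path/to/project')  # hypothetical project directory
print(result)  # '{"result": [...]}' with one entry per discovered requirement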