Example #1
def test_find_all_versions_nothing(data):
    """Find nothing without anything"""
    finder = PackageFinder([], [], session=PipSession())
    assert not finder._find_all_versions('pip')
Example #2
import os
from setuptools import setup, find_packages
from pip.req import parse_requirements
from pip.download import PipSession

version = '0.1.0'


def read(f):
    return open(os.path.join(os.path.dirname(__file__), f)).read().strip()


install_reqs = parse_requirements('requirements.txt', session=PipSession())
reqs = [str(ir.req) for ir in install_reqs]

setup(
    name='vertexarray',
    version=version,
    description=('Simple vertex array that avoids repeating vertices'),
    long_description='\n\n'.join((read('README.md'), read('CHANGELOG'))),
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Other Audience',
        'Programming Language :: Python :: 3'
    ],
    author='K.C.Saff',
    author_email='*****@*****.**',
    url='https://github.com/kcsaff/vertexarray',
    license='MIT',
    packages=find_packages(),
    install_requires=reqs,
Example #3
        config['loris.Loris']['run_as_user'] = self.loris_owner
        config['loris.Loris']['run_as_group'] = self.loris_group
        config['logging']['log_dir'] = self.log_dir
        config['resolver']['src_img_root'] = self.source_images
        config['img.ImageCache']['cache_dp'] = self.image_cache
        config['img_info.InfoCache']['cache_dp'] = self.info_cache
        config['transforms']['jp2']['kdu_expand'] = self.kdu_expand
        config['transforms']['jp2']['kdu_libs'] = self.libkdu
        config['transforms']['jp2']['tmp_dp'] = self.tmp_dir

        config.filename = config_file_target
        config.write()


install_requires = parse_requirements(local_file('requirements.txt'),
                                      session=PipSession())


def _read(fname):
    return open(local_file(fname)).read()


setup(cmdclass={'install': LorisInstallCommand},
      name='Loris',
      author='Jon Stroop',
      author_email='*****@*****.**',
      url='https://github.com/loris-imageserver/loris',
      description=('IIIF Image API 2.0 Level 2 compliant Image Server'),
      long_description=_read('README.md'),
      license='Simplified BSD',
      version=VERSION,
Example #4
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from codecs import open
import os

from pip.req import parse_requirements

kwargs = {}
try:
    # pip's parse_requirements requires a 'session=' keyword argument from
    # version 6.0 onwards.
    #
    # The pip installed on our Jenkins is older, so we can't pass the newer
    # argument unconditionally. So I'm catching the ImportError and simply
    # leaving the kwarg out on older pip.
    from pip.download import PipSession
    kwargs['session'] = PipSession()
except ImportError:
    pass

from setuptools import setup, find_packages

install_requirements = [
    str(requirement.req)
    for requirement in parse_requirements('./requirements.txt', **kwargs)
]
test_requirements = [
    str(requirement.req)
    for requirement in parse_requirements('./test_requirements.txt', **kwargs)
]

# Get the long description from the relevant file
here = os.path.abspath(os.path.dirname(__file__))
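The try/except in Example #4 keys off whether pip.download can be imported at all. A hedged variant of the same idea (not from the example) is to branch on pip.__version__ instead; it assumes the legacy pip.req/pip.download layout of pip < 10 and that the session argument appeared in pip 1.5:

import pip
from pip.req import parse_requirements

kwargs = {}
# Only pass session= when the running pip is new enough to accept it
# (assumption: pre-10 module layout, plain numeric version string).
if tuple(int(part) for part in pip.__version__.split('.')[:2]) >= (1, 5):
    from pip.download import PipSession
    kwargs['session'] = PipSession()

install_requirements = [
    str(r.req) for r in parse_requirements('./requirements.txt', **kwargs)
]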
Example #5
from pip.download import PipSession
from pip.req import parse_requirements
from setuptools import setup, find_packages

install_reqs = parse_requirements("./requirements.txt", session=PipSession())
install_requires = [str(ir.req).split('==')[0] for ir in install_reqs]
setup(name='gps-server',
      packages=find_packages(exclude=['examples', 'tests']),
      version='1.0',
      description='GPS Server and Kafka Producer',
      author='Abhishek Verma, Chirag',
      author_email='*****@*****.**',
      package_data={'': ['*.json']},
      install_requires=install_requires)
Example #6
def requirements(filename):
    """Parse requirements files."""
    return [
        f'{req.name}{req.specifier}'
        for req in parse_requirements(filename=filename, session=PipSession())
    ]
Example #7
def parse_requirements(reqs):
    return pip.req.parse_requirements(reqs, session=PipSession())
Example #8
def parse_requirements(f):
    return [str(r.req) for r in requirements(f, session=PipSession())]
Example #9
def get_requirements(file_name):
    """Возвращает список зависимостей med."""
    requirements = parse_requirements(file_name, session=PipSession())
    return [str(ir.req) for ir in requirements]
Example #10
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################

try:  # for pip >= 10
    from pip._internal.req import parse_requirements
    pip_session = 'hack'
except ImportError:  # for pip <= 9.0.3
    from pip.req import parse_requirements
    from pip.download import PipSession
    pip_session = PipSession()
from distutils.core import setup

from setuptools import find_packages

# Parse requirements.txt to get the list of dependencies
inst_req = parse_requirements('requirements.txt', session=pip_session)
REQUIREMENTS = [str(r.req) for r in inst_req]

setup(
    name='GeoNode',
    version=__import__('geonode').get_version(),
    description="Application for serving and sharing geospatial data",
    long_description=open('README.md').read(),
    classifiers=["Development Status :: 5 - Production/Stable"],
    python_requires='>=3',
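Example #10 (and several of the other setup.py snippets above) only keeps working across pip releases by chasing pip's private module layout, down to passing the dummy 'hack' session on pip >= 10. A minimal sketch that avoids pip internals altogether, assuming a plain requirements.txt with one requirement per line:

import os

def read_requirements(path='requirements.txt'):
    # Read requirements.txt directly, skipping comments, blank lines and
    # pip directives such as '-r other.txt' or '-e .'.
    here = os.path.abspath(os.path.dirname(__file__))
    reqs = []
    with open(os.path.join(here, path)) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith(('#', '-')):
                continue
            reqs.append(line)
    return reqs

The result can be passed straight to install_requires, at the cost of not handling pip-specific syntax inside the file.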
Example #11
from distutils.core import setup
from os import path
from pip.req import parse_requirements
from pip.download import PipSession

here = path.abspath(path.dirname(__file__))
requirements = [str(ir.req) for ir in
                parse_requirements(path.join(here, "requirements.txt"), session=PipSession())]

# Ugly hack to get the version prior to installation, without having the amcat
# package depend on setup.py.
execfile(path.join(here, "amcat", "_version.py"))

package = dict(
    name='amcat',
    version=__version__,
    packages=[ #  we could do with less of those
        'navigator.views',
        'navigator',
        'navigator.utils',
        'navigator.templatetags',
        'amcat.tests',
        'amcat.management',
        'amcat.management.commands',
        'amcat.scripts.output',
        'amcat.scripts.tools',
        'amcat.scripts',
        'amcat.scripts.forms',
        'amcat.scripts.actions',
        'amcat.scripts.processors',
        'amcat.scripts.searchscripts',
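The execfile() call in Example #11 only runs on Python 2. A Python 3 sketch of the same "read the version without importing the package" trick, assuming _version.py defines __version__ as the example expects:

import os

here = os.path.abspath(os.path.dirname(__file__))
version_ns = {}
# exec() the file contents in a throwaway namespace; execfile() was
# removed in Python 3.
with open(os.path.join(here, "amcat", "_version.py")) as f:
    exec(f.read(), version_ns)
__version__ = version_ns["__version__"]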
Example #12
def test_find_all_versions_find_links_and_index(data):
    finder = PackageFinder(
        [data.find_links], [data.index_url('simple')], session=PipSession())
    versions = finder._find_all_versions('simple')
    # first the find-links versions then the page versions
    assert [str(v.version) for v in versions] == ['3.0', '2.0', '1.0', '1.0']
Example #13
def test_find_all_versions_index(data):
    finder = PackageFinder(
        [], [data.index_url('simple')], session=PipSession())
    versions = finder._find_all_versions('simple')
    assert [str(v.version) for v in versions] == ['1.0']
Example #14
def test_find_all_versions_find_links(data):
    finder = PackageFinder(
        [data.find_links], [], session=PipSession())
    versions = finder._find_all_versions('simple')
    assert [str(v.version) for v in versions] == ['3.0', '2.0', '1.0']
Example #15
pip_dist = pkg_resources.get_distribution('pip')
pip_version = tuple(map(int, pip_dist.version.split('.')))

# Use a base partial that will be updated depending on the version of pip
parse_requirements = functools.partial(parse_reqs, options=None)

if pip_version < (1, 2):
    # pip versions before 1.2 require an options keyword for using it outside
    # of invoking a pip shell command
    from pip.baseparser import parser
    parse_requirements.keywords['options'] = parser.parse_args()[0]

if pip_version >= (1, 5):
    # pip 1.5 introduced a session kwarg that is required in later versions
    from pip.download import PipSession
    parse_requirements.keywords['session'] = PipSession()


# If installing on python 2.6, we need to install the argparse backport
extra_requires = []
if sys.version_info[:2] == (2, 6):
    extra_requires = ['argparse==1.3.0']


class PyTest(TestCommand):
    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
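For context, the partial built in Example #15 is meant to be called later exactly like the plain function, with 'options' and/or 'session' already baked into parse_requirements.keywords. A hypothetical call site:

# The partial already carries the keyword arguments appropriate for the
# installed pip version.
install_reqs = parse_requirements('requirements.txt')
reqs = [str(ir.req) for ir in install_reqs]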
Example #16
def get_requirements(file_name):
    return [
        str(requirement.req)
        for requirement in parse_requirements(file_name, session=PipSession())
    ]
Example #17
import os
import sys

from pip.download import PipSession
from pip.req import parse_requirements
from setuptools import setup

import django_numpy

BASE_DIR = os.path.abspath(os.path.dirname(__file__))

if sys.version_info[0] == 2:
    from codecs import open

# Read requirements
_requirements_file = os.path.join(BASE_DIR, 'requirements.txt')
_REQUIRES = [
    str(r.req)
    for r in parse_requirements(_requirements_file, session=PipSession())
]

# Read description
with open(os.path.join(BASE_DIR, 'README.rst'), encoding='utf-8') as f:
    _LONG_DESCRIPTION = f.read()

_CLASSIFIERS = (
    'Development Status :: 5 - Production/Stable',
    'Framework :: Django',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
    'Natural Language :: English',
    'Programming Language :: Python',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 3',
Example #18
    def test_cache_defaults_off(self):
        session = PipSession()

        assert not hasattr(session.adapters["http://"], "cache")
        assert not hasattr(session.adapters["https://"], "cache")
Example #19
    def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
        if session is None:
            session = PipSession()

        url = link.url
        url = url.split('#', 1)[0]
        if cache is not None and cache.too_many_failures(url):
            return None

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug('Cannot look at %(scheme)s URL %(link)s' %
                             locals())
                return None

        if cache is not None:
            inst = cache.get_page(url)
            if inst is not None:
                return inst
        try:
            if skip_archives:
                if cache is not None:
                    if cache.is_archive(url):
                        return None
                filename = link.filename
                for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(
                            url,
                            session=session,
                        )
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug(
                                'Skipping page %s because of Content-Type: %s'
                                % (link, content_type))
                            if cache is not None:
                                cache.set_is_archive(url)
                            return None
            logger.debug('Getting page %s' % url)

            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query,
             fragment) = urlparse.urlparse(url)
            if scheme == 'file' and os.path.isdir(url2pathname(path)):
                # add trailing slash if not present so urljoin doesn't trim final segment
                if not url.endswith('/'):
                    url += '/'
                url = urlparse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s' % url)

            resp = session.get(url, headers={"Accept": "text/html"})
            resp.raise_for_status()

            # The check for archives above only works if the url ends with
            #   something that looks like an archive. However that is not a
            #   requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
            #   redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
            #   Unless we issue a HEAD request on every url we cannot know
            #   ahead of time for sure if something is HTML or not. However we
            #   can check after we've downloaded it.
            content_type = resp.headers.get('Content-Type', 'unknown')
            if not content_type.lower().startswith("text/html"):
                logger.debug('Skipping page %s because of Content-Type: %s' %
                             (link, content_type))
                if cache is not None:
                    cache.set_is_archive(url)
                return None

            inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
        except requests.HTTPError as exc:
            level = 2 if exc.response.status_code == 404 else 1
            cls._handle_fail(req, link, exc, url, cache=cache, level=level)
        except requests.ConnectionError as exc:
            cls._handle_fail(
                req,
                link,
                "connection error: %s" % exc,
                url,
                cache=cache,
            )
        except requests.Timeout:
            cls._handle_fail(req, link, "timed out", url, cache=cache)
        except SSLError as exc:
            reason = ("There was a problem confirming the ssl certificate: "
                      "%s" % exc)
            cls._handle_fail(
                req,
                link,
                reason,
                url,
                cache=cache,
                level=2,
                meth=logger.notify,
            )
        else:
            if cache is not None:
                cache.add_page([url, resp.url], inst)
            return inst
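The Content-Type handling in the middle of get_page is the part worth isolating: as the inline comment notes, whether a URL really serves HTML can only be confirmed from the response headers. A standalone sketch with plain requests (the helper name and signature are illustrative, not pip's API):

import requests

def looks_like_html(url, session=None):
    # A HEAD request avoids downloading a potentially large archive just
    # to inspect its Content-Type header.
    session = session or requests.Session()
    resp = session.head(url, allow_redirects=True)
    content_type = resp.headers.get('Content-Type', 'unknown')
    return content_type.lower().startswith('text/html')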
Example #20
    def test_http_cache_is_not_enabled(self, tmpdir):
        session = PipSession(cache=tmpdir.join("test-cache"))

        assert not hasattr(session.adapters["http://"], "cache")
Example #21
            if (reqs_files_re.match(freq) and not req.endswith('.in')
                    and req not in reqs_files):
                reqs_files.append(req)
for reqs_file in reqs_files:
    if os.path.isfile(reqs_file):
        reqs = []
        try:
            try:
                reqs = [
                    a.req for a in pip.req.parse_requirements(reqs_file)
                ]
            except TypeError:
                from pip.download import PipSession
                reqs = [
                    a.req for a in pip.req.parse_requirements(
                        reqs_file, session=PipSession())
                ]
        except (Exception, ) as exc:
            sys.stderr.write(traceback.format_exc())
            sys.stderr.write("\n")
            sys.stderr.write(
                '{0} unreadable, getting next req file'.format(reqs_file))
            sys.stderr.write("\n")
            continue
        if not reqs:
            continue
        for req in reqs:
            pkgreq = "{0}".format(req.__class__)
            # match pip._vendor.pkg_resources.Requirement
            # match pkg_resources.Requirement
            if ".Requirement" not in pkgreq:
Example #22
def test_user_agent():
    PipSession().headers["User-Agent"].startswith("pip/%s" % pip.__version__)
Example #23
    def basic_reqset(self, **kwargs):
        return RequirementSet(build_dir=os.path.join(self.tempdir, 'build'),
                              src_dir=os.path.join(self.tempdir, 'src'),
                              download_dir=None,
                              session=PipSession(),
                              **kwargs)
Example #24
def read_requirements():
    '''parses requirements from requirements.txt'''
    reqs_path = os.path.join(os.getcwd(), 'requirements.txt')
    install_reqs = parse_requirements(reqs_path, session=PipSession())
    reqs = [str(ir.req) for ir in install_reqs]
    return reqs
Example #25
def requirements(filename):
    """Parse requirements from requirements.txt."""
    path = str(Path(filename))
    reqs = parse_requirements(path, session=PipSession())
    return [str(req.req) for req in reqs]
Example #26
# You should have received a copy of the GNU General Public License
# along with Cyphon Engine. If not, see <http://www.gnu.org/licenses/>.
"""

"""
import os
try:
    from pip.download import PipSession
    from pip.req import parse_requirements
except ImportError:
    from pip._internal.download import PipSession
    from pip._internal.req import parse_requirements
from setuptools import find_packages, setup

# parse_requirements() returns generator of pip.req.InstallRequirement objects
INSTALL_REQS = parse_requirements('requirements.txt', session=PipSession())

# reqs is a list of requirements
# e.g. ['django==1.5.1', 'mezzanine==1.4.6']
REQUIREMENTS = [str(ir.req) for ir in INSTALL_REQS]

with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='cyphon',
    version='1.6.3',
    install_requires=REQUIREMENTS,
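The two-level fallback in Example #26 covers pip <= 9 and pip 10-19, but pip moved PipSession again in 20.x. A hedged extension of the same pattern; the pip >= 20 paths below are assumptions about pip's internal layout and can change in any release:

try:
    # pip <= 9
    from pip.download import PipSession
    from pip.req import parse_requirements
except ImportError:
    try:
        # pip 10-19
        from pip._internal.download import PipSession
        from pip._internal.req import parse_requirements
    except ImportError:
        # pip >= 20 (assumed layout)
        from pip._internal.network.session import PipSession
        from pip._internal.req import parse_requirements

Note that on pip 20+ parse_requirements yields objects exposing .requirement rather than .req, so the list comprehension in the example would also need adjusting.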
Example #27
#!/usr/bin/env python
"""
The MIT License (MIT)
Copyright (c) 2017 AOUtils-Team

For full license details please see the LICENSE file located in the root folder
of the project.
"""

from pip.download import PipSession
from pip.req import parse_requirements
from setuptools import setup, find_packages

install_reqs = [
    str(ir.req)
    for ir in parse_requirements('requirements.txt', session=PipSession())
]

setup(name='AOUtils-Dashboard',
      author="AOUtils-Team",
      description='Dashboard for the Albion Online utilities tool suite.',
      url='https://github.com/AOUtils-Team/dashboard',
      version='0.0.1',
      packages=find_packages(exclude=['tests']),
      include_package_data=True,
      package_data={'dashboard': ['config.yml']},
      package_dir={'dashboard': 'dashboard'},
      install_requires=install_reqs,
      entry_points={
          'console_scripts':
          ['aoutils-dashboard-server = dashboard.server:main']
Example #28
import codecs
import os

from setuptools import find_packages, setup

try:
    from pip.req import parse_requirements
    from pip.download import PipSession
except ImportError:
    from pip._internal.req import parse_requirements
    from pip._internal.download import PipSession

rf = codecs.open(os.path.join(os.path.dirname(__file__), "README.txt"), "r")
with rf as readme:
    README = readme.read()

with PipSession() as s:
    requirements = parse_requirements(os.path.join(os.path.dirname(__file__),
                                                   "requirements_as_lib.txt"),
                                      session=s)

os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name="dev-cli",
    version="0.1.0",
    packages=find_packages("src"),
    package_dir={"": "src"},
    include_package_data=True,
    license="WTFPL",
    description="A CLI interface for dev.to",
    long_description=README,
Example #29
def get_requirements(source):
    install_reqs = parse_requirements(source, session=PipSession())
    return set([str(ir.req) for ir in install_reqs])
Example #30
def test_incorrect_case_file_index(data):
    """Test PackageFinder detects latest using wrong case"""
    req = InstallRequirement.from_line('dinner', None)
    finder = PackageFinder([], [data.find_links3], session=PipSession())
    link = finder.find_requirement(req, False)
    assert link.url.endswith("Dinner-2.0.tar.gz")