Example #1
0
 def correct_build_location(self):
     """If the build location was a temporary directory, this will move it
     to a new more permanent location"""
     if self.source_dir is not None:
         return
     assert self.req is not None
     assert self._temp_build_dir
     old_location = self._temp_build_dir
     new_build_dir = self._ideal_build_dir
     del self._ideal_build_dir
     if self.editable:
         name = self.name.lower()
     else:
         name = self.name
     new_location = os.path.join(new_build_dir, name)
     if not os.path.exists(new_build_dir):
         logger.debug('Creating directory %s' % new_build_dir)
         _make_build_dir(new_build_dir)
     if os.path.exists(new_location):
         raise InstallationError(
             'A package already exists in %s; please remove it to continue'
             % display_path(new_location))
     logger.debug(
         'Moving package %s from %s to new location %s' %
         (self, display_path(old_location), display_path(new_location)))
     shutil.move(old_location, new_location)
     self._temp_build_dir = new_location
     self.source_dir = new_location
     self._egg_info_path = None
Example #2
0
    def _get_queued_page(self, req, pending_queue, done, seen):
        while 1:
            try:
                location = pending_queue.get(False)
            except QueueEmpty:
                return
            if location in seen:
                continue
            seen.add(location)
            page = self._get_page(location, req)
            if page is None:
                continue
            done.append(page)
            for link in page.rel_links():
                normalized = normalize_name(req.name).lower()

                if (normalized not in self.allow_external
                        and not self.allow_all_external):
                    self.need_warn_external = True
                    logger.debug("Not searching %s for files because external "
                                 "urls are disallowed." % link)
                    continue

                if (link.trusted is not None and not link.trusted
                        and normalized not in self.allow_insecure
                        and not self.allow_all_insecure):  # TODO: Remove after release
                    logger.debug("Not searching %s for urls, it is an "
                                 "untrusted link and cannot produce safe or "
                                 "verifiable files." % link)
                    self.need_warn_insecure = True
                    continue

                pending_queue.put(link)
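
The two continue branches above are the external / insecure gating that decides whether a rel-link gets queued. A standalone sketch of the same predicate follows; the flag and attribute names are taken from the code above, while the function itself is only illustrative and not part of pip.

def should_follow_link(link, normalized, allow_external, allow_all_external,
                       allow_insecure, allow_all_insecure):
    # Mirrors the two gating checks above: returns True when the rel-link
    # should be queued for crawling.
    if normalized not in allow_external and not allow_all_external:
        return False  # externally hosted files are disallowed for this project
    if (link.trusted is not None and not link.trusted
            and normalized not in allow_insecure
            and not allow_all_insecure):
        return False  # untrusted link that cannot produce verifiable files
    return True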
Example #3
0
 def paths(self):
     """All the entries of sys.path, possibly restricted by --path"""
     if not self.select_paths:
         return sys.path
     result = []
     match_any = set()
     for path in sys.path:
         path = os.path.normcase(os.path.abspath(path))
         for match in self.select_paths:
             match = os.path.normcase(os.path.abspath(match))
             if '*' in match:
                 if re.search(fnmatch.translate(match+'*'), path):
                     result.append(path)
                     match_any.add(match)
                     break
             else:
                 if path.startswith(match):
                     result.append(path)
                     match_any.add(match)
                     break
         else:
             logger.debug("Skipping path %s because it doesn't match %s"
                          % (path, ', '.join(self.select_paths)))
     for match in self.select_paths:
         if match not in match_any and '*' not in match:
             result.append(match)
             logger.debug("Adding path %s because it doesn't match anything already on sys.path"
                          % match)
     return result
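
The '*' branch above relies on fnmatch.translate to turn a shell-style pattern into a regular expression before re.search is applied to each normalized sys.path entry. A small illustration with made-up paths, assuming a POSIX layout:

import fnmatch
import re

# Same match + '*' construction as the code above; the paths are illustrative.
pattern = fnmatch.translate('/opt/app/*' + '*')
assert re.search(pattern, '/opt/app/lib/python')        # glob match -> kept
assert not re.search(pattern, '/usr/lib/python2.7')     # no match -> skipped, logged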
Example #4
0
 def paths(self):
     """All the entries of sys.path, possibly restricted by --path"""
     if not self.select_paths:
         return sys.path
     result = []
     match_any = set()
     for path in sys.path:
         path = os.path.normcase(os.path.abspath(path))
         for match in self.select_paths:
             match = os.path.normcase(os.path.abspath(match))
             if '*' in match:
                 if re.search(fnmatch.translate(match+'*'), path):
                     result.append(path)
                     match_any.add(match)
                     break
             else:
                 if path.startswith(match):
                     result.append(path)
                     match_any.add(match)
                     break
         else:
             logger.debug("Skipping path %s because it doesn't match %s"
                          % (path, ', '.join(self.select_paths)))
     for match in self.select_paths:
         if match not in match_any and '*' not in match:
             result.append(match)
             logger.debug("Adding path %s because it doesn't match anything already on sys.path"
                          % match)
     return result
 def correct_build_location(self):
     """If the build location was a temporary directory, this will move it
     to a new more permanent location"""
     if self.source_dir is not None:
         return
     assert self.req is not None
     assert self._temp_build_dir
     old_location = self._temp_build_dir
     new_build_dir = self._ideal_build_dir
     del self._ideal_build_dir
     if self.editable:
         name = self.name.lower()
     else:
         name = self.name
     new_location = os.path.join(new_build_dir, name)
     if not os.path.exists(new_build_dir):
         logger.debug('Creating directory %s' % new_build_dir)
         _make_build_dir(new_build_dir)
     if os.path.exists(new_location):
         raise InstallationError(
             'A package already exists in %s; please remove it to continue'
             % display_path(new_location))
     logger.debug(
         'Moving package %s from %s to new location %s' %
         (self, display_path(old_location), display_path(new_location))
     )
     shutil.move(old_location, new_location)
     self._temp_build_dir = new_location
     self.source_dir = new_location
     self._egg_info_path = None
Example #6
0
    def _get_queued_page(self, req, pending_queue, done, seen):
        while 1:
            try:
                location = pending_queue.get(False)
            except QueueEmpty:
                return
            if location in seen:
                continue
            seen.add(location)
            page = self._get_page(location, req)
            if page is None:
                continue
            done.append(page)
            for link in page.rel_links():
                normalized = normalize_name(req.name).lower()

                if (normalized not in self.allow_external
                        and not self.allow_all_external):
                    self.need_warn_external = True
                    logger.debug("Not searching %s for files because external "
                                 "urls are disallowed." % link)
                    continue

                if (link.trusted is not None
                        and not link.trusted
                        and normalized not in self.allow_insecure
                        and not self.allow_all_insecure):  # TODO: Remove after release
                    logger.debug("Not searching %s for urls, it is an "
                                 "untrusted link and cannot produce safe or "
                                 "verifiable files." % link)
                    self.need_warn_insecure = True
                    continue

                pending_queue.put(link)
Example #7
0
    def get_available_substitute(self, install_req):
        """Find an available substitute for the given package.
           Returns a PackageData object.
        """
        global __InstallationErrorMessage__

        new_candidate_package_data = PackageData.from_dist(install_req)
        if new_candidate_package_data.name is None:
            # cannot find alternative versions without a name.
            return None

        existing_package_data = self.find_potential_substitutes(new_candidate_package_data.name)
        if existing_package_data is None:
            return None

        packages_in_conflict = [new_candidate_package_data, existing_package_data]
        # Force a hash so that the cmp operator is not used
        editables = set([hash(p) for p in packages_in_conflict if p.editable])
        if len(editables) == 2:

            local_editable_path = os.path.join(sys.prefix, 'src', existing_package_data.name)
            if os.path.isdir(local_editable_path):

                if self.check_for_uncommited_git_changes(local_editable_path):
                    raise InstallationError("{message}. In path: {path}".format(
                                            message=__InstallationErrorMessage__,
                                            path=local_editable_path))

            # This is an expensive comparison, so let's cache results
            competing_version_urls = [str(r.url) for r in packages_in_conflict]
            cmp_result = self.get_cached_comparison_result(*competing_version_urls)
            if cmp_result is None:
                # We're comparing two versions of an editable because we know we're going to use the software in
                # the given repo (it's just the version that's not decided yet).
                # So let's check out the repo into the src directory. Later (when we have the version) update_editable
                # will use the correct version anyway.
                repo_dir = self.checkout_if_necessary(new_candidate_package_data)
                comparator = GitVersionComparator(repo_dir, self.prefer_pinned_revision)
                try:
                    versions = [GitVersionComparator.get_version_string_from_url(r.url) for r in packages_in_conflict]
                    if len([v for v in versions if v is None]) == 2:
                        # if neither the existing requirement nor the new candidate has version info (both are editable),
                        # we'd better update our clone and re-run setup.
                        return existing_package_data  # OPTIMIZE return with the installed version
                    cmp_result = comparator.compare_versions(*versions)

                    self.save_comparison_result(competing_version_urls[0], competing_version_urls[1], cmp_result)
                except SeparateBranchException, exc:
                    raise InstallationError(
                        "%s: Conflicting versions cannot be compared as they are not direct descendants according to git. Exception: %s, Package data: %s." % (
                            new_candidate_package_data.name,
                            str(exc.args),
                            str([p.__dict__ for p in packages_in_conflict])))
            else:
                logger.debug("using cached comparison: %s %s -> %s" % (competing_version_urls[0], competing_version_urls[1], cmp_result))
            return None if cmp_result == GitVersionComparator.GT else existing_package_data
Example #8
0
 def assert_source_matches_version(self):
     assert self.source_dir
     version = self.installed_version
     if version not in self.req:
         logger.warn('Requested %s, but installing version %s' %
                     (self, self.installed_version))
     else:
         logger.debug(
             'Source in %s has version %s, which satisfies requirement %s' %
             (display_path(self.source_dir), version, self))
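
The `version not in self.req` test above relies on requirement containment, i.e. checking a version string against the requirement's specifiers. A minimal illustration with pkg_resources and a made-up project name:

import pkg_resources

req = pkg_resources.Requirement.parse('example-pkg>=1.0,<2.0')
assert '1.4' in req       # satisfies the specifiers
assert '2.1' not in req   # a version outside the specifiers triggers the warning above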
Example #9
0
 def _egg_info_matches(self, egg_info, search_name, link):
     match = self._egg_info_re.search(egg_info)
     if not match:
         logger.debug('Could not parse version from link: %s' % link)
         return None
     name = match.group(0).lower()
     # To match the "safe" name that pkg_resources creates:
     name = name.replace('_', '-')
     if name.startswith(search_name.lower()):
         return match.group(0)[len(search_name):].lstrip('-')
     else:
         return None
Example #10
0
 def _egg_info_matches(self, egg_info, search_name, link):
     match = self._egg_info_re.search(egg_info)
     if not match:
         logger.debug('Could not parse version from link: %s' % link)
         return None
     name = match.group(0).lower()
     # To match the "safe" name that pkg_resources creates:
     name = name.replace('_', '-')
     if name.startswith(search_name.lower()):
         return match.group(0)[len(search_name):].lstrip('-')
     else:
         return None
 def requirements(self, extras=()):
     in_extra = None
     for line in self.egg_info_lines('requires.txt'):
         match = self._requirements_section_re.match(line.lower())
         if match:
             in_extra = match.group(1)
             continue
         if in_extra and in_extra not in extras:
             logger.debug('skipping extra %s' % in_extra)
             # Skip requirement for an extra we aren't requiring
             continue
         yield line
Example #12
0
 def requirements(self, extras=()):
     in_extra = None
     for line in self.egg_info_lines('requires.txt'):
         match = self._requirements_section_re.match(line.lower())
         if match:
             in_extra = match.group(1)
             continue
         if in_extra and in_extra not in extras:
             logger.debug('skipping extra %s' % in_extra)
             # Skip requirement for an extra we aren't requiring
             continue
         yield line
 def assert_source_matches_version(self):
     assert self.source_dir
     version = self.installed_version
     if version not in self.req:
         logger.warn(
             'Requested %s, but installing version %s' %
             (self, self.installed_version)
         )
     else:
         logger.debug(
             'Source in %s has version %s, which satisfies requirement %s' %
             (display_path(self.source_dir), version, self)
         )
Example #14
0
 def _egg_info_matches(self, egg_info, search_name, link):
     match = self._egg_info_re.search(egg_info)
     if not match:
         logger.debug('Could not parse version from link: %s' % link)
         return None
     name = match.group(0).lower()
     # To match the "safe" name that pkg_resources creates:
     name = name.replace('_', '-')
     # project name and version must be separated by a dash
     look_for = search_name.lower() + "-"
     if name.startswith(look_for):
         return match.group(0)[len(look_for):]
     else:
         return None
Example #15
0
 def _egg_info_matches(self, egg_info, search_name, link):
     match = self._egg_info_re.search(egg_info)
     if not match:
         logger.debug('Could not parse version from link: %s' % link)
         return None
     name = match.group(0).lower()
     # To match the "safe" name that pkg_resources creates:
     name = name.replace('_', '-')
     # project name and version must be separated by a dash
     look_for = search_name.lower() + "-"
     if name.startswith(look_for):
         return match.group(0)[len(look_for):]
     else:
         return None
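
Examples #14 and #15 tighten the name check from Examples #9 and #10: instead of accepting any egg_info whose name merely starts with the search name, they require the project name to be followed by a dash. A self-contained sketch of the difference; the regular expression is a simplified stand-in for _egg_info_re and the project names are made up.

import re

# Simplified stand-in for the _egg_info_re pattern used above; the real
# expression differs, but this is enough to show the two matching rules.
_egg_info_re = re.compile(r'([a-z0-9_.]+)-([a-z0-9_.+-]+)', re.IGNORECASE)

def version_by_prefix(egg_info, search_name):
    # Old rule (Examples #9/#10): any name merely starting with search_name matches.
    match = _egg_info_re.search(egg_info)
    if not match:
        return None
    name = match.group(0).lower().replace('_', '-')
    if name.startswith(search_name.lower()):
        return match.group(0)[len(search_name):].lstrip('-')
    return None

def version_by_dash(egg_info, search_name):
    # New rule (Examples #14/#15): the project name must be followed by a dash.
    match = _egg_info_re.search(egg_info)
    if not match:
        return None
    name = match.group(0).lower().replace('_', '-')
    look_for = search_name.lower() + '-'
    if name.startswith(look_for):
        return match.group(0)[len(look_for):]
    return None

# "foobar-1.0" belongs to a different project, yet the prefix rule reports a
# bogus version when searching for "foo"; the dash rule correctly rejects it.
assert version_by_prefix('foobar-1.0', 'foo') == 'bar-1.0'
assert version_by_dash('foobar-1.0', 'foo') is None
assert version_by_prefix('foo-1.0', 'foo') == version_by_dash('foo-1.0', 'foo') == '1.0'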
Example #16
0
    def __build_one(self, req, tempd, python_tag=None):
        base_args = self._base_setup_args(req)

        spin_message = 'Running setup.py bdist_wheel for %s' % (req.name,)
        with open_spinner(spin_message) as spinner:
            logger.debug('Destination directory: %s', tempd)
            wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
                + self.build_options

            if python_tag is not None:
                wheel_args += ["--python-tag", python_tag]

            try:
                call_subprocess(wheel_args, cwd=req.source_dir,
                                show_stdout=False, spinner=spinner)
                return True
            except:
                # assumed failure handling: stop the spinner, log the failure,
                # and report it to the caller
                spinner.finish("error")
                logger.error('Failed building wheel for %s', req.name)
                return False
Example #17
0
def _download_url(resp, link, temp_location):
    fp = open(temp_location, 'wb')
    download_hash = None
    if link.md5_hash:
        download_hash = md5()
    try:
        total_length = int(resp.info()['content-length'])
    except (ValueError, KeyError):
        total_length = 0
    downloaded = 0
    show_progress = total_length > 40 * 1000 or not total_length
    show_url = link.show_url
    try:
        if show_progress:
            ## FIXME: the URL can get really long in this message:
            if total_length:
                logger.start_progress('Downloading %s (%s): ' %
                                      (show_url, format_size(total_length)))
            else:
                logger.start_progress('Downloading %s (unknown size): ' %
                                      show_url)
        else:
            logger.notify('Downloading %s' % show_url)
        logger.debug('Downloading from URL %s' % link)

        while 1:
            chunk = resp.read(4096)
            if not chunk:
                break
            downloaded += len(chunk)
            if show_progress:
                if not total_length:
                    logger.show_progress('%s' % format_size(downloaded))
                else:
                    logger.show_progress('%3i%%  %s' %
                                         (100 * downloaded / total_length,
                                          format_size(downloaded)))
            if link.md5_hash:
                download_hash.update(chunk)
            fp.write(chunk)
        fp.close()
    finally:
        if show_progress:
            logger.end_progress('%s downloaded' % format_size(downloaded))
    return download_hash
Example #18
0
def _download_url(resp, link, temp_location):
    fp = open(temp_location, 'wb')
    download_hash = None
    if link.hash and link.hash_name:
        try:
            download_hash = hashlib.new(link.hash_name)
        except ValueError:
            logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
    try:
        total_length = int(resp.info()['content-length'])
    except (ValueError, KeyError, TypeError):
        total_length = 0
    downloaded = 0
    show_progress = total_length > 40*1000 or not total_length
    show_url = link.show_url
    try:
        if show_progress:
            ## FIXME: the URL can get really long in this message:
            if total_length:
                logger.start_progress('Downloading %s (%s): ' % (show_url, format_size(total_length)))
            else:
                logger.start_progress('Downloading %s (unknown size): ' % show_url)
        else:
            logger.notify('Downloading %s' % show_url)
        logger.debug('Downloading from URL %s' % link)

        while True:
            chunk = resp.read(4096)
            if not chunk:
                break
            downloaded += len(chunk)
            if show_progress:
                if not total_length:
                    logger.show_progress('%s' % format_size(downloaded))
                else:
                    logger.show_progress('%3i%%  %s' % (100*downloaded/total_length, format_size(downloaded)))
            if download_hash is not None:
                download_hash.update(chunk)
            fp.write(chunk)
        fp.close()
    finally:
        if show_progress:
            logger.end_progress('%s downloaded' % format_size(downloaded))
    return download_hash
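
The function above only accumulates the digest; what a caller might do with the returned hash object is sketched below. The check itself is an assumption rather than code quoted from pip, but link.hash and link.hash_name are the attributes used above.

def _verify_download_hash(download_hash, link):
    # Assumed follow-up check: compare the accumulated digest against the
    # hash advertised on the link, and complain if they disagree.
    if download_hash is None or not link.hash:
        return  # nothing was hashed, or nothing to verify against
    digest = download_hash.hexdigest()
    if digest != link.hash:
        raise ValueError('Hash mismatch for %s: expected %s:%s, got %s'
                         % (link, link.hash_name, link.hash, digest))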
Example #19
0
    def _link_package_versions(self, link, search_name):
        """
        Return an iterable of triples (pkg_resources_version_key,
        link, python_version) that can be extracted from the given
        link.

        Meant to be overridden by subclasses, not called by clients.
        """
        if link.egg_fragment:
            egg_info = link.egg_fragment
        else:
            egg_info, ext = link.splitext()
            if not ext:
                if link not in self.logged_links:
                    logger.debug('Skipping link %s; not a file' % link)
                    self.logged_links.add(link)
                return []
            if egg_info.endswith('.tar'):
                # Special double-extension case:
                egg_info = egg_info[:-4]
                ext = '.tar' + ext
            if ext not in ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip'):
                if link not in self.logged_links:
                    logger.debug(
                        'Skipping link %s; unknown archive format: %s' %
                        (link, ext))
                    self.logged_links.add(link)
                return []
        version = self._egg_info_matches(egg_info, search_name, link)
        if version is None:
            logger.debug('Skipping link %s; wrong project name (not %s)' %
                         (link, search_name))
            return []
        match = self._py_version_re.search(version)
        if match:
            version = version[:match.start()]
            py_version = match.group(1)
            if py_version != sys.version[:3]:
                logger.debug(
                    'Skipping %s because Python version is incorrect' % link)
                return []
        logger.debug('Found link %s, version: %s' % (link, version))
        return [(pkg_resources.parse_version(version), link, version)]
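
The _py_version_re handling above peels a trailing Python tag off the version and compares it with sys.version[:3]. A short illustration; the pattern below is an assumed stand-in for _py_version_re, not the definition used by the class.

import re
import sys

_py_version_re = re.compile(r'-py([123]\.?[0-9]?)$')   # assumed stand-in

version = '1.0-py2.7'
match = _py_version_re.search(version)
if match:
    version, py_version = version[:match.start()], match.group(1)
    compatible = (py_version == sys.version[:3])
# -> version == '1.0', py_version == '2.7', compatible only on a 2.7 interpreter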
Example #20
0
 def _package_versions(self, links, search_name):
     seen_links = {}
     for link in self._sort_links(links):
         if link.url in seen_links:
             continue
         seen_links[link.url] = None
         if link.egg_fragment:
             egg_info = link.egg_fragment
         else:
             path = link.path
             egg_info, ext = link.splitext()
             if not ext:
                 if link not in self.logged_links:
                     logger.debug('Skipping link %s; not a file' % link)
                     self.logged_links.add(link)
                 continue
             if egg_info.endswith('.tar'):
                 # Special double-extension case:
                 egg_info = egg_info[:-4]
                 ext = '.tar' + ext
             if ext not in ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip'):
                 if link not in self.logged_links:
                     logger.debug(
                         'Skipping link %s; unknown archive format: %s' %
                         (link, ext))
                     self.logged_links.add(link)
                 continue
         version = self._egg_info_matches(egg_info, search_name, link)
         if version is None:
             logger.debug('Skipping link %s; wrong project name (not %s)' %
                          (link, search_name))
             continue
         match = self._py_version_re.search(version)
         if match:
             version = version[:match.start()]
             py_version = match.group(1)
             if py_version != sys.version[:3]:
                 logger.debug(
                     'Skipping %s because Python version is incorrect' %
                     link)
                 continue
         logger.debug('Found link %s, version: %s' % (link, version))
         yield (pkg_resources.parse_version(version), link, version)
Example #21
0
    def safe_requirements(self):
        """
        safe implementation of pip.req.InstallRequirement.requirements() generator, doesn't blow up with OSError
        """

        in_extra = None
        try:
            for line in self.egg_info_lines('requires.txt'):
                match = self._requirements_section_re.match(line.lower())
                if match:
                    in_extra = match.group(1)
                    continue
                if in_extra:
                    logger.debug('skipping extra %s' % in_extra)
                    # Skip requirement for an extra we aren't requiring
                    continue
                yield line
        except OSError:
            pass
Example #22
0
    def safe_requirements(self):
        """
        safe implementation of pip.req.InstallRequirement.requirements() generator, doesn't blow up with OSError
        """

        in_extra = None
        try:
            for line in self.egg_info_lines('requires.txt'):
                match = self._requirements_section_re.match(line.lower())
                if match:
                    in_extra = match.group(1)
                    continue
                if in_extra:
                    logger.debug('skipping extra %s' % in_extra)
                    # Skip requirement for an extra we aren't requiring
                    continue
                yield line
        except OSError:
            pass
Example #23
0
    def _get_pages(self, locations, req):
        """
        Yields (page, page_url) from the given locations, skipping
        locations that have errors, and adding download/homepage links
        """
        all_locations = list(locations)
        seen = set()

        while all_locations:
            location = all_locations.pop(0)
            if location in seen:
                continue
            seen.add(location)

            page = self._get_page(location, req)
            if page is None:
                continue

            yield page

            for link in page.rel_links():
                normalized = normalize_name(req.name).lower()

                if (normalized not in self.allow_external
                        and not self.allow_all_external):
                    self.need_warn_external = True
                    logger.debug("Not searching %s for files because external "
                                 "urls are disallowed." % link)
                    continue

                if (link.trusted is not None
                        and not link.trusted
                        and normalized not in self.allow_unverified):
                    logger.debug(
                        "Not searching %s for urls, it is an "
                        "untrusted link and cannot produce safe or "
                        "verifiable files." % link
                    )
                    self.need_warn_unverified = True
                    continue

                all_locations.append(link)
Example #24
0
    def _get_pages(self, locations, req):
        """
        Yields (page, page_url) from the given locations, skipping
        locations that have errors, and adding download/homepage links
        """
        all_locations = list(locations)
        seen = set()

        while all_locations:
            location = all_locations.pop(0)
            if location in seen:
                continue
            seen.add(location)

            page = self._get_page(location, req)
            if page is None:
                continue

            yield page

            for link in page.rel_links():
                normalized = normalize_name(req.name).lower()

                if (normalized not in self.allow_external
                        and not self.allow_all_external):
                    self.need_warn_external = True
                    logger.debug("Not searching %s for files because external "
                                 "urls are disallowed." % link)
                    continue

                if (link.trusted is not None
                        and not link.trusted
                        and normalized not in self.allow_unverified):
                    logger.debug(
                        "Not searching %s for urls, it is an "
                        "untrusted link and cannot produce safe or "
                        "verifiable files." % link
                    )
                    self.need_warn_unverified = True
                    continue

                all_locations.append(link)
Example #25
0
def _download_url(resp, link, temp_location):
    fp = open(temp_location, "wb")
    download_hash = None
    if link.md5_hash:
        download_hash = md5()
    try:
        total_length = int(resp.info()["content-length"])
    except (ValueError, KeyError):
        total_length = 0
    downloaded = 0
    show_progress = total_length > 40 * 1000 or not total_length
    show_url = link.show_url
    try:
        if show_progress:
            ## FIXME: the URL can get really long in this message:
            if total_length:
                logger.start_progress("Downloading %s (%s): " % (show_url, format_size(total_length)))
            else:
                logger.start_progress("Downloading %s (unknown size): " % show_url)
        else:
            logger.notify("Downloading %s" % show_url)
        logger.debug("Downloading from URL %s" % link)

        while True:
            chunk = resp.read(4096)
            if not chunk:
                break
            downloaded += len(chunk)
            if show_progress:
                if not total_length:
                    logger.show_progress("%s" % format_size(downloaded))
                else:
                    logger.show_progress("%3i%%  %s" % (100 * downloaded / total_length, format_size(downloaded)))
            if link.md5_hash:
                download_hash.update(chunk)
            fp.write(chunk)
        fp.close()
    finally:
        if show_progress:
            logger.end_progress("%s downloaded" % format_size(downloaded))
    return download_hash
Example #26
0
def _download_url(resp, link, temp_location):
    fp = open(temp_location, 'wb')
    download_hash = None
    if link.md5_hash:
        download_hash = md5()
    try:
        total_length = int(resp.info()['content-length'])
    except (ValueError, KeyError):
        total_length = 0
    downloaded = 0
    show_progress = total_length > 40*1000 or not total_length
    show_url = link.show_url
    try:
        if show_progress:
            ## FIXME: the URL can get really long in this message:
            if total_length:
                logger.start_progress('Downloading {0!s} ({1!s}): '.format(show_url, format_size(total_length)))
            else:
                logger.start_progress('Downloading {0!s} (unknown size): '.format(show_url))
        else:
            logger.notify('Downloading {0!s}'.format(show_url))
        logger.debug('Downloading from URL {0!s}'.format(link))

        while True:
            chunk = resp.read(4096)
            if not chunk:
                break
            downloaded += len(chunk)
            if show_progress:
                if not total_length:
                    logger.show_progress('{0!s}'.format(format_size(downloaded)))
                else:
                    logger.show_progress('{0:3d}%  {1!s}'.format(100*downloaded/total_length, format_size(downloaded)))
            if link.md5_hash:
                download_hash.update(chunk)
            fp.write(chunk)
        fp.close()
    finally:
        if show_progress:
            logger.end_progress('{0!s} downloaded'.format(format_size(downloaded)))
    return download_hash
Example #27
0
    def _link_package_versions(self, link, search_name):
        """
        Return an iterable of triples (pkg_resources_version_key,
        link, python_version) that can be extracted from the given
        link.

        Meant to be overridden by subclasses, not called by clients.
        """
        if link.egg_fragment:
            egg_info = link.egg_fragment
        else:
            egg_info, ext = link.splitext()
            if not ext:
                if link not in self.logged_links:
                    logger.debug('Skipping link {0!s}; not a file'.format(link))
                    self.logged_links.add(link)
                return []
            if egg_info.endswith('.tar'):
                # Special double-extension case:
                egg_info = egg_info[:-4]
                ext = '.tar' + ext
            if ext not in ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip'):
                if link not in self.logged_links:
                    logger.debug('Skipping link {0!s}; unknown archive format: {1!s}'.format(link, ext))
                    self.logged_links.add(link)
                return []
        version = self._egg_info_matches(egg_info, search_name, link)
        if version is None:
            logger.debug('Skipping link {0!s}; wrong project name (not {1!s})'.format(link, search_name))
            return []
        match = self._py_version_re.search(version)
        if match:
            version = version[:match.start()]
            py_version = match.group(1)
            if py_version != sys.version[:3]:
                logger.debug('Skipping {0!s} because Python version is incorrect'.format(link))
                return []
        logger.debug('Found link {0!s}, version: {1!s}'.format(link, version))
        return [(pkg_resources.parse_version(version),
               link,
               version)]
Example #28
0
 def _package_versions(self, links, search_name):
     seen_links = {}
     for link in self._sort_links(links):
         if link.url in seen_links:
             continue
         seen_links[link.url] = None
         if link.egg_fragment:
             egg_info = link.egg_fragment
         else:
             path = link.path
             egg_info, ext = link.splitext()
             if not ext:
                 if link not in self.logged_links:
                     logger.debug('Skipping link %s; not a file' % link)
                     self.logged_links.add(link)
                 continue
             if egg_info.endswith('.tar'):
                 # Special double-extension case:
                 egg_info = egg_info[:-4]
                 ext = '.tar' + ext
             if ext not in ('.tar.gz', '.tar.bz2', '.tar', '.tgz', '.zip'):
                 if link not in self.logged_links:
                     logger.debug('Skipping link %s; unknown archive format: %s' % (link, ext))
                     self.logged_links.add(link)
                 continue
         version = self._egg_info_matches(egg_info, search_name, link)
         if version is None:
             logger.debug('Skipping link %s; wrong project name (not %s)' % (link, search_name))
             continue
         match = self._py_version_re.search(version)
         if match:
             version = version[:match.start()]
             py_version = match.group(1)
             if py_version != sys.version[:3]:
                 logger.debug('Skipping %s because Python version is incorrect' % link)
                 continue
         logger.debug('Found link %s, version: %s' % (link, version))
         yield (pkg_resources.parse_version(version),
                link,
                version)
Example #29
0
 def get_page(cls, link, req, cache=None, skip_archives=True):
     url = link.url
     url = url.split('#', 1)[0]
     if cache.too_many_failures(url):
         return None
     if url.lower().startswith('svn'):
         logger.debug('Cannot look at svn URL %s' % link)
         return None
     if cache is not None:
         inst = cache.get_page(url)
         if inst is not None:
             return inst
     try:
         if skip_archives:
             if cache is not None:
                 if cache.is_archive(url):
                     return None
             filename = link.filename
             for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                 if filename.endswith(bad_ext):
                     content_type = cls._get_content_type(url)
                     if content_type.lower().startswith('text/html'):
                         break
                     else:
                         logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type))
                         if cache is not None:
                             cache.set_is_archive(url)
                         return None
         logger.debug('Getting page %s' % url)
         resp = urllib2.urlopen(url)
         real_url = resp.geturl()
         headers = resp.info()
         inst = cls(resp.read(), real_url, headers)
     except (urllib2.HTTPError, urllib2.URLError, socket.timeout, socket.error), e:
         desc = str(e)
         if isinstance(e, socket.timeout):
             log_meth = logger.info
             level = 1
             desc = 'timed out'
         elif isinstance(e, urllib2.URLError):
             log_meth = logger.info
             if hasattr(e, 'reason') and isinstance(e.reason, socket.timeout):
                 desc = 'timed out'
                 level = 1
             else:
                 level = 2
         elif isinstance(e, urllib2.HTTPError) and e.code == 404:
             ## FIXME: notify?
             log_meth = logger.info
             level = 2
         else:
             log_meth = logger.info
             level = 1
         log_meth('Could not fetch URL %s: %s' % (link, desc))
         log_meth('Will skip URL %s when looking for download links for %s' % (link.url, req))
         if cache is not None:
             cache.add_page_failure(url, level)
         return None
     if cache is not None:
         cache.add_page([url, real_url], inst)
     return inst
Example #30
0
    def find_requirement(self, req, upgrade):
        def mkurl_pypi_url(url):
            loc = posixpath.join(url, url_name)
            # For maximum compatibility with easy_install, ensure the path
            # ends in a trailing slash.  Although this isn't in the spec
            # (and PyPI can handle it without the slash) some other index
            # implementations might break if they relied on easy_install's behavior.
            if not loc.endswith('/'):
                loc = loc + '/'
            return loc

        url_name = req.url_name
        # Only check main index if index URL is given:
        main_index_url = None
        if self.index_urls:
            # Check that we have the url_name correctly spelled:
            main_index_url = Link(mkurl_pypi_url(self.index_urls[0]),
                                  trusted=True)
            # This will also cache the page, so it's okay that we get it again later:
            page = self._get_page(main_index_url, req)
            if page is None:
                url_name = self._find_url_name(
                    Link(self.index_urls[0], trusted=True), url_name,
                    req) or req.url_name

        if url_name is not None:
            locations = [mkurl_pypi_url(url)
                         for url in self.index_urls] + self.find_links
        else:
            locations = list(self.find_links)
        for version in req.absolute_versions:
            if url_name is not None and main_index_url is not None:
                locations = [posixpath.join(main_index_url.url, version)
                             ] + locations

        file_locations, url_locations = self._sort_locations(locations)
        _flocations, _ulocations = self._sort_locations(self.dependency_links)
        file_locations.extend(_flocations)

        # We trust every url that the user has given us whether it was given
        #   via --index-url or --find-links
        locations = [Link(url, trusted=True) for url in url_locations]

        # We explicitly do not trust links that came from dependency_links
        locations.extend([Link(url) for url in _ulocations])

        logger.debug('URLs to search for versions for %s:' % req)
        for location in locations:
            logger.debug('* %s' % location)

            # Determine if this url used a secure transport mechanism
            parsed = urlparse.urlparse(str(location))
            if parsed.scheme in INSECURE_SCHEMES:
                secure_schemes = INSECURE_SCHEMES[parsed.scheme]

                if len(secure_schemes) == 1:
                    ctx = (location, parsed.scheme, secure_schemes[0],
                           parsed.netloc)
                    logger.warn("%s uses an insecure transport scheme (%s). "
                                "Consider using %s if %s has it available" %
                                ctx)
                elif len(secure_schemes) > 1:
                    ctx = (location, parsed.scheme, ", ".join(secure_schemes),
                           parsed.netloc)
                    logger.warn("%s uses an insecure transport scheme (%s). "
                                "Consider using one of %s if %s has any of "
                                "them available" % ctx)
                else:
                    ctx = (location, parsed.scheme)
                    logger.warn("%s uses an insecure transport scheme (%s)." %
                                ctx)

        found_versions = []
        found_versions.extend(
            self._package_versions(
                # We trust every directly linked archive in find_links
                [Link(url, '-f', trusted=True) for url in self.find_links],
                req.name.lower()))
        page_versions = []
        for page in self._get_pages(locations, req):
            logger.debug('Analyzing links from page %s' % page.url)
            logger.indent += 2
            try:
                page_versions.extend(
                    self._package_versions(page.links, req.name.lower()))
            finally:
                logger.indent -= 2
        dependency_versions = list(
            self._package_versions(
                [Link(url) for url in self.dependency_links],
                req.name.lower()))
        if dependency_versions:
            logger.info('dependency_links found: %s' % ', '.join(
                [link.url for parsed, link, version in dependency_versions]))
        file_versions = list(
            self._package_versions([Link(url) for url in file_locations],
                                   req.name.lower()))
        if not found_versions and not page_versions and not dependency_versions and not file_versions:
            logger.fatal(
                'Could not find any downloads that satisfy the requirement %s'
                % req)

            if self.need_warn_external:
                logger.warn("Some externally hosted files were ignored (use "
                            "--allow-external %s to allow)." % req.name)

            if self.need_warn_unverified:
                logger.warn("Some insecure and unverifiable files were ignored"
                            " (use --allow-unverified %s to allow)." %
                            req.name)

            raise DistributionNotFound('No distributions at all found for %s' %
                                       req)
        installed_version = []
        if req.satisfied_by is not None:
            installed_version = [(req.satisfied_by.parsed_version,
                                  INSTALLED_VERSION, req.satisfied_by.version)]
        if file_versions:
            file_versions.sort(reverse=True)
            logger.info('Local files found: %s' % ', '.join([
                url_to_path(link.url)
                for parsed, link, version in file_versions
            ]))
        # this is an intentional priority ordering
        all_versions = installed_version + file_versions + found_versions + page_versions + dependency_versions
        applicable_versions = []
        for (parsed_version, link, version) in all_versions:
            if version not in req.req:
                logger.info("Ignoring link %s, version %s doesn't match %s" %
                            (link, version, ','.join(
                                [''.join(s) for s in req.req.specs])))
                continue
            elif is_prerelease(version) and not (self.allow_all_prereleases
                                                 or req.prereleases):
                # If this version isn't the already installed one, then
                #   ignore it if it's a pre-release.
                if link is not INSTALLED_VERSION:
                    logger.info(
                        "Ignoring link %s, version %s is a pre-release (use --pre to allow)."
                        % (link, version))
                    continue
            applicable_versions.append((parsed_version, link, version))
        applicable_versions = self._sort_versions(applicable_versions)
        existing_applicable = bool([
            link for parsed_version, link, version in applicable_versions
            if link is INSTALLED_VERSION
        ])
        if not upgrade and existing_applicable:
            if applicable_versions[0][1] is INSTALLED_VERSION:
                logger.info(
                    'Existing installed version (%s) is most up-to-date and satisfies requirement'
                    % req.satisfied_by.version)
            else:
                logger.info(
                    'Existing installed version (%s) satisfies requirement (most up-to-date version is %s)'
                    % (req.satisfied_by.version, applicable_versions[0][2]))
            return None
        if not applicable_versions:
            logger.fatal(
                'Could not find a version that satisfies the requirement %s (from versions: %s)'
                % (req, ', '.join([
                    version for parsed_version, link, version in all_versions
                ])))

            if self.need_warn_external:
                logger.warn("Some externally hosted files were ignored (use "
                            "--allow-external to allow).")

            if self.need_warn_unverified:
                logger.warn("Some insecure and unverifiable files were ignored"
                            " (use --allow-unverified %s to allow)." %
                            req.name)

            raise DistributionNotFound(
                'No distributions matching the version for %s' % req)
        if applicable_versions[0][1] is INSTALLED_VERSION:
            # We have an existing version, and it's the best version
            logger.info(
                'Installed version (%s) is most up-to-date (past versions: %s)'
                % (req.satisfied_by.version, ', '.join([
                    version for parsed_version, link, version in
                    applicable_versions[1:]
                ]) or 'none'))
            raise BestVersionAlreadyInstalled
        if len(applicable_versions) > 1:
            logger.info(
                'Using version %s (newest of versions: %s)' %
                (applicable_versions[0][2], ', '.join([
                    version
                    for parsed_version, link, version in applicable_versions
                ])))

        selected_version = applicable_versions[0][1]

        # TODO: Remove after 1.4 has been released
        # if (selected_version.internal is not None
        # and not selected_version.internal):
        # logger.warn("You are installing an externally hosted file. Future "
        # "versions of pip will default to disallowing "
        # "externally hosted files.")

        # if (selected_version.verifiable is not None
        # and not selected_version.verifiable):
        # logger.warn("You are installing a potentially insecure and "
        # "unverifiable file. Future versions of pip will "
        # "default to disallowing insecure files.")

        if selected_version._deprecated_regex:
            logger.deprecated(
                "1.7", "%s discovered using a deprecated method of parsing, "
                "in the future it will no longer be discovered" % req.name)

        return selected_version
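
The insecure-transport warnings in the loop above key off a mapping from an insecure URL scheme to the secure schemes worth suggesting instead. A minimal stand-in for that mapping follows; its real contents are assumed here, not quoted from pip.

INSECURE_SCHEMES = {
    'http': ['https'],
}
# With exactly one secure alternative the first warn() branch fires
# ("Consider using https if <host> has it available"); with several, the
# second branch lists them; with an empty list, the bare warning is used.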
Example #31
0
    def get_page(cls, link, req, skip_archives=True, session=None):
        if session is None:
            raise TypeError(
                "get_page() missing 1 required keyword argument: 'session'"
            )

        url = link.url
        url = url.split('#', 1)[0]

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug(
                    'Cannot look at %(scheme)s URL %(link)s' % locals()
                )
                return None

        try:
            if skip_archives:
                filename = link.filename
                for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(
                            url, session=session,
                        )
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug(
                                'Skipping page %s because of Content-Type: '
                                '%s' % (link, content_type)
                            )
                            return

            logger.debug('Getting page %s' % url)

            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query, fragment) = \
                urlparse.urlparse(url)
            if scheme == 'file' and os.path.isdir(url2pathname(path)):
                # add trailing slash if not present so urljoin doesn't trim
                # final segment
                if not url.endswith('/'):
                    url += '/'
                url = urlparse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s' % url)

            resp = session.get(
                url,
                headers={
                    "Accept": "text/html",
                    "Cache-Control": "max-age=600",
                },
            )
            resp.raise_for_status()

            # The check for archives above only works if the url ends with
            #   something that looks like an archive. However that is not a
            #   requirement of a url. Unless we issue a HEAD request on every
            #   url we cannot know ahead of time for sure if something is HTML
            #   or not. However we can check after we've downloaded it.
            content_type = resp.headers.get('Content-Type', 'unknown')
            if not content_type.lower().startswith("text/html"):
                logger.debug(
                    'Skipping page %s because of Content-Type: %s' %
                    (link, content_type)
                )
                return

            inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
        except requests.HTTPError as exc:
            level = 2 if exc.response.status_code == 404 else 1
            cls._handle_fail(req, link, exc, url, level=level)
        except requests.ConnectionError as exc:
            cls._handle_fail(
                req, link, "connection error: %s" % exc, url,
            )
        except requests.Timeout:
            cls._handle_fail(req, link, "timed out", url)
        except SSLError as exc:
            reason = ("There was a problem confirming the ssl certificate: "
                      "%s" % exc)
            cls._handle_fail(
                req, link, reason, url,
                level=2,
                meth=logger.notify,
            )
        else:
            return inst
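
The explicit TypeError at the top of get_page emulates a required keyword-only argument on a Python 2 code base. The same pattern, shown generically with illustrative names (on Python 3 it could simply be spelled `def fetch(url, *, session)`):

def fetch(url, session=None):
    # session is effectively a required keyword argument
    if session is None:
        raise TypeError("fetch() missing 1 required keyword argument: 'session'")
    return session.get(url)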
Example #32
0
    def get_page(cls, link, req, cache=None, skip_archives=True):
        url = link.url
        url = url.split('#', 1)[0]
        if cache.too_many_failures(url):
            return None

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug('Cannot look at %(scheme)s URL %(link)s' %
                             locals())
                return None

        if cache is not None:
            inst = cache.get_page(url)
            if inst is not None:
                return inst
        try:
            if skip_archives:
                if cache is not None:
                    if cache.is_archive(url):
                        return None
                filename = link.filename
                for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(url)
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug(
                                'Skipping page %s because of Content-Type: %s'
                                % (link, content_type))
                            if cache is not None:
                                cache.set_is_archive(url)
                            return None
            logger.debug('Getting page %s' % url)

            # Tack list.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query,
             fragment) = urlparse.urlparse(url)
            if scheme == 'file' and os.path.isdir(url2pathname(path)):
                # add trailing slash if not present so urljoin doesn't trim final segment
                if not url.endswith('/'):
                    url += '/'
                url = urlparse.urljoin(url, 'list.html')
                logger.debug(' file: URL is directory, getting %s' % url)

            resp = urlopen(url)

            real_url = geturl(resp)
            headers = resp.info()
            contents = resp.read()
            encoding = headers.get('Content-Encoding', None)
            #XXX need to handle exceptions and add testing for this
            if encoding is not None:
                if encoding == 'gzip':
                    contents = gzip.GzipFile(fileobj=BytesIO(contents)).read()
                if encoding == 'deflate':
                    contents = zlib.decompress(contents)

            # The check for archives above only works if the url ends with
            #   something that looks like an archive. However that is not a
            #   requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
            #   redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
            #   Unless we issue a HEAD request on every url we cannot know
            #   ahead of time for sure if something is HTML or not. However we
            #   can check after we've downloaded it.
            content_type = headers.get('Content-Type', 'unknown')
            if not content_type.lower().startswith("text/html"):
                logger.debug('Skipping page %s because of Content-Type: %s' %
                             (link, content_type))
                if cache is not None:
                    cache.set_is_archive(url)
                return None

            inst = cls(u(contents), real_url, headers, trusted=link.trusted)
        except (HTTPError, URLError, socket.timeout, socket.error, OSError,
                WindowsError):
            e = sys.exc_info()[1]
            desc = str(e)
            if isinstance(e, socket.timeout):
                log_meth = logger.info
                level = 1
                desc = 'timed out'
            elif isinstance(e, URLError):
                #ssl/certificate error
                if hasattr(e, 'reason') and (
                        isinstance(e.reason, ssl.SSLError)
                        or isinstance(e.reason, CertificateError)):
                    desc = 'There was a problem confirming the ssl certificate: %s' % e
                    log_meth = logger.notify
                else:
                    log_meth = logger.info
                if hasattr(e, 'reason') and isinstance(e.reason,
                                                       socket.timeout):
                    desc = 'timed out'
                    level = 1
                else:
                    level = 2
            elif isinstance(e, HTTPError) and e.code == 404:
                ## FIXME: notify?
                log_meth = logger.info
                level = 2
            else:
                log_meth = logger.info
                level = 1
            log_meth('Could not fetch URL %s: %s' % (link, desc))
            log_meth(
                'Will skip URL %s when looking for download links for %s' %
                (link.url, req))
            if cache is not None:
                cache.add_page_failure(url, level)
            return None
        if cache is not None:
            cache.add_page([url, real_url], inst)
        return inst
Example #33
0
    def find_requirement(self, req, upgrade):

        def mkurl_pypi_url(url):
            loc = posixpath.join(url, url_name)
            # For maximum compatibility with easy_install, ensure the path
            # ends in a trailing slash.  Although this isn't in the spec
            # (and PyPI can handle it without the slash) some other index
            # implementations might break if they relied on easy_install's behavior.
            if not loc.endswith('/'):
                loc = loc + '/'
            return loc

        url_name = req.url_name
        # Only check main index if index URL is given:
        main_index_url = None
        if self.index_urls:
            # Check that we have the url_name correctly spelled:
            main_index_url = Link(mkurl_pypi_url(self.index_urls[0]), trusted=True)
            # This will also cache the page, so it's okay that we get it again later:
            page = self._get_page(main_index_url, req)
            if page is None:
                url_name = self._find_url_name(Link(self.index_urls[0], trusted=True), url_name, req) or req.url_name

        # Combine index URLs with mirror URLs here to allow
        # adding more index URLs from requirements files
        all_index_urls = self.index_urls + self.mirror_urls

        if url_name is not None:
            locations = [
                mkurl_pypi_url(url)
                for url in all_index_urls] + self.find_links
        else:
            locations = list(self.find_links)
        for version in req.absolute_versions:
            if url_name is not None and main_index_url is not None:
                locations = [
                    posixpath.join(main_index_url.url, version)] + locations

        file_locations, url_locations = self._sort_locations(locations)
        _flocations, _ulocations = self._sort_locations(self.dependency_links)
        file_locations.extend(_flocations)

        # We trust every url that the user has given us whether it was given
        #   via --index-url, --user-mirrors/--mirror, or --find-links or a
        #   default option thereof
        locations = [Link(url, trusted=True) for url in url_locations]

        # We explicitly do not trust links that came from dependency_links
        locations.extend([Link(url) for url in _ulocations])

        logger.debug('URLs to search for versions for %s:' % req)
        for location in locations:
            logger.debug('* %s' % location)
        found_versions = []
        found_versions.extend(
            self._package_versions(
                # We trust every directly linked archive in find_links
                [Link(url, '-f', trusted=True) for url in self.find_links], req.name.lower()))
        page_versions = []
        for page in self._get_pages(locations, req):
            logger.debug('Analyzing links from page %s' % page.url)
            logger.indent += 2
            try:
                page_versions.extend(self._package_versions(page.links, req.name.lower()))
            finally:
                logger.indent -= 2
        dependency_versions = list(self._package_versions(
            [Link(url) for url in self.dependency_links], req.name.lower()))
        if dependency_versions:
            logger.info('dependency_links found: %s' % ', '.join([link.url for parsed, link, version in dependency_versions]))
        file_versions = list(self._package_versions(
                [Link(url) for url in file_locations], req.name.lower()))
        if not found_versions and not page_versions and not dependency_versions and not file_versions:
            logger.fatal('Could not find any downloads that satisfy the requirement %s' % req)

            if self.need_warn_external:
                logger.warn("Some externally hosted files were ignored (use "
                            "--allow-external %s to allow)." % req.name)

            if self.need_warn_insecure:
                logger.warn("Some insecure and unverifiable files were ignored"
                            " (use --allow-insecure %s to allow)." % req.name)

            raise DistributionNotFound('No distributions at all found for %s' % req)
        installed_version = []
        if req.satisfied_by is not None:
            installed_version = [(req.satisfied_by.parsed_version, InfLink, req.satisfied_by.version)]
        if file_versions:
            file_versions.sort(reverse=True)
            logger.info('Local files found: %s' % ', '.join([url_to_path(link.url) for parsed, link, version in file_versions]))
        # this is an intentional priority ordering
        all_versions = installed_version + file_versions + found_versions + page_versions + dependency_versions
        applicable_versions = []
        for (parsed_version, link, version) in all_versions:
            if version not in req.req:
                logger.info("Ignoring link %s, version %s doesn't match %s"
                            % (link, version, ','.join([''.join(s) for s in req.req.specs])))
                continue
            elif is_prerelease(version) and not (self.allow_all_prereleases or req.prereleases):
                # If this version isn't the already installed one, then
                #   ignore it if it's a pre-release.
                if link is not InfLink:
                    logger.info("Ignoring link %s, version %s is a pre-release (use --pre to allow)." % (link, version))
                    continue
            applicable_versions.append((parsed_version, link, version))
        applicable_versions = self._sort_versions(applicable_versions)
        existing_applicable = bool([link for parsed_version, link, version in applicable_versions if link is InfLink])
        if not upgrade and existing_applicable:
            if applicable_versions[0][1] is InfLink:
                logger.info('Existing installed version (%s) is most up-to-date and satisfies requirement'
                            % req.satisfied_by.version)
            else:
                logger.info('Existing installed version (%s) satisfies requirement (most up-to-date version is %s)'
                            % (req.satisfied_by.version, applicable_versions[0][2]))
            return None
        if not applicable_versions:
            logger.fatal('Could not find a version that satisfies the requirement %s (from versions: %s)'
                         % (req, ', '.join([version for parsed_version, link, version in all_versions])))

            if self.need_warn_external:
                logger.warn("Some externally hosted files were ignored (use "
                            "--allow-external to allow).")

            if self.need_warn_insecure:
                logger.warn("Some insecure and unverifiable files were ignored"
                            " (use --allow-insecure %s to allow)." % req.name)

            raise DistributionNotFound('No distributions matching the version for %s' % req)
        if applicable_versions[0][1] is InfLink:
            # We have an existing version, and it's the best version
            logger.info('Installed version (%s) is most up-to-date (past versions: %s)'
                        % (req.satisfied_by.version, ', '.join([version for parsed_version, link, version in applicable_versions[1:]]) or 'none'))
            raise BestVersionAlreadyInstalled
        if len(applicable_versions) > 1:
            logger.info('Using version %s (newest of versions: %s)' %
                        (applicable_versions[0][2], ', '.join([version for parsed_version, link, version in applicable_versions])))

        selected_version = applicable_versions[0][1]

        # TODO: Remove after 1.4 has been released
        if (selected_version.internal is not None
                and not selected_version.internal):
            logger.warn("You are installing an externally hosted file. Future "
                        "versions of pip will default to disallowing "
                        "externally hosted files.")

        if (selected_version.verifiable is not None
                and not selected_version.verifiable):
            logger.warn("You are installing a potentially insecure and "
                        "unverifiable file. Future versions of pip will "
                        "default to disallowing insecure files.")

        return selected_version
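The selection at the end of find_requirement amounts to concatenating the candidate lists in an intentional priority order and sorting them newest-first. A tiny self-contained sketch of that ordering follows; the versions and link strings are made up, and parse_version comes from setuptools' pkg_resources.

# Illustrative sketch of the candidate ordering; all data here is made up.
from pkg_resources import parse_version

# (parsed_version, link, version) triples in priority order:
# installed first, then local files, then versions found on index pages.
installed_version = [(parse_version('1.0'), 'INSTALLED', '1.0')]
file_versions = [(parse_version('1.2'), 'file:///tmp/pkg-1.2.tar.gz', '1.2')]
page_versions = [(parse_version('1.1'), 'https://example.invalid/pkg-1.1.tar.gz', '1.1')]

all_versions = installed_version + file_versions + page_versions

# Sort on the parsed version, newest first; equal versions keep their
# priority order because Python's sort is stable.
applicable_versions = sorted(all_versions, key=lambda v: v[0], reverse=True)

selected = applicable_versions[0]
print('selected %s from %s' % (selected[2], selected[1]))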
Example #34
0
    def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
        if session is None:
            session = PipSession()

        url = link.url
        url = url.split('#', 1)[0]
        if cache.too_many_failures(url):
            return None

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug(
                    'Cannot look at %(scheme)s URL %(link)s' % locals()
                )
                return None

        if cache is not None:
            inst = cache.get_page(url)
            if inst is not None:
                return inst
        try:
            if skip_archives:
                if cache is not None:
                    if cache.is_archive(url):
                        return None
                filename = link.filename
                for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(
                            url, session=session,
                        )
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug(
                                'Skipping page %s because of Content-Type: '
                                '%s' % (link, content_type)
                            )
                            if cache is not None:
                                cache.set_is_archive(url)
                            return None
            logger.debug('Getting page %s' % url)

            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query, fragment) = \
                urlparse.urlparse(url)
            if scheme == 'file' and os.path.isdir(url2pathname(path)):
                # add trailing slash if not present so urljoin doesn't trim
                # final segment
                if not url.endswith('/'):
                    url += '/'
                url = urlparse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s' % url)

            resp = session.get(url, headers={"Accept": "text/html"})
            resp.raise_for_status()

            # The check for archives above only works if the url ends with
            #   something that looks like an archive. However that is not a
            #   requirement of an url. Unless we issue a HEAD request on every
            #   url we cannot know ahead of time for sure if something is HTML
            #   or not. However we can check after we've downloaded it.
            content_type = resp.headers.get('Content-Type', 'unknown')
            if not content_type.lower().startswith("text/html"):
                logger.debug(
                    'Skipping page %s because of Content-Type: %s' %
                    (link, content_type)
                )
                if cache is not None:
                    cache.set_is_archive(url)
                return None

            inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
        except requests.HTTPError as exc:
            level = 2 if exc.response.status_code == 404 else 1
            cls._handle_fail(req, link, exc, url, cache=cache, level=level)
        except requests.ConnectionError as exc:
            cls._handle_fail(
                req, link, "connection error: %s" % exc, url,
                cache=cache,
            )
        except requests.Timeout:
            cls._handle_fail(req, link, "timed out", url, cache=cache)
        except SSLError as exc:
            reason = ("There was a problem confirming the ssl certificate: "
                      "%s" % exc)
            cls._handle_fail(
                req, link, reason, url,
                cache=cache,
                level=2,
                meth=logger.notify,
            )
        else:
            if cache is not None:
                cache.add_page([url, resp.url], inst)
            return inst
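The requests-based variant above routes every failure mode (HTTP error, SSL problem, timeout, connection error) to a single failure handler with a severity level. A minimal sketch of that routing, using only the public requests API, is shown below; fetch_page and log_failure are hypothetical helpers, not pip's _handle_fail.

# Illustrative sketch; fetch_page and log_failure are hypothetical.
import requests
from requests.exceptions import SSLError

def log_failure(url, reason, level=1):
    print('Could not fetch %s: %s (level %s)' % (url, reason, level))

def fetch_page(url, session=None):
    session = session or requests.Session()
    try:
        resp = session.get(url, headers={'Accept': 'text/html'}, timeout=10)
        resp.raise_for_status()
    except requests.HTTPError as exc:
        # 404s are routine while probing an index, so treat them as less severe.
        log_failure(url, exc, level=2 if exc.response.status_code == 404 else 1)
    except SSLError as exc:
        log_failure(url, 'problem confirming the ssl certificate: %s' % exc, level=2)
    except requests.Timeout:
        log_failure(url, 'timed out')
    except requests.ConnectionError as exc:
        log_failure(url, 'connection error: %s' % exc)
    else:
        return resp.text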
Example #35
0
            schemes.extend(backend.schemes)
        return schemes

    def register(self, cls):
        if not hasattr(cls, 'name'):
            logger.warning('Cannot register VCS %s', cls.__name__)
            return
        if cls.name not in self._registry:
            self._registry[cls.name] = cls
            logger.debug('Registered VCS backend: %s', cls.name)

    def unregister(self, cls=None, name=None):
        if name in self._registry:
            del self._registry[name]
        elif cls in self._registry.values():
            del self._registry[cls.name]
        else:
            logger.warning('Cannot unregister because no class or name given')

    def get_backend_name(self, location):
Example #36
0
    def _link_package_versions(self, link, search_name):
        """
        Return an iterable of triples (pkg_resources_version_key,
        link, python_version) that can be extracted from the given
        link.

        Meant to be overridden by subclasses, not called by clients.
        """
        platform = get_platform()

        version = None
        if link.egg_fragment:
            egg_info = link.egg_fragment
        else:
            egg_info, ext = link.splitext()
            if not ext:
                if link not in self.logged_links:
                    logger.debug('Skipping link %s; not a file' % link)
                    self.logged_links.add(link)
                return []
            if egg_info.endswith('.tar'):
                # Special double-extension case:
                egg_info = egg_info[:-4]
                ext = '.tar' + ext
            if ext not in self._known_extensions():
                if link not in self.logged_links:
                    logger.debug(
                        'Skipping link %s; unknown archive format: %s' %
                        (link, ext)
                    )
                    self.logged_links.add(link)
                return []
            if "macosx10" in link.path and ext == '.zip':
                if link not in self.logged_links:
                    logger.debug('Skipping link %s; macosx10 one' % (link))
                    self.logged_links.add(link)
                return []
            if ext == wheel_ext:
                try:
                    wheel = Wheel(link.filename)
                except InvalidWheelFilename:
                    logger.debug(
                        'Skipping %s because the wheel filename is invalid' %
                        link
                    )
                    return []
                if wheel.name.lower() != search_name.lower():
                    logger.debug(
                        'Skipping link %s; wrong project name (not %s)' %
                        (link, search_name)
                    )
                    return []
                if not wheel.supported():
                    logger.debug(
                        'Skipping %s because it is not compatible with this '
                        'Python' % link
                    )
                    return []
                # This is a dirty hack to prevent installing Binary Wheels from
                # PyPI unless it is a Windows or Mac Binary Wheel. This is
                # paired with a change to PyPI disabling uploads for the
                # same. Once we have a mechanism for enabling support for
                # binary wheels on linux that deals with the inherent problems
                # of binary distribution this can be removed.
                comes_from = getattr(link, "comes_from", None)
                if (
                        (
                            not platform.startswith('win')
                            and not platform.startswith('macosx')
                        )
                        and comes_from is not None
                        and urlparse.urlparse(
                            comes_from.url
                        ).netloc.endswith("pypi.python.org")):
                    if not wheel.supported(tags=supported_tags_noarch):
                        logger.debug(
                            "Skipping %s because it is a pypi-hosted binary "
                            "Wheel on an unsupported platform" % link
                        )
                        return []
                version = wheel.version

        if not version:
            version = self._egg_info_matches(egg_info, search_name, link)
        if version is None:
            logger.debug(
                'Skipping link %s; wrong project name (not %s)' %
                (link, search_name)
            )
            return []

        if (link.internal is not None
                and not link.internal
                and not normalize_name(search_name).lower()
                in self.allow_external
                and not self.allow_all_external):
            # We have a link that we are sure is external, so we should skip
            #   it unless we are allowing externals
            logger.debug("Skipping %s because it is externally hosted." % link)
            self.need_warn_external = True
            return []

        if (link.verifiable is not None
                and not link.verifiable
                and not (normalize_name(search_name).lower()
                         in self.allow_unverified)):
            # We have a link that we are sure we cannot verify its integrity,
            #   so we should skip it unless we are allowing unsafe installs
            #   for this requirement.
            logger.debug("Skipping %s because it is an insecure and "
                         "unverifiable file." % link)
            self.need_warn_unverified = True
            return []

        match = self._py_version_re.search(version)
        if match:
            version = version[:match.start()]
            py_version = match.group(1)
            if py_version != sys.version[:3]:
                logger.debug(
                    'Skipping %s because Python version is incorrect' % link
                )
                return []
        logger.debug('Found link %s, version: %s' % (link, version))
        return [(
            pkg_resources.parse_version(version),
            link,
            version,
        )]
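The extension splitting in _link_package_versions, including the special ".tar" double-extension case, can be illustrated on its own. Below is a stdlib-only sketch; split_archive_ext is a hypothetical helper, not part of pip.

# Illustrative sketch of the double-extension handling above.
import posixpath

def split_archive_ext(filename):
    """Return (base, extension), treating .tar.* as a single extension."""
    base, ext = posixpath.splitext(filename)
    if base.endswith('.tar'):
        base, ext = base[:-4], '.tar' + ext
    return base, ext

assert split_archive_ext('docutils-0.8.1.tar.gz') == ('docutils-0.8.1', '.tar.gz')
assert split_archive_ext('pip-1.5.zip') == ('pip-1.5', '.zip')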
Example #37
0
    def get_page(cls, link, req, cache=None, skip_archives=True):
        url = link.url
        url = url.split('#', 1)[0]
        if cache.too_many_failures(url):
            return None

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug('Cannot look at %(scheme)s URL %(link)s' %
                             locals())
                return None

        if cache is not None:
            inst = cache.get_page(url)
            if inst is not None:
                return inst
        try:
            if skip_archives:
                if cache is not None:
                    if cache.is_archive(url):
                        return None
                filename = link.filename
                for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(url)
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug(
                                'Skipping page %s because of Content-Type: %s'
                                % (link, content_type))
                            if cache is not None:
                                cache.set_is_archive(url)
                            return None
            logger.debug('Getting page %s' % url)

            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query,
             fragment) = urlparse.urlparse(url)
            if scheme == 'file' and os.path.isdir(url2pathname(path)):
                # add trailing slash if not present so urljoin doesn't trim final segment
                if not url.endswith('/'):
                    url += '/'
                url = urlparse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s' % url)

            resp = urlopen(url)

            real_url = geturl(resp)
            headers = resp.info()
            contents = resp.read()
            encoding = headers.get('Content-Encoding', None)
            #XXX need to handle exceptions and add testing for this
            if encoding is not None:
                if encoding == 'gzip':
                    contents = gzip.GzipFile(fileobj=BytesIO(contents)).read()
                if encoding == 'deflate':
                    contents = zlib.decompress(contents)
            inst = cls(u(contents), real_url, headers)
        except (HTTPError, URLError, socket.timeout, socket.error, OSError,
                WindowsError):
            e = sys.exc_info()[1]
            desc = str(e)
            if isinstance(e, socket.timeout):
                log_meth = logger.info
                level = 1
                desc = 'timed out'
            elif isinstance(e, URLError):
                log_meth = logger.info
                if hasattr(e, 'reason') and isinstance(e.reason,
                                                       socket.timeout):
                    desc = 'timed out'
                    level = 1
                else:
                    level = 2
            elif isinstance(e, HTTPError) and e.code == 404:
                ## FIXME: notify?
                log_meth = logger.info
                level = 2
            else:
                log_meth = logger.info
                level = 1
            log_meth('Could not fetch URL %s: %s' % (link, desc))
            log_meth(
                'Will skip URL %s when looking for download links for %s' %
                (link.url, req))
            if cache is not None:
                cache.add_page_failure(url, level)
            return None
        if cache is not None:
            cache.add_page([url, real_url], inst)
        return inst
Example #38
0
    changed = set()
    generated = []

    # Compile all of the pyc files that we're going to be installing
    if pycompile:
        with captured_stdout() as stdout:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore')
                compileall.compile_dir(source, force=True, quiet=True)
        logger.debug(stdout.getvalue())

    def normpath(src, p):
        return os.path.relpath(src, p).replace(os.path.sep, '/')

    def record_installed(srcfile, destfile, modified=False):
        """Map archive RECORD paths to installation RECORD paths."""
        oldpath = normpath(srcfile, wheeldir)
        newpath = normpath(destfile, lib_dir)
        installed[oldpath] = newpath
        if modified:
            changed.add(destfile)

    def clobber(source, dest, is_base, fixer=None, filter=None):
Example #39
0
    def _link_package_versions(self, link, search_name):
        """
        Return an iterable of triples (pkg_resources_version_key,
        link, python_version) that can be extracted from the given
        link.

        Meant to be overridden by subclasses, not called by clients.
        """
        platform = get_platform()

        version = None
        if link.egg_fragment:
            egg_info = link.egg_fragment
        else:
            egg_info, ext = link.splitext()
            if not ext:
                if link not in self.logged_links:
                    logger.debug('Skipping link %s; not a file' % link)
                    self.logged_links.add(link)
                return []
            if egg_info.endswith('.tar'):
                # Special double-extension case:
                egg_info = egg_info[:-4]
                ext = '.tar' + ext
            if ext not in self._known_extensions():
                if link not in self.logged_links:
                    logger.debug(
                        'Skipping link %s; unknown archive format: %s' %
                        (link, ext))
                    self.logged_links.add(link)
                return []
            if "macosx10" in link.path and ext == '.zip':
                if link not in self.logged_links:
                    logger.debug('Skipping link %s; macosx10 one' % (link))
                    self.logged_links.add(link)
                return []
            if ext == wheel_ext:
                try:
                    wheel = Wheel(link.filename)
                except InvalidWheelFilename:
                    logger.debug(
                        'Skipping %s because the wheel filename is invalid' %
                        link)
                    return []
                if wheel.name.lower() != search_name.lower():
                    logger.debug(
                        'Skipping link %s; wrong project name (not %s)' %
                        (link, search_name))
                    return []
                if not wheel.supported():
                    logger.debug(
                        'Skipping %s because it is not compatible with this Python'
                        % link)
                    return []
                # This is a dirty hack to prevent installing Binary Wheels from
                # PyPI unless it is a Windows or Mac Binary Wheel. This is
                # paired with a change to PyPI disabling uploads for the
                # same. Once we have a mechanism for enabling support for binary
                # wheels on linux that deals with the inherent problems of
                # binary distribution this can be removed.
                comes_from = getattr(link, "comes_from", None)
                if ((not platform.startswith('win')
                     and not platform.startswith('macosx'))
                        and comes_from is not None and urlparse.urlparse(
                            comes_from.url).netloc.endswith("pypi.python.org")
                    ):
                    if not wheel.supported(tags=supported_tags_noarch):
                        logger.debug(
                            "Skipping %s because it is a pypi-hosted binary "
                            "Wheel on an unsupported platform" % link)
                        return []
                version = wheel.version

        if not version:
            version = self._egg_info_matches(egg_info, search_name, link)
        if version is None:
            logger.debug('Skipping link %s; wrong project name (not %s)' %
                         (link, search_name))
            return []

        if (link.internal is not None and not link.internal and
                not normalize_name(search_name).lower() in self.allow_external
                and not self.allow_all_external):
            # We have a link that we are sure is external, so we should skip
            #   it unless we are allowing externals
            logger.debug("Skipping %s because it is externally hosted." % link)
            self.need_warn_external = True
            return []

        if (link.verifiable is not None and not link.verifiable
                and not (normalize_name(search_name).lower()
                         in self.allow_unverified)
                and not self.allow_all_unverified):
            # We have a link that we are sure we cannot verify its integrity,
            #   so we should skip it unless we are allowing unsafe installs
            #   for this requirement.
            logger.debug("Skipping %s because it is an insecure and "
                         "unverifiable file." % link)
            self.need_warn_unverified = True
            return []

        match = self._py_version_re.search(version)
        if match:
            version = version[:match.start()]
            py_version = match.group(1)
            if py_version != sys.version[:3]:
                logger.debug(
                    'Skipping %s because Python version is incorrect' % link)
                return []
        logger.debug('Found link %s, version: %s' % (link, version))
        return [(pkg_resources.parse_version(version), link, version)]
Example #40
0
    def get_page(cls, link, req, cache=None, skip_archives=True):
        url = link.url
        url = url.split('#', 1)[0]
        if cache.too_many_failures(url):
            return None

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug('Cannot look at %(scheme)s URL %(link)s' % locals())
                return None

        if cache is not None:
            inst = cache.get_page(url)
            if inst is not None:
                return inst
        try:
            if skip_archives:
                if cache is not None:
                    if cache.is_archive(url):
                        return None
                filename = link.filename
                for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(url)
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type))
                            if cache is not None:
                                cache.set_is_archive(url)
                            return None
            logger.debug('Getting page %s' % url)

            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
            if scheme == 'file' and os.path.isdir(url2pathname(path)):
                # add trailing slash if not present so urljoin doesn't trim final segment
                if not url.endswith('/'):
                    url += '/'
                url = urlparse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s' % url)

            resp = urlopen(url)

            real_url = geturl(resp)
            headers = resp.info()
            contents = resp.read()
            encoding = headers.get('Content-Encoding', None)
            #XXX need to handle exceptions and add testing for this
            if encoding is not None:
                if encoding == 'gzip':
                    contents = gzip.GzipFile(fileobj=BytesIO(contents)).read()
                if encoding == 'deflate':
                    contents = zlib.decompress(contents)
            inst = cls(u(contents), real_url, headers)
        except (HTTPError, URLError, socket.timeout, socket.error, OSError, WindowsError):
            e = sys.exc_info()[1]
            desc = str(e)
            if isinstance(e, socket.timeout):
                log_meth = logger.info
                level = 1
                desc = 'timed out'
            elif isinstance(e, URLError):
                log_meth = logger.info
                if hasattr(e, 'reason') and isinstance(e.reason, socket.timeout):
                    desc = 'timed out'
                    level = 1
                else:
                    level = 2
            elif isinstance(e, HTTPError) and e.code == 404:
                ## FIXME: notify?
                log_meth = logger.info
                level = 2
            else:
                log_meth = logger.info
                level = 1
            log_meth('Could not fetch URL %s: %s' % (link, desc))
            log_meth('Will skip URL %s when looking for download links for %s' % (link.url, req))
            if cache is not None:
                cache.add_page_failure(url, level)
            return None
        if cache is not None:
            cache.add_page([url, real_url], inst)
        return inst
Example #41
0
    def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
        if session is None:
            session = PipSession()

        url = link.url
        url = url.split('#', 1)[0]
        if cache.too_many_failures(url):
            return None

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug('Cannot look at %(scheme)s URL %(link)s' %
                             locals())
                return None

        if cache is not None:
            inst = cache.get_page(url)
            if inst is not None:
                return inst
        try:
            if skip_archives:
                if cache is not None:
                    if cache.is_archive(url):
                        return None
                filename = link.filename
                for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(
                            url,
                            session=session,
                        )
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug(
                                'Skipping page %s because of Content-Type: %s'
                                % (link, content_type))
                            if cache is not None:
                                cache.set_is_archive(url)
                            return None
            logger.debug('Getting page %s' % url)

            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query,
             fragment) = urlparse.urlparse(url)
            if scheme == 'file' and os.path.isdir(url2pathname(path)):
                # add trailing slash if not present so urljoin doesn't trim final segment
                if not url.endswith('/'):
                    url += '/'
                url = urlparse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s' % url)

            resp = session.get(url, headers={"Accept": "text/html"})
            resp.raise_for_status()

            # The check for archives above only works if the url ends with
            #   something that looks like an archive. However that is not a
            #   requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
            #   redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
            #   Unless we issue a HEAD request on every url we cannot know
            #   ahead of time for sure if something is HTML or not. However we
            #   can check after we've downloaded it.
            content_type = resp.headers.get('Content-Type', 'unknown')
            if not content_type.lower().startswith("text/html"):
                logger.debug('Skipping page %s because of Content-Type: %s' %
                             (link, content_type))
                if cache is not None:
                    cache.set_is_archive(url)
                return None

            inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
        except requests.HTTPError as exc:
            level = 2 if exc.response.status_code == 404 else 1
            cls._handle_fail(req, link, exc, url, cache=cache, level=level)
        except requests.ConnectionError as exc:
            cls._handle_fail(
                req,
                link,
                "connection error: %s" % exc,
                url,
                cache=cache,
            )
        except requests.Timeout:
            cls._handle_fail(req, link, "timed out", url, cache=cache)
        except SSLError as exc:
            reason = ("There was a problem confirming the ssl certificate: "
                      "%s" % exc)
            cls._handle_fail(
                req,
                link,
                reason,
                url,
                cache=cache,
                level=2,
                meth=logger.notify,
            )
        except requests.TooManyRedirects as exc:
            cls._handle_fail(
                req,
                link,
                "Error: %s" % exc,
                url,
                cache=cache,
            )
        else:
            if cache is not None:
                cache.add_page([url, resp.url], inst)
            return inst
Example #42
0
    def find_requirement(self, req, upgrade):
        url_name = req.url_name
        # Only check main index if index URL is given:
        main_index_url = None
        if self.index_urls:
            # Check that we have the url_name correctly spelled:
            main_index_url = Link(posixpath.join(self.index_urls[0], url_name))
            # This will also cache the page, so it's okay that we get it again later:
            page = self._get_page(main_index_url, req)
            if page is None:
                url_name = self._find_url_name(Link(self.index_urls[0]), url_name, req) or req.url_name

        # Combine index URLs with mirror URLs here to allow
        # adding more index URLs from requirements files
        all_index_urls = self.index_urls + self.mirror_urls

        def mkurl_pypi_url(url):
            loc = posixpath.join(url, url_name)
            # For maximum compatibility with easy_install, ensure the path
            # ends in a trailing slash.  Although this isn't in the spec
            # (and PyPI can handle it without the slash) some other index
            # implementations might break if they relied on easy_install's behavior.
            if not loc.endswith('/'):
                loc = loc + '/'
            return loc
        if url_name is not None:
            locations = [
                mkurl_pypi_url(url)
                for url in all_index_urls] + self.find_links
        else:
            locations = list(self.find_links)
        locations.extend(self.dependency_links)
        for version in req.absolute_versions:
            if url_name is not None and main_index_url is not None:
                locations = [
                    posixpath.join(main_index_url.url, version)] + locations

        file_locations, url_locations = self._sort_locations(locations)

        locations = [Link(url) for url in url_locations]
        logger.debug('URLs to search for versions for %s:' % req)
        for location in locations:
            logger.debug('* %s' % location)
        found_versions = []
        found_versions.extend(
            self._package_versions(
                [Link(url, '-f') for url in self.find_links], req.name.lower()))
        page_versions = []
        for page in self._get_pages(locations, req):
            logger.debug('Analyzing links from page %s' % page.url)
            logger.indent += 2
            try:
                page_versions.extend(self._package_versions(page.links, req.name.lower()))
            finally:
                logger.indent -= 2
        dependency_versions = list(self._package_versions(
            [Link(url) for url in self.dependency_links], req.name.lower()))
        if dependency_versions:
            logger.info('dependency_links found: %s' % ', '.join([link.url for parsed, link, version in dependency_versions]))
        file_versions = list(self._package_versions(
                [Link(url) for url in file_locations], req.name.lower()))
        if not found_versions and not page_versions and not dependency_versions and not file_versions:
            logger.fatal('Could not find any downloads that satisfy the requirement %s' % req)
            raise DistributionNotFound('No distributions at all found for %s' % req)
        if req.satisfied_by is not None:
            found_versions.append((req.satisfied_by.parsed_version, Inf, req.satisfied_by.version))
        if file_versions:
            file_versions.sort(reverse=True)
            logger.info('Local files found: %s' % ', '.join([url_to_path(link.url) for parsed, link, version in file_versions]))
            found_versions = file_versions + found_versions
        all_versions = found_versions + page_versions + dependency_versions
        applicable_versions = []
        for (parsed_version, link, version) in all_versions:
            if version not in req.req:
                logger.info("Ignoring link %s, version %s doesn't match %s"
                            % (link, version, ','.join([''.join(s) for s in req.req.specs])))
                continue
            applicable_versions.append((link, version))
        applicable_versions = sorted(applicable_versions, key=lambda v: pkg_resources.parse_version(v[1]), reverse=True)
        existing_applicable = bool([link for link, version in applicable_versions if link is Inf])
        if not upgrade and existing_applicable:
            if applicable_versions[0][0] is Inf:
                logger.info('Existing installed version (%s) is most up-to-date and satisfies requirement'
                            % req.satisfied_by.version)
                raise BestVersionAlreadyInstalled
            else:
                logger.info('Existing installed version (%s) satisfies requirement (most up-to-date version is %s)'
                            % (req.satisfied_by.version, applicable_versions[0][1]))
            return None
        if not applicable_versions:
            logger.fatal('Could not find a version that satisfies the requirement %s (from versions: %s)'
                         % (req, ', '.join([version for parsed_version, link, version in found_versions])))
            raise DistributionNotFound('No distributions matching the version for %s' % req)
        if applicable_versions[0][0] is Inf:
            # We have an existing version, and it's the best version
            logger.info('Installed version (%s) is most up-to-date (past versions: %s)'
                        % (req.satisfied_by.version, ', '.join([version for link, version in applicable_versions[1:]]) or 'none'))
            raise BestVersionAlreadyInstalled
        if len(applicable_versions) > 1:
            logger.info('Using version %s (newest of versions: %s)' %
                        (applicable_versions[0][1], ', '.join([version for link, version in applicable_versions])))
        return applicable_versions[0][0]
Example #43
0
    def find_requirement(self, req, upgrade):

        def mkurl_pypi_url(url):
            loc = posixpath.join(url, url_name)
            # For maximum compatibility with easy_install, ensure the path
            # ends in a trailing slash.  Although this isn't in the spec
            # (and PyPI can handle it without the slash) some other index
            # implementations might break if they relied on easy_install's
            # behavior.
            if not loc.endswith('/'):
                loc = loc + '/'
            return loc

        url_name = req.url_name
        # Only check main index if index URL is given:
        main_index_url = None
        if self.index_urls:
            # Check that we have the url_name correctly spelled:
            main_index_url = Link(
                mkurl_pypi_url(self.index_urls[0]),
                trusted=True,
            )
            # This will also cache the page, so it's okay that we get it again
            # later:
            page = self._get_page(main_index_url, req)
            if page is None:
                url_name = self._find_url_name(
                    Link(self.index_urls[0], trusted=True),
                    url_name, req
                ) or req.url_name

        if url_name is not None:
            locations = [
                mkurl_pypi_url(url)
                for url in self.index_urls] + self.find_links
        else:
            locations = list(self.find_links)
        for version in req.absolute_versions:
            if url_name is not None and main_index_url is not None:
                locations = [
                    posixpath.join(main_index_url.url, version)] + locations

        file_locations, url_locations = self._sort_locations(locations)

        # We trust every url that the user has given us whether it was given
        #   via --index-url or --find-links
        locations = [Link(url, trusted=True) for url in url_locations]

        logger.debug('URLs to search for versions for %s:' % req)
        for location in locations:
            logger.debug('* %s' % location)

            # Determine if this url used a secure transport mechanism
            parsed = urlparse.urlparse(str(location))
            if parsed.scheme in INSECURE_SCHEMES:
                secure_schemes = INSECURE_SCHEMES[parsed.scheme]

                if len(secure_schemes) == 1:
                    ctx = (location, parsed.scheme, secure_schemes[0],
                           parsed.netloc)
                    logger.warn("%s uses an insecure transport scheme (%s). "
                                "Consider using %s if %s has it available" %
                                ctx)
                elif len(secure_schemes) > 1:
                    ctx = (
                        location,
                        parsed.scheme,
                        ", ".join(secure_schemes),
                        parsed.netloc,
                    )
                    logger.warn("%s uses an insecure transport scheme (%s). "
                                "Consider using one of %s if %s has any of "
                                "them available" % ctx)
                else:
                    ctx = (location, parsed.scheme)
                    logger.warn("%s uses an insecure transport scheme (%s)." %
                                ctx)

        found_versions = []
        found_versions.extend(
            self._package_versions(
                # We trust every directly linked archive in find_links
                [Link(url, '-f', trusted=True) for url in self.find_links],
                req.name.lower()
            )
        )
        page_versions = []
        for page in self._get_pages(locations, req):
            logger.debug('Analyzing links from page %s' % page.url)
            logger.indent += 2
            try:
                page_versions.extend(
                    self._package_versions(page.links, req.name.lower())
                )
            finally:
                logger.indent -= 2
        file_versions = list(
            self._package_versions(
                [Link(url) for url in file_locations],
                req.name.lower()
            )
        )
        if (not found_versions
                and not page_versions
                and not file_versions):
            logger.fatal(
                'Could not find any downloads that satisfy the requirement'
                ' %s' % req
            )

            if self.need_warn_external:
                logger.warn("Some externally hosted files were ignored (use "
                            "--allow-external %s to allow)." % req.name)

            if self.need_warn_unverified:
                logger.warn("Some insecure and unverifiable files were ignored"
                            " (use --allow-unverified %s to allow)." %
                            req.name)

            raise DistributionNotFound(
                'No distributions at all found for %s' % req
            )
        installed_version = []
        if req.satisfied_by is not None:
            installed_version = [(
                req.satisfied_by.parsed_version,
                INSTALLED_VERSION,
                req.satisfied_by.version,
            )]
        if file_versions:
            file_versions.sort(reverse=True)
            logger.info(
                'Local files found: %s' %
                ', '.join([
                    url_to_path(link.url)
                    for parsed, link, version in file_versions
                ])
            )
        # this is an intentional priority ordering
        all_versions = installed_version + file_versions + found_versions \
            + page_versions
        applicable_versions = []
        for (parsed_version, link, version) in all_versions:
            if version not in req.req:
                logger.info(
                    "Ignoring link %s, version %s doesn't match %s" %
                    (
                        link,
                        version,
                        ','.join([''.join(s) for s in req.req.specs])
                    )
                )
                continue
            elif (is_prerelease(version)
                    and not (self.allow_all_prereleases or req.prereleases)):
                # If this version isn't the already installed one, then
                #   ignore it if it's a pre-release.
                if link is not INSTALLED_VERSION:
                    logger.info(
                        "Ignoring link %s, version %s is a pre-release (use "
                        "--pre to allow)." % (link, version)
                    )
                    continue
            applicable_versions.append((parsed_version, link, version))
        applicable_versions = self._sort_versions(applicable_versions)
        existing_applicable = bool([
            link
            for parsed_version, link, version in applicable_versions
            if link is INSTALLED_VERSION
        ])
        if not upgrade and existing_applicable:
            if applicable_versions[0][1] is INSTALLED_VERSION:
                logger.info(
                    'Existing installed version (%s) is most up-to-date and '
                    'satisfies requirement' % req.satisfied_by.version
                )
            else:
                logger.info(
                    'Existing installed version (%s) satisfies requirement '
                    '(most up-to-date version is %s)' %
                    (req.satisfied_by.version, applicable_versions[0][2])
                )
            return None
        if not applicable_versions:
            logger.fatal(
                'Could not find a version that satisfies the requirement %s '
                '(from versions: %s)' %
                (
                    req,
                    ', '.join([
                        version
                        for parsed_version, link, version in all_versions
                    ])
                )
            )

            if self.need_warn_external:
                logger.warn("Some externally hosted files were ignored (use "
                            "--allow-external to allow).")

            if self.need_warn_unverified:
                logger.warn("Some insecure and unverifiable files were ignored"
                            " (use --allow-unverified %s to allow)." %
                            req.name)

            raise DistributionNotFound(
                'No distributions matching the version for %s' % req
            )
        if applicable_versions[0][1] is INSTALLED_VERSION:
            # We have an existing version, and it's the best version
            logger.info(
                'Installed version (%s) is most up-to-date (past versions: '
                '%s)' % (
                    req.satisfied_by.version,
                    ', '.join([
                        version for parsed_version, link, version
                        in applicable_versions[1:]
                    ]) or 'none'))
            raise BestVersionAlreadyInstalled
        if len(applicable_versions) > 1:
            logger.info(
                'Using version %s (newest of versions: %s)' %
                (
                    applicable_versions[0][2],
                    ', '.join([
                        version for parsed_version, link, version
                        in applicable_versions
                    ])
                )
            )

        selected_version = applicable_versions[0][1]

        if (selected_version.internal is not None
                and not selected_version.internal):
            logger.warn("%s an externally hosted file and may be "
                        "unreliable" % req.name)

        if (selected_version.verifiable is not None
                and not selected_version.verifiable):
            logger.warn("%s is potentially insecure and "
                        "unverifiable." % req.name)

        if selected_version._deprecated_regex:
            logger.deprecated(
                "1.7",
                "%s discovered using a deprecated method of parsing, "
                "in the future it will no longer be discovered" % req.name
            )

        return selected_version
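The insecure-transport warning in the location loop above checks a URL's scheme against a mapping of insecure schemes to their secure counterparts. A small self-contained sketch of that check follows; the scheme map and warn_if_insecure helper here are assumptions for illustration, not pip's actual INSECURE_SCHEMES table.

# Illustrative sketch; the scheme map below is an assumption.
try:
    from urllib.parse import urlparse          # Python 3
except ImportError:
    from urlparse import urlparse              # Python 2

INSECURE_SCHEMES = {'http': ['https']}

def warn_if_insecure(url):
    parsed = urlparse(url)
    secure_schemes = INSECURE_SCHEMES.get(parsed.scheme)
    if secure_schemes:
        print('%s uses an insecure transport scheme (%s). Consider using %s '
              'if %s has it available' %
              (url, parsed.scheme, secure_schemes[0], parsed.netloc))

warn_if_insecure('http://pypi.example.invalid/simple/')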
Example #44
0
    def get_page(cls, link, req, cache=None, skip_archives=True):
        url = link.url
        url = url.split('#', 1)[0]
        if cache.too_many_failures(url):
            return None

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport
        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
                logger.debug('Cannot look at %(scheme)s URL %(link)s' % locals())
                return None

        if cache is not None:
            inst = cache.get_page(url)
            if inst is not None:
                return inst
        try:
            if skip_archives:
                if cache is not None:
                    if cache.is_archive(url):
                        return None
                filename = link.filename
                for bad_ext in ['.tar', '.tar.gz', '.tar.bz2', '.tgz', '.zip']:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(url)
                        if content_type.lower().startswith('text/html'):
                            break
                        else:
                            logger.debug('Skipping page %s because of Content-Type: %s' % (link, content_type))
                            if cache is not None:
                                cache.set_is_archive(url)
                            return None
            logger.debug('Getting page %s' % url)

            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
            if scheme == 'file' and os.path.isdir(url2pathname(path)):
                # add trailing slash if not present so urljoin doesn't trim final segment
                if not url.endswith('/'):
                    url += '/'
                url = urlparse.urljoin(url, 'index.html')
                logger.debug(' file: URL is directory, getting %s' % url)

            resp = urlopen(url)

            real_url = geturl(resp)
            headers = resp.info()
            contents = resp.read()
            encoding = headers.get('Content-Encoding', None)
            #XXX need to handle exceptions and add testing for this
            if encoding is not None:
                if encoding == 'gzip':
                    contents = gzip.GzipFile(fileobj=BytesIO(contents)).read()
                if encoding == 'deflate':
                    contents = zlib.decompress(contents)

            # The check for archives above only works if the url ends with
            #   something that looks like an archive. However that is not a
            #   requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
            #   redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
            #   Unless we issue a HEAD request on every url we cannot know
            #   ahead of time for sure if something is HTML or not. However we
            #   can check after we've downloaded it.
            content_type = headers.get('Content-Type', 'unknown')
            if not content_type.lower().startswith("text/html"):
                logger.debug('Skipping page %s because of Content-Type: %s' %
                                            (link, content_type))
                if cache is not None:
                    cache.set_is_archive(url)
                return None

            inst = cls(u(contents), real_url, headers, trusted=link.trusted)
        except (HTTPError, URLError, socket.timeout, socket.error, OSError, WindowsError):
            e = sys.exc_info()[1]
            desc = str(e)
            if isinstance(e, socket.timeout):
                log_meth = logger.info
                level = 1
                desc = 'timed out'
            elif isinstance(e, URLError):
                #ssl/certificate error
                if hasattr(e, 'reason') and (isinstance(e.reason, ssl.SSLError) or isinstance(e.reason, CertificateError)):
                    desc = 'There was a problem confirming the ssl certificate: %s' % e
                    log_meth = logger.notify
                else:
                    log_meth = logger.info
                if hasattr(e, 'reason') and isinstance(e.reason, socket.timeout):
                    desc = 'timed out'
                    level = 1
                else:
                    level = 2
            elif isinstance(e, HTTPError) and e.code == 404:
                ## FIXME: notify?
                log_meth = logger.info
                level = 2
            else:
                log_meth = logger.info
                level = 1
            log_meth('Could not fetch URL %s: %s' % (link, desc))
            log_meth('Will skip URL %s when looking for download links for %s' % (link.url, req))
            if cache is not None:
                cache.add_page_failure(url, level)
            return None
        if cache is not None:
            cache.add_page([url, real_url], inst)
        return inst
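
Two details in Example #44 are easy to miss: the page is only parsed when the server reports text/html, and gzip/deflate bodies are decompressed by hand because urlopen does not do it. A minimal, self-contained sketch of just that logic (helper names are made up; standard library only):

import gzip
import zlib
from io import BytesIO

def looks_like_html(content_type):
    # Mirror the check in get_page: only text/html responses are parsed for links.
    return content_type.lower().startswith('text/html')

def decode_body(contents, encoding):
    # Undo the two Content-Encoding values get_page handles explicitly.
    if encoding == 'gzip':
        return gzip.GzipFile(fileobj=BytesIO(contents)).read()
    if encoding == 'deflate':
        return zlib.decompress(contents)
    return contents

# Made-up usage:
assert looks_like_html('text/html; charset=utf-8')
assert decode_body(zlib.compress(b'<html></html>'), 'deflate') == b'<html></html>'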
Example #45
0
    def get_page(cls, link, req, cache=None, skip_archives=True, session=None):
        if session is None:
            session = PipSession()

        url = link.url
        url = url.split("#", 1)[0]
        if cache is not None and cache.too_many_failures(url):
            return None

        # Check for VCS schemes that do not support lookup as web pages.
        from pip.vcs import VcsSupport

        for scheme in VcsSupport.schemes:
            if url.lower().startswith(scheme) and url[len(scheme)] in "+:":
                logger.debug("Cannot look at %(scheme)s URL %(link)s" % locals())
                return None

        if cache is not None:
            inst = cache.get_page(url)
            if inst is not None:
                return inst
        try:
            if skip_archives:
                if cache is not None:
                    if cache.is_archive(url):
                        return None
                filename = link.filename
                for bad_ext in [".tar", ".tar.gz", ".tar.bz2", ".tgz", ".zip"]:
                    if filename.endswith(bad_ext):
                        content_type = cls._get_content_type(url, session=session)
                        if content_type.lower().startswith("text/html"):
                            break
                        else:
                            logger.debug("Skipping page %s because of Content-Type: %s" % (link, content_type))
                            if cache is not None:
                                cache.set_is_archive(url)
                            return None
            logger.debug("Getting page %s" % url)

            # Tack index.html onto file:// URLs that point to directories
            (scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
            if scheme == "file" and os.path.isdir(url2pathname(path)):
                # add trailing slash if not present so urljoin doesn't trim final segment
                if not url.endswith("/"):
                    url += "/"
                url = urlparse.urljoin(url, "index.html")
                logger.debug(" file: URL is directory, getting %s" % url)

            resp = session.get(url)
            resp.raise_for_status()

            # The check for archives above only works if the url ends with
            #   something that looks like an archive. However that is not a
            #   requirement. For instance http://sourceforge.net/projects/docutils/files/docutils/0.8.1/docutils-0.8.1.tar.gz/download
            #   redirects to http://superb-dca3.dl.sourceforge.net/project/docutils/docutils/0.8.1/docutils-0.8.1.tar.gz
            #   Unless we issue a HEAD request on every url we cannot know
            #   ahead of time for sure if something is HTML or not. However we
            #   can check after we've downloaded it.
            content_type = resp.headers.get("Content-Type", "unknown")
            if not content_type.lower().startswith("text/html"):
                logger.debug("Skipping page %s because of Content-Type: %s" % (link, content_type))
                if cache is not None:
                    cache.set_is_archive(url)
                return None

            inst = cls(resp.text, resp.url, resp.headers, trusted=link.trusted)
        except requests.HTTPError as exc:
            level = 2 if exc.response.status_code == 404 else 1
            cls._handle_fail(req, link, exc, url, cache=cache, level=level)
        except requests.Timeout:
            cls._handle_fail(req, link, "timed out", url, cache=cache)
        except SSLError as exc:
            reason = "There was a problem confirming the ssl certificate: " "%s" % exc
            cls._handle_fail(req, link, reason, url, cache=cache, level=2, meth=logger.notify)
        else:
            if cache is not None:
                cache.add_page([url, resp.url], inst)
            return inst
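
Example #45 reworks the same routine on top of requests: the hand-rolled urllib error handling collapses into three except clauses that classify each failure by type before recording it. A rough, self-contained sketch of that classification pattern (the function name, logger, and timeout are assumptions, not pip's API):

import logging
import requests

logger = logging.getLogger(__name__)

def fetch_html(url, timeout=15):
    """Return the page text, or None if the URL should be skipped."""
    try:
        resp = requests.get(url, timeout=timeout)
        resp.raise_for_status()
    except requests.exceptions.SSLError as exc:
        # Certificate problems are surfaced loudly, as in the example above.
        logger.warning('Problem confirming the ssl certificate for %s: %s', url, exc)
        return None
    except requests.Timeout:
        logger.info('Could not fetch %s: timed out', url)
        return None
    except requests.HTTPError as exc:
        # 404s are routine when probing index pages, so keep them quiet.
        level = logging.DEBUG if exc.response.status_code == 404 else logging.INFO
        logger.log(level, 'Could not fetch %s: %s', url, exc)
        return None
    if not resp.headers.get('Content-Type', '').lower().startswith('text/html'):
        return None
    return resp.text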
Example #46
0
    def find_requirement(self, req, upgrade):
        url_name = req.url_name
        # Only check main index if index URL is given:
        main_index_url = None
        if self.index_urls:
            # Check that we have the url_name correctly spelled:
            main_index_url = Link(posixpath.join(self.index_urls[0], url_name))
            # This will also cache the page, so it's okay that we get it again later:
            page = self._get_page(main_index_url, req)
            if page is None:
                url_name = self._find_url_name(Link(self.index_urls[0]),
                                               url_name, req) or req.url_name

        # Combine index URLs with mirror URLs here to allow
        # adding more index URLs from requirements files
        all_index_urls = self.index_urls + self.mirror_urls

        def mkurl_pypi_url(url):
            loc = posixpath.join(url, url_name)
            # For maximum compatibility with easy_install, ensure the path
            # ends in a trailing slash.  Although this isn't in the spec
            # (and PyPI can handle it without the slash) some other index
            # implementations might break if they relied on easy_install's behavior.
            if not loc.endswith('/'):
                loc = loc + '/'
            return loc

        if url_name is not None:
            locations = [mkurl_pypi_url(url)
                         for url in all_index_urls] + self.find_links
        else:
            locations = list(self.find_links)
        locations.extend(self.dependency_links)
        for version in req.absolute_versions:
            if url_name is not None and main_index_url is not None:
                locations = [posixpath.join(main_index_url.url, version)
                             ] + locations

        file_locations, url_locations = self._sort_locations(locations)

        locations = [Link(url) for url in url_locations]
        logger.debug('URLs to search for versions for %s:' % req)
        for location in locations:
            logger.debug('* %s' % location)
        found_versions = []
        found_versions.extend(
            self._package_versions(
                [Link(url, '-f') for url in self.find_links],
                req.name.lower()))
        page_versions = []
        for page in self._get_pages(locations, req):
            logger.debug('Analyzing links from page %s' % page.url)
            logger.indent += 2
            try:
                page_versions.extend(
                    self._package_versions(page.links, req.name.lower()))
            finally:
                logger.indent -= 2
        dependency_versions = list(
            self._package_versions(
                [Link(url) for url in self.dependency_links],
                req.name.lower()))
        if dependency_versions:
            logger.info('dependency_links found: %s' % ', '.join(
                [link.url for parsed, link, version in dependency_versions]))
        file_versions = list(
            self._package_versions([Link(url) for url in file_locations],
                                   req.name.lower()))
        if not found_versions and not page_versions and not dependency_versions and not file_versions:
            logger.fatal(
                'Could not find any downloads that satisfy the requirement %s'
                % req)
            raise DistributionNotFound('No distributions at all found for %s' %
                                       req)
        if req.satisfied_by is not None:
            found_versions.append((req.satisfied_by.parsed_version, Inf,
                                   req.satisfied_by.version))
        if file_versions:
            file_versions.sort(reverse=True)
            logger.info('Local files found: %s' % ', '.join([
                url_to_path(link.url)
                for parsed, link, version in file_versions
            ]))
            found_versions = file_versions + found_versions
        all_versions = found_versions + page_versions + dependency_versions
        applicable_versions = []
        for (parsed_version, link, version) in all_versions:
            if version not in req.req:
                logger.info("Ignoring link %s, version %s doesn't match %s" %
                            (link, version, ','.join(
                                [''.join(s) for s in req.req.specs])))
                continue
            applicable_versions.append((link, version))
        applicable_versions = sorted(
            applicable_versions,
            key=lambda v: pkg_resources.parse_version(v[1]),
            reverse=True)
        existing_applicable = bool(
            [link for link, version in applicable_versions if link is Inf])
        if not upgrade and existing_applicable:
            if applicable_versions[0][1] is Inf:
                logger.info(
                    'Existing installed version (%s) is most up-to-date and satisfies requirement'
                    % req.satisfied_by.version)
                raise BestVersionAlreadyInstalled
            else:
                logger.info(
                    'Existing installed version (%s) satisfies requirement (most up-to-date version is %s)'
                    % (req.satisfied_by.version, applicable_versions[0][1]))
            return None
        if not applicable_versions:
            logger.fatal(
                'Could not find a version that satisfies the requirement %s (from versions: %s)'
                % (req, ', '.join([
                    version for parsed_version, link, version in found_versions
                ])))
            raise DistributionNotFound(
                'No distributions matching the version for %s' % req)
        if applicable_versions[0][0] is Inf:
            # We have an existing version, and it's the best version
            logger.info(
                'Installed version (%s) is most up-to-date (past versions: %s)'
                % (req.satisfied_by.version, ', '.join(
                    [version
                     for link, version in applicable_versions[1:]]) or 'none'))
            raise BestVersionAlreadyInstalled
        if len(applicable_versions) > 1:
            logger.info('Using version %s (newest of versions: %s)' %
                        (applicable_versions[0][1], ', '.join(
                            [version
                             for link, version in applicable_versions])))
        return applicable_versions[0][0]
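
The last third of Example #46 boils down to a sort: every candidate (link, version) pair is ordered newest-first by its parsed version, with Inf standing in for the already-installed distribution so it can be detected and preferred. A small illustration of why parsing matters (made-up URLs; assumes setuptools' pkg_resources, as in the example):

import pkg_resources

# (link, version) pairs as _package_versions might yield them.
candidates = [('http://idx/pkg-1.2.tar.gz', '1.2'),
              ('http://idx/pkg-1.10.tar.gz', '1.10'),
              ('http://idx/pkg-1.9.tar.gz', '1.9')]

# Sort by the parsed version, not the string, so '1.10' correctly beats '1.9'.
candidates.sort(key=lambda pair: pkg_resources.parse_version(pair[1]), reverse=True)

print(candidates[0])  # ('http://idx/pkg-1.10.tar.gz', '1.10')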
Example #47
0
File: info.py Project: markpasc/pip
    def release_in_index(self, dist, index_url):
        import xmlrpclib
        server = xmlrpclib.Server(index_url)

        req = pkg_resources.Requirement.parse(dist)
        logger.debug('Requirement for %r is %r', dist, req)

        releases = server.package_releases(req.project_name)
        logger.debug('Checking if %r are in %r', releases, req)
        good_releases = sorted((r for r in releases if r in req),
            key=pkg_resources.parse_version)
        if not good_releases:
            logger.debug('Oops, none of %r are in %r', releases, req)
            raise DistributionNotFound()

        logger.debug('Yay, %r are in %r', good_releases, req)
        best_release = good_releases.pop(-1)
        logger.debug('Yay, asking for %r %r', req.project_name, best_release)
        release = server.release_data(req.project_name, best_release)

        release['_pip_index_url'] = index_url
        release['_pip_other_versions'] = [r for r in releases if r != best_release]
        logger.debug('Other versions are %r', release['_pip_other_versions'])

        return release
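
Example #47 reaches the index over XML-RPC instead of scraping HTML: package_releases lists every released version, and the Requirement object filters them down to the ones that satisfy the specifier. A condensed Python 3 sketch of the same flow (xmlrpc.client replaces xmlrpclib; that the index exposes this XML-RPC interface is an assumption):

import xmlrpc.client
import pkg_resources

def best_release(dist_spec, index_url):
    """Return the newest release on index_url satisfying dist_spec, or None."""
    server = xmlrpc.client.ServerProxy(index_url)
    req = pkg_resources.Requirement.parse(dist_spec)
    releases = server.package_releases(req.project_name)
    # A version string tests True for membership in a Requirement when it
    # satisfies the requirement's specifiers, exactly as in the example above.
    good = sorted((r for r in releases if r in req), key=pkg_resources.parse_version)
    return good[-1] if good else None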