Example 1
def test_url_parse():

    parsed = url_util.parse('/path/to/resource', scheme='fake')
    assert(parsed.scheme == 'fake')
    assert(parsed.netloc == '')
    assert(parsed.path == '/path/to/resource')

    parsed = url_util.parse('file:///path/to/resource')
    assert(parsed.scheme == 'file')
    assert(parsed.netloc == '')
    assert(parsed.path == '/path/to/resource')

    parsed = url_util.parse('file:///path/to/resource', scheme='fake')
    assert(parsed.scheme == 'file')
    assert(parsed.netloc == '')
    assert(parsed.path == '/path/to/resource')

    parsed = url_util.parse('file://path/to/resource')
    assert(parsed.scheme == 'file')
    expected = convert_to_posix_path(
        os.path.abspath(
            posixpath.join('path', 'to', 'resource')))
    if is_windows:
        expected = expected.lstrip(drive)
    assert(parsed.path == expected)

    if is_windows:
        parsed = url_util.parse('file://%s\\path\\to\\resource' % drive)
        assert(parsed.scheme == 'file')
        expected = '/' + posixpath.join('path', 'to', 'resource')
        assert parsed.path == expected

    parsed = url_util.parse('https://path/to/resource')
    assert(parsed.scheme == 'https')
    assert(parsed.netloc == 'path')
    assert(parsed.path == '/to/resource')

    parsed = url_util.parse('gs://path/to/resource')
    assert(parsed.scheme == 'gs')
    assert(parsed.netloc == 'path')
    assert(parsed.path == '/to/resource')

    spack_root = spack.paths.spack_root
    parsed = url_util.parse('file://$spack')
    assert(parsed.scheme == 'file')

    if is_windows:
        spack_root = '/' + convert_to_posix_path(spack_root)

    assert(parsed.netloc + parsed.path == spack_root)
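The test above relies on module-level names that are not shown, notably is_windows and drive. A minimal sketch of the assumed fixtures (the exact definitions in the real test module may differ):

import re
import sys

import spack.paths

is_windows = sys.platform == 'win32'

drive = ''
if is_windows:
    # Assumption: derive the drive letter from a known spack path.
    match = re.search(r'[A-Za-z]:', spack.paths.spack_root)
    if match:
        drive = match.group()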
Example 2
    def _std_args(pkg):
        """Computes the standard cmake arguments for a generic package"""

        try:
            generator = pkg.generator
        except AttributeError:
            generator = CMakePackage.generator

        # Make sure a valid generator was chosen
        valid_primary_generators = ['Unix Makefiles', 'Ninja']
        primary_generator = _extract_primary_generator(generator)
        if primary_generator not in valid_primary_generators:
            msg = "Invalid CMake generator: '{0}'\n".format(generator)
            msg += "CMakePackage currently supports the following "
            msg += "primary generators: '{0}'".\
                   format("', '".join(valid_primary_generators))
            raise InstallError(msg)

        try:
            build_type = pkg.spec.variants['build_type'].value
        except KeyError:
            build_type = 'RelWithDebInfo'

        try:
            ipo = pkg.spec.variants['ipo'].value
        except KeyError:
            ipo = False

        define = CMakePackage.define
        args = [
            '-G',
            generator,
            define('CMAKE_INSTALL_PREFIX', convert_to_posix_path(pkg.prefix)),
            define('CMAKE_BUILD_TYPE', build_type),
            define('BUILD_TESTING', pkg.run_tests),
        ]

        # CMAKE_INTERPROCEDURAL_OPTIMIZATION only exists for CMake >= 3.9
        if pkg.spec.satisfies('^cmake@3.9:'):
            args.append(define('CMAKE_INTERPROCEDURAL_OPTIMIZATION', ipo))

        if primary_generator == 'Unix Makefiles':
            args.append(define('CMAKE_VERBOSE_MAKEFILE', True))

        if platform.mac_ver()[0]:
            args.extend([
                define('CMAKE_FIND_FRAMEWORK', "LAST"),
                define('CMAKE_FIND_APPBUNDLE', "LAST"),
            ])

        # Set up CMake rpath
        args.extend([
            define('CMAKE_INSTALL_RPATH_USE_LINK_PATH', True),
            define('CMAKE_INSTALL_RPATH',
                   spack.build_environment.get_rpaths(pkg)),
            define('CMAKE_PREFIX_PATH',
                   spack.build_environment.get_cmake_prefix_path(pkg))
        ])
        return args
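For context, a minimal sketch of what the define helper used above is assumed to produce: CMake -D flags with a type suffix, booleans mapped to ON/OFF, and sequences joined with ';'. The real CMakePackage.define may differ in detail.

def define(cmake_var, value):
    # Booleans become BOOL flags rendered as ON/OFF; everything else is
    # passed as STRING, with sequences joined by ';'.
    if isinstance(value, bool):
        kind, value = 'BOOL', 'ON' if value else 'OFF'
    else:
        kind = 'STRING'
        if isinstance(value, (list, tuple)):
            value = ';'.join(str(v) for v in value)
    return '-D{0}:{1}={2}'.format(cmake_var, kind, value)

# define('CMAKE_BUILD_TYPE', 'Release') -> '-DCMAKE_BUILD_TYPE:STRING=Release'
# define('BUILD_TESTING', False)        -> '-DBUILD_TESTING:BOOL=OFF'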
Example 3
    def setup(self, text):
        if isinstance(text, string_types):
            # shlex does not handle Windows path
            # separators, so we must normalize to posix
            text = sp.convert_to_posix_path(text)
            text = shlex.split(str(text))
        self.text = text
        self.push_tokens(self.lexer.lex(text))
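To see why the normalization is needed: shlex in its default POSIX mode treats backslashes as escape characters, so raw Windows separators are silently consumed during splitting.

import shlex

print(shlex.split('C:\\spack\\bin'))  # ['C:spackbin']   -- separators eaten
print(shlex.split('C:/spack/bin'))    # ['C:/spack/bin'] -- posix form survives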
Example 4
def test_root_get_and_set(mutable_config, scope):
    scope_args, path = [], '/scratch/spack/bootstrap'
    if scope:
        scope_args = ['--scope={0}'.format(scope)]

    _bootstrap('root', path, *scope_args)
    out = _bootstrap('root', *scope_args, output=str)
    if sys.platform == 'win32':
        out = convert_to_posix_path(out)
    assert out.strip() == path
Example 5
def _join(base_url, path, *extra, **kwargs):
    base_url = parse(base_url)
    resolve_href = kwargs.get('resolve_href', False)

    (scheme, netloc, base_path, params, query, _) = base_url
    scheme = scheme.lower()

    path_tokens = [
        part for part in itertools.chain(
            _split_all(path),
            itertools.chain.from_iterable(
                _split_all(extra_path) for extra_path in extra))
        if part and part != '/'
    ]

    base_path_args = ['/fake-root']
    if scheme == 's3':
        if netloc:
            base_path_args.append(netloc)

    if base_path.startswith('/'):
        base_path = base_path[1:]

    base_path_args.append(base_path)

    if resolve_href:
        new_base_path, _ = posixpath.split(posixpath.join(*base_path_args))
        base_path_args = [new_base_path]

    base_path_args.extend(path_tokens)
    base_path = posixpath.relpath(posixpath.join(*base_path_args),
                                  '/fake-root')

    if scheme == 's3':
        path_tokens = [
            part for part in _split_all(base_path) if part and part != '/'
        ]

        if path_tokens:
            netloc = path_tokens.pop(0)
            base_path = posixpath.join('', *path_tokens)

    if sys.platform == "win32":
        base_path = convert_to_posix_path(base_path)

    return format(
        urllib_parse.ParseResult(scheme=scheme,
                                 netloc=netloc,
                                 path=base_path,
                                 params=params,
                                 query=query,
                                 fragment=None))
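The heart of _join is the '/fake-root' sentinel: every component is joined under a fake absolute root, and posixpath.relpath then strips the sentinel back off, normalizing '..' segments and stray separators while keeping the result relative. In isolation:

import posixpath

parts = ['/fake-root', 'bucket', 'a/b', 'c']
joined = posixpath.join(*parts)                 # '/fake-root/bucket/a/b/c'
print(posixpath.relpath(joined, '/fake-root'))  # 'bucket/a/b/c'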
Example 6
def parse(url, scheme='file'):
    """Parse a url.

    For file:// URLs, the netloc and path components are concatenated and
    passed through spack.util.path.canonicalize_path().

    Otherwise, the returned value is the same as urllib's urlparse() with
    allow_fragments=False.
    """
    # Guarantee that a value passed in is in proper url format; this
    # allows for easier string manipulation across platforms.
    if isinstance(url, string_types):
        require_url_format(url)
        url = escape_file_url(url)
    url_obj = (
        urllib_parse.urlparse(url, scheme=scheme, allow_fragments=False)
        if isinstance(url, string_types) else url)

    (scheme, netloc, path, params, query, _) = url_obj

    scheme = (scheme or 'file').lower()

    if scheme == 'file':

        # (The user explicitly provides the file:// scheme.)
        #   examples:
        #     file://C:\\a\\b\\c
        #     file://X:/a/b/c
        path = canonicalize_path(netloc + path)
        path = re.sub(r'^/+', '/', path)
        netloc = ''

        drive_ltr_lst = re.findall(r'[A-Za-z]:\\', path)
        is_win_path = bool(drive_ltr_lst)
        if is_windows and is_win_path:
            drive_ltr = drive_ltr_lst[0].strip('\\')
            path = re.sub(r'[\\]*' + drive_ltr, '', path)
            netloc = '/' + drive_ltr.strip('\\')

    if sys.platform == "win32":
        path = convert_to_posix_path(path)

    return urllib_parse.ParseResult(scheme=scheme,
                                    netloc=netloc,
                                    path=path,
                                    params=params,
                                    query=query,
                                    fragment=None)
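The Windows drive-letter branch is easier to follow in isolation. A sketch, assuming a path that has already passed through canonicalize_path:

import re

path = 'C:\\a\\b\\c'
drive_ltr_lst = re.findall(r'[A-Za-z]:\\', path)  # ['C:\\']
drive_ltr = drive_ltr_lst[0].strip('\\')          # 'C:'
path = re.sub(r'[\\]*' + drive_ltr, '', path)     # '\\a\\b\\c'
netloc = '/' + drive_ltr                          # '/C:'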
Example 7
def test_url_join_absolute_paths():
    # Handling absolute path components is a little tricky.  To this end, we
    # distinguish "absolute path components", from the more-familiar concept of
    # "absolute paths" as they are understood for local filesystem paths.
    #
    # - All absolute paths are absolute path components.  Joining a URL with
    #   these components has the effect of completely replacing the path of the
    #   URL with the absolute path.  These components do not specify a URL
    #   scheme, so the scheme of the URL produced when joining them depends on
    #   those provided by components that came before it (file:// assumed if no
    #   such scheme is provided).

    # For example:
    p = '/path/to/resource'
    # ...is an absolute path

    # http:// URL
    assert (url_util.join('http://example.com/a/b/c',
                          p) == 'http://example.com/path/to/resource')

    # s3:// URL
    # also notice how the netloc is treated as part of the path for s3:// URLs
    assert (url_util.join('s3://example.com/a/b/c',
                          p) == 's3://path/to/resource')

    # - URL components that specify a scheme are always absolute path
    #   components.  Joining a base URL with these components effectively
    #   discards the base URL and "resets" the joining logic starting at the
    #   component in question and using it as the new base URL.

    # For example:
    p = 'http://example.com/path/to'
    # ...is an http:// URL

    join_result = url_util.join(p, 'resource')
    assert (join_result == 'http://example.com/path/to/resource')

    # works as if everything before the http:// URL was left out
    assert (url_util.join('literally', 'does', 'not', 'matter', p,
                          'resource') == join_result)

    # It's important to keep in mind that this logic applies even if the
    # component's path is not an absolute path!

    # For example:
    p = './d'
    # ...is *NOT* an absolute path
    # ...is also *NOT* an absolute path component

    u = 'file://./d'
    # ...is a URL
    #     The path of this URL is *NOT* an absolute path
    #     HOWEVER, the URL, itself, *is* an absolute path component

    # (We just need...
    cwd = os.getcwd()
    # ...to work out what resource it points to)

    if sys.platform == "win32":
        cwd = convert_to_posix_path(cwd)
        cwd = '/' + cwd

    # So, even though parse() assumes "file://" URL, the scheme is still
    # significant in URL path components passed to join(), even if the base
    # is a file:// URL.

    path_join_result = 'file:///a/b/c/d'
    assert (url_util.join('/a/b/c', p) == path_join_result)
    assert (url_util.join('file:///a/b/c', p) == path_join_result)

    url_join_result = 'file://{CWD}/d'.format(CWD=cwd)
    assert (url_util.join('/a/b/c', u) == url_join_result)
    assert (url_util.join('file:///a/b/c', u) == url_join_result)

    # Finally, resolve_href should have no effect on how absolute path
    # components are handled, because local hrefs cannot be absolute path
    # components.
    args = [
        's3://does', 'not', 'matter', 'http://example.com', 'also', 'does',
        'not', 'matter', '/path'
    ]

    expected = 'http://example.com/path'
    assert (url_util.join(*args, resolve_href=True) == expected)
    assert (url_util.join(*args, resolve_href=False) == expected)

    # resolve_href only matters for the local path components at the end of the
    # argument list.
    args[-1] = '/path/to/page'
    args.extend(('..', '..', 'resource'))

    assert (url_util.join(*args,
                          resolve_href=True) == 'http://example.com/resource')

    assert (url_util.join(
        *args, resolve_href=False) == 'http://example.com/path/resource')
Example 8
def join(base_url, path, *extra, **kwargs):
    """Joins a base URL with one or more local URL path components

    If resolve_href is True, treat the base URL as though it were the locator
    of a web page, and the remaining URL path components as though they formed
    a relative URL to be resolved against it (i.e.: as in posixpath.join(...)).
    The result is an absolute URL to the resource to which a user's browser
    would navigate if they clicked on a link with an "href" attribute equal to
    the relative URL.

    If resolve_href is False (default), then the URL path components are joined
    as in posixpath.join().

    Note: file:// URL path components are not canonicalized as part of this
    operation.  To canonicalize, pass the joined url to format().

    Examples:
      base_url = 's3://bucket/index.html'
      body = fetch_body(base_url)
      link = get_href(body) # link == '../other-bucket/document.txt'

      # wrong - link is a local URL that needs to be resolved against base_url
      spack.util.url.join(base_url, link)
      's3://bucket/other-bucket/document.txt'

      # correct - resolve local URL against base_url
      spack.util.url.join(base_url, link, resolve_href=True)
      's3://other-bucket/document.txt'

      prefix = 'https://mirror.spack.io/build_cache'

      # wrong - prefix is just a URL prefix
      spack.util.url.join(prefix, 'my-package', resolve_href=True)
      'https://mirror.spack.io/my-package'

      # correct - simply append additional URL path components
      spack.util.url.join(prefix, 'my-package', resolve_href=False) # default
      'https://mirror.spack.io/build_cache/my-package'

      # For canonicalizing file:// URLs, take care to explicitly differentiate
      # between absolute and relative join components.

      # '$spack' is not an absolute path component
      join_result = spack.util.url.join('/a/b/c', '$spack') ; join_result
      'file:///a/b/c/$spack'
      spack.util.url.format(join_result)
      'file:///a/b/c/opt/spack'

      # '/$spack' *is* an absolute path component
      join_result = spack.util.url.join('/a/b/c', '/$spack') ; join_result
      'file:///$spack'
      spack.util.url.format(join_result)
      'file:///opt/spack'
    """
    paths = [x if isinstance(x, string_types) else x.geturl()
             for x in itertools.chain((base_url, path), extra)]

    paths = [convert_to_posix_path(x) for x in paths]
    n = len(paths)
    last_abs_component = None
    scheme = ''
    for i in range(n - 1, -1, -1):
        obj = urllib_parse.urlparse(paths[i], scheme='', allow_fragments=False)

        scheme = obj.scheme

        # in either case the component is absolute
        if scheme or obj.path.startswith('/'):
            if not scheme:
                # Without a scheme, we have to go back looking for the
                # next-last component that specifies a scheme.
                for j in range(i - 1, -1, -1):
                    obj = urllib_parse.urlparse(paths[j],
                                                scheme='',
                                                allow_fragments=False)

                    if obj.scheme:
                        paths[i] = '{SM}://{NL}{PATH}'.format(
                            SM=obj.scheme,
                            NL=((obj.netloc +
                                 '/') if obj.scheme != 's3' else ''),
                            PATH=paths[i][1:])
                        break

            last_abs_component = i
            break

    if last_abs_component is not None:
        paths = paths[last_abs_component:]
        if len(paths) == 1:
            result = urllib_parse.urlparse(paths[0],
                                           scheme='file',
                                           allow_fragments=False)

            # another subtlety: If the last argument to join() is an absolute
            # file:// URL component with a relative path, the relative path
            # needs to be resolved.
            if result.scheme == 'file' and result.netloc:
                result = urllib_parse.ParseResult(
                    scheme=result.scheme,
                    netloc='',
                    path=posixpath.abspath(result.netloc + result.path),
                    params=result.params,
                    query=result.query,
                    fragment=None)

            return result.geturl()

    return _join(*paths, **kwargs)
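The decision at the top of the loop, i.e. what counts as an "absolute" component, can be demonstrated directly (using the Python 3 stdlib parser here in place of the module's urllib_parse alias):

import urllib.parse as urllib_parse

for comp in ('http://example.com/path', '/abs/path', 'relative/path'):
    obj = urllib_parse.urlparse(comp, scheme='', allow_fragments=False)
    print(comp, '->', bool(obj.scheme or obj.path.startswith('/')))
# http://example.com/path -> True
# /abs/path -> True
# relative/path -> False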
Example 9
def read_from_url(url, accept_content_type=None):
    url = url_util.parse(url)
    context = None

    verify_ssl = spack.config.get('config:verify_ssl')

    # Don't even bother with a context unless the URL scheme is one that uses
    # SSL certs.
    if uses_ssl(url):
        if verify_ssl:
            if __UNABLE_TO_VERIFY_SSL:
                # User wants SSL verification, but it cannot be provided.
                warn_no_ssl_cert_checking()
            else:
                # User wants SSL verification, and it *can* be provided.
                context = ssl.create_default_context()  # novm
        else:
            # User has explicitly indicated that they do not want SSL
            # verification.
            if not __UNABLE_TO_VERIFY_SSL:
                context = ssl._create_unverified_context()

    url_scheme = url.scheme
    url = url_util.format(url)
    if sys.platform == "win32" and url_scheme == "file":
        url = convert_to_posix_path(url)
    req = Request(url)

    content_type = None
    is_web_url = url_scheme in ('http', 'https')
    if accept_content_type and is_web_url:
        # Make a HEAD request first to check the content type.  This lets
        # us ignore tarballs and gigantic files.
        # It would be nice to do this with the HTTP Accept header to avoid
        # one round-trip.  However, most servers seem to ignore the header
        # if you ask for a tarball with Accept: text/html.
        req.get_method = lambda: "HEAD"
        resp = _urlopen(req, timeout=_timeout, context=context)

        content_type = get_header(resp.headers, 'Content-type')

    # Do the real GET request when we know it's just HTML.
    req.get_method = lambda: "GET"

    try:
        response = _urlopen(req, timeout=_timeout, context=context)
    except URLError as err:
        raise SpackWebError('Download failed: {ERROR}'.format(ERROR=str(err)))

    if accept_content_type and not is_web_url:
        content_type = get_header(response.headers, 'Content-type')

    reject_content_type = (accept_content_type and
                           (content_type is None or
                            not content_type.startswith(accept_content_type)))

    if reject_content_type:
        tty.debug("ignoring page {0}{1}{2}".format(
            url, " with content type " if content_type is not None else "",
            content_type or ""))

        return None, None, None

    return response.geturl(), response.headers, response
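A hedged usage sketch: this mirrors how the web spider is assumed to call the helper, requesting HTML only so that tarballs and other large files are skipped by the HEAD pre-check.

url, headers, response = read_from_url(
    'https://mirror.spack.io/', accept_content_type='text/html')
if response is None:
    print('page skipped: content type did not match')
else:
    page = response.read()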
Example 10
def find_versions_of_archive(archive_urls,
                             list_url=None,
                             list_depth=0,
                             concurrency=32,
                             reference_package=None):
    """Scrape web pages for new versions of a tarball.

    Args:
        archive_urls (str or list or tuple): URL or sequence of URLs for
            different versions of a package. Typically these are just the
            tarballs from the package file itself. By default, this searches
            the parent directories of archives.
        list_url (str or None): URL for a listing of archives.
            Spack will scrape these pages for download links that look
            like the archive URL.
        list_depth (int): max depth to follow links on list_url pages.
            Defaults to 0.
        concurrency (int): maximum number of concurrent requests
        reference_package (spack.package.Package or None): a spack package
            used as a reference for url detection.  Uses the url_for_version
            method on the package to produce reference urls which, if found,
            are preferred.
    """
    if not isinstance(archive_urls, (list, tuple)):
        archive_urls = [archive_urls]

    # Generate a list of list_urls based on archive urls and any
    # explicitly listed list_url in the package
    list_urls = set()
    if list_url is not None:
        list_urls.add(list_url)
    for aurl in archive_urls:
        list_urls |= spack.url.find_list_urls(aurl)

    # Add '/' to the end of the URL. Some web servers require this.
    additional_list_urls = set()
    for lurl in list_urls:
        if not lurl.endswith('/'):
            additional_list_urls.add(lurl + '/')
    list_urls |= additional_list_urls

    # Grab some web pages to scrape.
    pages, links = spider(list_urls, depth=list_depth, concurrency=concurrency)

    # Scrape them for archive URLs
    regexes = []
    for aurl in archive_urls:
        # This creates a regex from the URL with a capture group for
        # the version part of the URL.  The capture group is converted
        # to a generic wildcard, so we can use this to extract things
        # on a page that look like archive URLs.
        url_regex = spack.url.wildcard_version(aurl)

        # We'll be a bit more liberal and just look for the archive
        # part, not the full path.
        url_regex = os.path.basename(url_regex)

        # We need to add a / to the beginning of the regex to prevent
        # Spack from picking up similarly named packages like:
        #   https://cran.r-project.org/src/contrib/pls_2.6-0.tar.gz
        #   https://cran.r-project.org/src/contrib/enpls_5.7.tar.gz
        #   https://cran.r-project.org/src/contrib/autopls_1.3.tar.gz
        #   https://cran.r-project.org/src/contrib/matrixpls_1.0.4.tar.gz
        url_regex = '/' + url_regex

        # We need to add a $ anchor to the end of the regex to prevent
        # Spack from picking up signature files like:
        #   .asc
        #   .md5
        #   .sha256
        #   .sig
        # However, SourceForge downloads still need to end in '/download'.
        url_regex += r'(\/download)?'
        # PyPI adds #sha256=... to the end of the URL
        url_regex += '(#sha256=.*)?'
        url_regex += '$'

        regexes.append(url_regex)

    # Build a dict version -> URL from any links that match the wildcards.
    # Walk through archive_url links first.
    # Any conflicting versions will be overwritten by the list_url links.
    versions = {}
    matched = set()
    for url in archive_urls + sorted(links):
        url = convert_to_posix_path(url)
        if any(re.search(r, url) for r in regexes):
            try:
                ver = spack.url.parse_version(url)
                if ver in matched:
                    continue
                versions[ver] = url
                # prevent this version from getting overwritten
                if url in archive_urls:
                    matched.add(ver)
                elif reference_package is not None:
                    if url == reference_package.url_for_version(ver):
                        matched.add(ver)
                else:
                    extrapolated_urls = [
                        spack.url.substitute_version(u, ver)
                        for u in archive_urls
                    ]
                    if url in extrapolated_urls:
                        matched.add(ver)
            except spack.url.UndetectableVersionError:
                continue

    return versions
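The regex anchoring described in the comments can be checked in isolation. The version pattern below is hypothetical (the real one comes from spack.url.wildcard_version), but the '/' prefix and the optional-suffix anchors are the ones built above.

import re

url_regex = r'/pls_(\d[\d.\-]*)\.tar\.gz' + r'(\/download)?' + '(#sha256=.*)?' + '$'
urls = [
    'https://cran.r-project.org/src/contrib/pls_2.6-0.tar.gz',      # True
    'https://cran.r-project.org/src/contrib/autopls_1.3.tar.gz',    # False: '/' prefix
    'https://cran.r-project.org/src/contrib/pls_2.6-0.tar.gz.asc',  # False: '$' anchor
]
for url in urls:
    print(url, bool(re.search(url_regex, url)))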
Example 11
def make_installer(parser, args):
    """
       Use CMake to generate a WIX installer in a newly created build directory
    """
    if sys.platform == 'win32':
        output_dir = args.output_dir
        cmake_spec = Spec('cmake')
        cmake_spec.concretize()
        cmake_path = os.path.join(cmake_spec.prefix, "bin", "cmake.exe")
        cpack_path = os.path.join(cmake_spec.prefix, "bin", "cpack.exe")
        spack_source = args.spack_source
        git_verbosity = ""
        if args.git_verbosity:
            git_verbosity = "/" + args.git_verbosity

        if spack_source:
            if not os.path.exists(spack_source):
                print("%s does not exist" % spack_source)
                return
            else:
                if not os.path.isabs(spack_source):
                    spack_source = posixpath.abspath(spack_source)
                spack_source = convert_to_posix_path(spack_source)

        spack_version = args.spack_version

        here = os.path.dirname(os.path.abspath(__file__))
        source_dir = os.path.join(here, "installer")
        posix_root = convert_to_posix_path(spack.paths.spack_root)
        spack_license = posixpath.join(posix_root, "LICENSE-APACHE")
        rtf_spack_license = txt_to_rtf(spack_license)
        spack_license = posixpath.join(source_dir, "LICENSE.rtf")

        with open(spack_license, 'w') as rtf_license:
            written = rtf_license.write(rtf_spack_license)
            if written == 0:
                raise RuntimeError(
                    "Failed to generate properly formatted license file")
        spack_logo = posixpath.join(posix_root, "share/spack/logo/favicon.ico")

        try:
            spack.util.executable.Executable(cmake_path)(
                '-S', source_dir, '-B', output_dir,
                '-DSPACK_VERSION=%s' % spack_version,
                '-DSPACK_SOURCE=%s' % spack_source,
                '-DSPACK_LICENSE=%s' % spack_license,
                '-DSPACK_LOGO=%s' % spack_logo,
                '-DSPACK_GIT_VERBOSITY=%s' % git_verbosity)
        except spack.util.executable.ProcessError:
            print("Failed to generate installer")
            return spack.util.executable.ProcessError.returncode

        try:
            spack.util.executable.Executable(cpack_path)(
                "--config", "%s/CPackConfig.cmake" % output_dir, "-B",
                "%s/" % output_dir)
        except spack.util.executable.ProcessError:
            print("Failed to generate installer")
            return spack.util.executable.ProcessError.returncode
        try:
            spack.util.executable.Executable(
                os.environ.get('WIX') + '/bin/candle.exe')(
                    '-ext', 'WixBalExtension', '%s/bundle.wxs' % output_dir,
                    '-out', '%s/bundle.wixobj' % output_dir)
        except spack.util.executable.ProcessError:
            print("Failed to generate installer chain")
            return spack.util.executable.ProcessError.returncode
        try:
            spack.util.executable.Executable(
                os.environ.get('WIX') +
                "/bin/light.exe")("-sw1134", "-ext", "WixBalExtension",
                                  "%s/bundle.wixobj" % output_dir, '-out',
                                  '%s/Spack.exe' % output_dir)
        except spack.util.executable.ProcessError:
            print("Failed to generate installer chain")
            return spack.util.executable.ProcessError.returncode
        print("Successfully generated Spack.exe in %s" % (output_dir))
    else:
        print(
            'The make-installer command is currently only supported on Windows.'
        )