Example #1
    def __call__(self, *args, **kwargs):
        """Run the executable with subprocess.check_output, return output."""
        return_output = kwargs.get("return_output", False)
        fail_on_error = kwargs.get("fail_on_error", True)

        quoted_args = [arg for arg in args if re.search(r'^"|^\'|"$|\'$', arg)]
        if quoted_args:
            tty.warn("Quotes in command arguments can confuse scripts like configure.",
                     "The following arguments may cause problems when executed:",
                     str("\n".join(["    "+arg for arg in quoted_args])),
                     "Quotes aren't needed because spack doesn't use a shell.",
                     "Consider removing them")

        cmd = self.exe + list(args)
        tty.verbose(" ".join(cmd))

        try:
            proc = subprocess.Popen(
                cmd,
                stderr=sys.stderr,
                stdout=subprocess.PIPE if return_output else sys.stdout)
            out, err = proc.communicate()
            if fail_on_error and proc.returncode != 0:
                raise SpackError("command '%s' returned error code %d"
                                 % (" ".join(cmd), proc.returncode))
            if return_output:
                return out

        except subprocess.CalledProcessError:
            if fail_on_error:
                raise
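
The calling convention above lets an executable be invoked like a Python function, with keyword arguments controlling output capture and error handling. A minimal, self-contained sketch of how such a wrapper might be used, assuming the surrounding class stores the program name as a list in self.exe (the class name Executable and its constructor are illustrative assumptions, not taken from the snippet):

import subprocess
import sys

# Hypothetical minimal wrapper showing how the __call__ above is used;
# the class name and constructor are assumptions for illustration only.
class Executable(object):
    def __init__(self, name):
        self.exe = [name]

    def __call__(self, *args, **kwargs):
        return_output = kwargs.get("return_output", False)
        cmd = self.exe + list(args)
        proc = subprocess.Popen(
            cmd,
            stderr=sys.stderr,
            stdout=subprocess.PIPE if return_output else sys.stdout)
        out, _ = proc.communicate()
        if return_output:
            return out

echo = Executable('echo')
output = echo('hello', 'world', return_output=True)  # captured stdout
echo('hello again')                                  # output goes straight to the terminal
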
Example #2
    def fetch_from_url(self, url):
        try:
            # Run curl but grab the mime type from the http headers
            headers = spack.curl('-#',        # status bar
                                 '-O',        # save file to disk
                                 '-D', '-',   # print out HTML headers
                                 '-L', url, return_output=True)
        except:
            # clean up archive on failure.
            if self.archive_file:
                os.remove(self.archive_file)
            raise

        # Check if we somehow got an HTML file rather than the archive we
        # asked for.  We only look at the last content type, to handle
        # redirects properly.
        content_types = re.findall(r'Content-Type:[^\r\n]+', headers)
        if content_types and 'text/html' in content_types[-1]:
            tty.warn("The contents of " + self.archive_file + " look like HTML.",
                     "The checksum will likely be bad.  If it is, you can use",
                     "'spack clean --all' to remove the bad archive, then fix",
                     "your internet gateway issue and install again.")
Example #3
    def fetch(self):
        """Downloads the file at URL to the stage.  Returns true if it was downloaded,
           false if it already existed."""
        self.chdir()
        if self.archive_file:
            tty.msg("Already downloaded %s." % self.archive_file)

        else:
            tty.msg("Fetching %s" % self.url)

            try:
                # Run curl but grab the mime type from the http headers
                headers = spack.curl('-#',        # status bar
                                     '-O',        # save file to disk
                                     '-D', '-',   # print out HTML headers
                                     '-L', self.url, return_output=True)
            except:
                # clean up archive on failure.
                if self.archive_file:
                    os.remove(self.archive_file)
                raise

            # Check if we somehow got an HTML file rather than the archive we
            # asked for.  We only look at the last content type, to handle
            # redirects properly.
            content_types = re.findall(r'Content-Type:[^\r\n]+', headers)
            if content_types and 'text/html' in content_types[-1]:
                tty.warn("The contents of " + self.archive_file + " look like HTML.",
                         "The checksum will likely be bad.  If it is, you can use",
                         "'spack clean --all' to remove the bad archive, then fix",
                         "your internet gateway issue and install again.")

        if not self.archive_file:
            raise FailedDownloadError(self.url)

        return self.archive_file
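
A caller would typically wrap fetch() in a try/except so that a failed download can be reported before the error propagates. A short sketch of such a caller, assuming a stage object that provides the fetch() method above (the variable name stage is a placeholder, not from the snippet):

# Hypothetical caller; 'stage' stands in for whatever object defines fetch().
try:
    archive = stage.fetch()
    tty.msg("Archive available at %s" % archive)
except FailedDownloadError:
    tty.warn("Download of %s failed; check the URL and your network, then retry."
             % stage.url)
    raise
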