import os
import re
import sys

import llnl.util.tty as tty

import spack
# FailedDownloadError is assumed to be defined alongside these methods.


def fetch_from_url(self, url):
    # Run curl but grab the mime type from the http headers
    headers = spack.curl('-#',        # status bar
                         '-O',        # save file to disk
                         '-D', '-',   # print out HTML headers
                         '-L', url,
                         return_output=True, fail_on_error=False)

    if spack.curl.returncode != 0:
        # clean up archive on failure.
        if self.archive_file:
            os.remove(self.archive_file)

        if spack.curl.returncode == 60:
            # This is a certificate error.  Suggest spack -k
            raise FailedDownloadError(
                url,
                "Curl was unable to fetch due to invalid certificate. "
                "This is either an attack, or your cluster's SSL configuration "
                "is bad. If you believe your SSL configuration is bad, you "
                "can try running spack -k, which will not check SSL "
                "certificates. Use this at your own risk.")

    # Check if we somehow got an HTML file rather than the archive we
    # asked for. We only look at the last content type, to handle
    # redirects properly.
    content_types = re.findall(r'Content-Type:[^\r\n]+', headers)
    if content_types and 'text/html' in content_types[-1]:
        tty.warn("The contents of " + self.archive_file + " look like HTML.",
                 "The checksum will likely be bad. If it is, you can use",
                 "'spack clean --dist' to remove the bad archive, then fix",
                 "your internet gateway issue and install again.")
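# Aside: the Content-Type sniff above runs over curl's raw `-D -` header dump,
# which contains one header block per redirect hop; keying off the *last*
# match is what makes redirects behave. A minimal, self-contained sketch of
# that logic (the header text is made up for illustration):

import re

sample_headers = (
    "HTTP/1.1 302 Found\r\n"
    "Content-Type: text/html; charset=utf-8\r\n"
    "Location: https://mirror.example.com/pkg-1.0.tar.gz\r\n"
    "\r\n"
    "HTTP/1.1 200 OK\r\n"
    "Content-Type: application/x-gzip\r\n"
    "\r\n")

content_types = re.findall(r'Content-Type:[^\r\n]+', sample_headers)
assert 'text/html' in content_types[0]   # the intermediate redirect page
assert 'x-gzip' in content_types[-1]     # the final archive: no warning fires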
def fetch_from_url(self, url):
    try:
        # Run curl but grab the mime type from the http headers
        headers = spack.curl('-#',        # status bar
                             '-O',        # save file to disk
                             '-D', '-',   # print out HTML headers
                             '-L', url,
                             return_output=True)
    except:
        # clean up archive on failure.
        if self.archive_file:
            os.remove(self.archive_file)
        raise

    # Check if we somehow got an HTML file rather than the archive we
    # asked for. We only look at the last content type, to handle
    # redirects properly.
    content_types = re.findall(r'Content-Type:[^\r\n]+', headers)
    if content_types and 'text/html' in content_types[-1]:
        tty.warn("The contents of " + self.archive_file + " look like HTML.",
                 "The checksum will likely be bad. If it is, you can use",
                 "'spack clean --all' to remove the bad archive, then fix",
                 "your internet gateway issue and install again.")
def fetch(self): """Downloads the file at URL to the stage. Returns true if it was downloaded, false if it already existed.""" self.chdir() if self.archive_file: tty.msg("Already downloaded %s." % self.archive_file) else: tty.msg("Fetching %s" % self.url) try: # Run curl but grab the mime type from the http headers headers = spack.curl('-#', # status bar '-O', # save file to disk '-D', '-', # print out HTML headers '-L', self.url, return_output=True) except: # clean up archive on failure. if self.archive_file: os.remove(self.archive_file) raise # Check if we somehow got an HTML file rather than the archive we # asked for. We only look at the last content type, to handle # redirects properly. content_types = re.findall(r'Content-Type:[^\r\n]+', headers) if content_types and 'text/html' in content_types[-1]: tty.warn("The contents of " + self.archive_file + " look like HTML.", "The checksum will likely be bad. If it is, you can use", "'spack clean --all' to remove the bad archive, then fix", "your internet gateway issue and install again.") if not self.archive_file: raise FailedDownloadError(url) return self.archive_file
def fetch(self):
    self.stage.chdir()

    if self.archive_file:
        tty.msg("Already downloaded %s" % self.archive_file)
        return

    possible_files = self.stage.expected_archive_files
    save_file = None
    partial_file = None
    if possible_files:
        save_file = self.stage.expected_archive_files[0]
        partial_file = self.stage.expected_archive_files[0] + '.part'

    tty.msg("Trying to fetch from %s" % self.url)

    if partial_file:
        save_args = ['-C', '-',            # continue partial downloads
                     '-o', partial_file]   # use a .part file
    else:
        save_args = ['-O']

    curl_args = save_args + [
        '-f',         # fail on >400 errors
        '-D', '-',    # print out HTML headers
        '-L',         # resolve 3xx redirects
        self.url,
    ]

    if sys.stdout.isatty():
        curl_args.append('-#')   # status bar when using a tty
    else:
        curl_args.append('-sS')  # just errors when not.

    # Run curl but grab the mime type from the http headers
    headers = spack.curl(*curl_args, output=str, fail_on_error=False)

    if spack.curl.returncode != 0:
        # clean up archive on failure.
        if self.archive_file:
            os.remove(self.archive_file)

        if partial_file and os.path.exists(partial_file):
            os.remove(partial_file)

        if spack.curl.returncode == 22:
            # This is a 404. Curl will print the error.
            raise FailedDownloadError(
                self.url, "URL %s was not found!" % self.url)

        elif spack.curl.returncode == 60:
            # This is a certificate error. Suggest spack -k
            raise FailedDownloadError(
                self.url,
                "Curl was unable to fetch due to invalid certificate. "
                "This is either an attack, or your cluster's SSL "
                "configuration is bad. If you believe your SSL "
                "configuration is bad, you can try running spack -k, "
                "which will not check SSL certificates. "
                "Use this at your own risk.")

        else:
            # This is some other curl error. Curl will print the
            # error, but print a spack message too
            raise FailedDownloadError(
                self.url,
                "Curl failed with error %d" % spack.curl.returncode)

    # Check if we somehow got an HTML file rather than the archive we
    # asked for. We only look at the last content type, to handle
    # redirects properly.
    content_types = re.findall(r'Content-Type:[^\r\n]+', headers)
    if content_types and 'text/html' in content_types[-1]:
        tty.warn(
            "The contents of " +
            (self.archive_file if self.archive_file is not None
             else "the archive") + " look like HTML.",
            "The checksum will likely be bad. If it is, you can use",
            "'spack clean <package>' to remove the bad archive, then",
            "fix your internet gateway issue and install again.")

    if save_file:
        os.rename(partial_file, save_file)

    if not self.archive_file:
        raise FailedDownloadError(self.url)
def fetch(self):
    self.stage.chdir()

    if self.archive_file:
        tty.msg("Already downloaded %s" % self.archive_file)
        return

    possible_files = self.stage.expected_archive_files
    save_file = None
    partial_file = None
    if possible_files:
        save_file = self.stage.expected_archive_files[0]
        partial_file = self.stage.expected_archive_files[0] + '.part'

    tty.msg("Trying to fetch from %s" % self.url)

    if partial_file:
        save_args = ['-C', '-',            # continue partial downloads
                     '-o', partial_file]   # use a .part file
    else:
        save_args = ['-O']

    curl_args = save_args + [
        '-f',         # fail on >400 errors
        '-D', '-',    # print out HTML headers
        '-L',         # resolve 3xx redirects
        self.url,
    ]

    if sys.stdout.isatty():
        curl_args.append('-#')   # status bar when using a tty
    else:
        curl_args.append('-sS')  # just errors when not.

    # Run curl but grab the mime type from the http headers
    headers = spack.curl(*curl_args, output=str, fail_on_error=False)

    if spack.curl.returncode != 0:
        # clean up archive on failure.
        if self.archive_file:
            os.remove(self.archive_file)

        if partial_file and os.path.exists(partial_file):
            os.remove(partial_file)

        if spack.curl.returncode == 22:
            # This is a 404. Curl will print the error.
            raise FailedDownloadError(
                self.url, "URL %s was not found!" % self.url)

        elif spack.curl.returncode == 60:
            # This is a certificate error. Suggest spack -k
            raise FailedDownloadError(
                self.url,
                "Curl was unable to fetch due to invalid certificate. "
                "This is either an attack, or your cluster's SSL "
                "configuration is bad. If you believe your SSL "
                "configuration is bad, you can try running spack -k, "
                "which will not check SSL certificates. "
                "Use this at your own risk.")

        else:
            # This is some other curl error. Curl will print the
            # error, but print a spack message too
            raise FailedDownloadError(
                self.url,
                "Curl failed with error %d" % spack.curl.returncode)

    # Check if we somehow got an HTML file rather than the archive we
    # asked for. We only look at the last content type, to handle
    # redirects properly.
    content_types = re.findall(r'Content-Type:[^\r\n]+', headers)
    if content_types and 'text/html' in content_types[-1]:
        tty.warn(
            "The contents of " + self.archive_file + " look like HTML.",
            "The checksum will likely be bad. If it is, you can use",
            "'spack clean <package>' to remove the bad archive, then fix",
            "your internet gateway issue and install again.")

    if save_file:
        os.rename(partial_file, save_file)

    if not self.archive_file:
        raise FailedDownloadError(self.url)
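# Aside: the two versions above add resumable downloads: curl's `-C -` picks
# up where a previous attempt left off, incomplete data only ever lives in a
# `.part` file, and the final name appears only via a rename after the
# transfer succeeds. The same pattern in isolation (the fetch callable is
# hypothetical; on POSIX, os.rename within one filesystem is atomic):

import os

def download_with_resume(fetch, save_file):
    partial_file = save_file + '.part'   # all partial data goes here
    fetch(partial_file)                  # may resume an earlier attempt
    os.rename(partial_file, save_file)   # publish only the complete file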
def fetch(self):
    self.stage.chdir()

    if self.archive_file:
        tty.msg("Already downloaded %s." % self.archive_file)
        return

    tty.msg("Trying to fetch from %s" % self.url)

    curl_args = [
        '-O',        # save file to disk
        '-f',        # fail on >400 errors
        '-D', '-',   # print out HTML headers
        '-L', self.url,
    ]

    if sys.stdout.isatty():
        curl_args.append('-#')   # status bar when using a tty
    else:
        curl_args.append('-sS')  # just errors when not.

    # Run curl but grab the mime type from the http headers
    headers = spack.curl(*curl_args,
                         return_output=True, fail_on_error=False)

    if spack.curl.returncode != 0:
        # clean up archive on failure.
        if self.archive_file:
            os.remove(self.archive_file)

        if spack.curl.returncode == 22:
            # This is a 404. Curl will print the error.
            raise FailedDownloadError(
                self.url, "URL %s was not found!" % self.url)

        elif spack.curl.returncode == 60:
            # This is a certificate error. Suggest spack -k
            raise FailedDownloadError(
                self.url,
                "Curl was unable to fetch due to invalid certificate. "
                "This is either an attack, or your cluster's SSL configuration "
                "is bad. If you believe your SSL configuration is bad, you "
                "can try running spack -k, which will not check SSL "
                "certificates. Use this at your own risk.")

        else:
            # This is some other curl error. Curl will print the
            # error, but print a spack message too
            raise FailedDownloadError(
                self.url,
                "Curl failed with error %d" % spack.curl.returncode)

    # Check if we somehow got an HTML file rather than the archive we
    # asked for. We only look at the last content type, to handle
    # redirects properly.
    content_types = re.findall(r'Content-Type:[^\r\n]+', headers)
    if content_types and 'text/html' in content_types[-1]:
        tty.warn(
            "The contents of " + self.archive_file + " look like HTML.",
            "The checksum will likely be bad. If it is, you can use",
            "'spack clean --dist' to remove the bad archive, then fix",
            "your internet gateway issue and install again.")

    if not self.archive_file:
        raise FailedDownloadError(self.url)
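# Aside: every version above calls `spack.curl` as a callable that returns
# captured stdout and then exposes `.returncode` (Spack's Executable wrapper).
# A minimal stand-in with that calling convention, sketched on subprocess;
# this is an assumption about the wrapper's interface as used here, not
# Spack's actual implementation:

import subprocess

class Executable(object):
    def __init__(self, name):
        self.name = name
        self.returncode = None

    def __call__(self, *args, **kwargs):
        # Flags like return_output=True / output=str are accepted but ignored
        # here; this stand-in always returns captured stdout.
        fail_on_error = kwargs.get('fail_on_error', True)
        # Capture stdout (where `-D -` writes headers); leave stderr on the
        # terminal so curl's status bar and error messages still show.
        proc = subprocess.Popen((self.name,) + args, stdout=subprocess.PIPE)
        out = proc.communicate()[0].decode('utf-8')
        self.returncode = proc.returncode
        if fail_on_error and self.returncode != 0:
            raise subprocess.CalledProcessError(self.returncode, self.name)
        return out

curl = Executable('curl')   # e.g. headers = curl('-D', '-', '-L', url,
                            #                    fail_on_error=False)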