def gtfs_download(self, url, dt, zone):
    """Download one GTFS zip file from *url* into the local cache.

    Parameters
    ----------
    url : str
        Source URL; may contain an ``{API_KEY}`` placeholder.
    dt :
        Date used to build the destination path (passed to
        ``self.path_gtfszip``).
    zone : str
        Zone identifier used to build the destination path.

    Side effects: creates the destination directory, writes the zip via
    ``util.create_file`` (temp file relayed into place), then runs
    ``self.test_corrupted_zip`` on the result.
    """
    print("Downloading", self.slug, url, zone, dt)
    # Use only standard library functions to avoid dependencies.
    opener = FancyURLopener()

    # If this feed requires authentication, configure the opener.
    # Only HTTP BASIC is supported so far.
    if 'authentication' in self.data:
        auth_name = self.data['authentication']
        auth = auth_data['sites'][auth_name]
        # Callback invoked by the opener when the server asks for
        # credentials; must return a (user, pass) tuple.
        opener.prompt_user_passwd = \
            lambda host, realm: (auth['username'], auth['password'])
        # Some sites authenticate via extra URL parameters instead.
        if 'url_suffix' in auth:
            url = url + auth['url_suffix']
    if "{API_KEY}" in url:
        # API-key auth: substitute the key into the URL template.
        # Fall back to this feed's own name when no explicit
        # 'authentication' entry exists.
        try:
            auth_name = self.data['authentication']
        except KeyError:
            auth_name = self.name
        auth = auth_data['sites'][auth_name]
        url = url.format(API_KEY=auth['API_KEY'])
    # Make GTFS path.
    gtfs_path = self.path_gtfszip(dt, zone)
    util.makedirs(os.path.dirname(gtfs_path))
    print("**** Connecting to %s" % url)
    # Relay data from web to a temp file that util.create_file moves
    # into place, then sanity-check that we received a valid zip.
    with util.create_file(gtfs_path) as tmp_gtfs_path:
        opener.retrieve(url, tmp_gtfs_path)
    self.test_corrupted_zip(gtfs_path)
def _download() -> None:
    """Fetch the PromQL language-server tarball, verify it, and unpack it.

    Runs inside an ActivityIndicator. On any failure it logs via
    ``debug()`` and returns without setting ``self._ready``. Captures
    ``url``, ``checksum`` and ``self`` from the enclosing scope.
    """
    debug('Downloading server from', url)
    target = sublime.active_window()
    label = 'Downloading PromQL language server'
    with ActivityIndicator(target, label):
        # Create the opener before the try-block so the finally-clause
        # can always close it (previously a constructor failure would
        # have raised NameError on `opener.close()`).
        opener = FancyURLopener()
        try:
            tmp_file, _ = opener.retrieve(url)
            if not checksum_verified(checksum, tmp_file):
                debug('Checksum error.')
                # status_message() takes a single string; the old
                # three-argument call raised TypeError (and misspelled
                # "checksum").
                sublime.status_message(
                    'Server binary {} checksum error.'.format(
                        os.path.basename(tmp_file)))
                # Don't leave the failed download behind.
                os.unlink(tmp_file)
                return
            # Extract into the cache directory.
            # NOTE(review): extractall() on an untrusted archive can
            # write outside the target dir; consider filter='data'
            # (Python 3.12+) once available.
            with tarfile.open(tmp_file) as tf:
                tf.extractall(self._cache_path)
            os.unlink(tmp_file)
            self._ready = True
        except Exception as ex:
            debug('Failed downloading server:', ex)
        finally:
            opener.close()
# File name for each IXI dataset key; the image sets are tarballs,
# the demographic table is an Excel sheet.
fnames = {}
fnames['t1'] = 't1.tar'
fnames['t2'] = 't2.tar'
fnames['pd'] = 'pd.tar'
fnames['mra'] = 'mra.tar'
fnames['demographic'] = 'demographic.xls'

if DOWNLOAD_IMAGES:
    # Download all IXI data
    # `urls` maps the same keys as `fnames` to source URLs
    # (defined elsewhere in the file).
    for key, url in urls.items():
        if not os.path.isfile(fnames[key]):
            print('Downloading {} from {}'.format(fnames[key], url))
            curr_file = FancyURLopener()
            curr_file.retrieve(url, fnames[key])
        else:
            # Skip files already present so re-runs are cheap.
            print('File {} already exists. Skipping download.'.format(
                fnames[key]))

if EXTRACT_IMAGES:
    # Extract the HH subset of IXI
    for key, fname in fnames.items():
        # Only the .tar image archives are extracted; the .xls
        # demographic file is left as-is.
        if (fname.endswith('.tar')):
            print('Extracting IXI HH data from {}.'.format(fnames[key]))
            output_dir = os.path.join('./orig/', key)
            # Create ./orig/<key>/ if needed; the actual tar extraction
            # presumably follows this chunk — confirm in the full file.
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)