def download_from_url(url, destination_path, force=False, aspera=False):
    """Download file from remote server

    :param url: path to the file on remote server (including file name)
    :param destination_path: path to the file on local machine (including file name)
    :param force: bool - if file exist force to overwrite it, defaults to False
    :param aspera: bool - download with Aspera Connect instead of wgetter,
        defaults to False
    """
    try:
        is_already_downloaded = os.path.isfile(destination_path)
        if is_already_downloaded:
            if force:
                stderr.write("Downloading %s to %s\n" % (url, destination_path))
                # wgetter returns the downloaded path; it is not needed here,
                # so the previously unused `fn` local was dropped.
                wgetter.download(url, outdir=os.path.dirname(destination_path))
            else:
                stderr.write(
                    "File already exist. Use force=True if you would like to overwrite it.\n"
                )
        else:
            if aspera:
                download_aspera(url, destination_path)
            else:
                stderr.write("Downloading %s to %s\n" % (url, destination_path))
                wgetter.download(url, outdir=os.path.dirname(destination_path))
    except URLError:
        stderr.write("Cannot find file %s" % url)
def on_status(self, status):
    """Stream callback: append tweet metadata to the tracking CSV and
    download any attached media into ``TerrorAttachment``.
    """
    csv_path = "csv-files/terrortracking.csv"
    # Create the CSV with a header row on first use.
    if not os.path.isfile(csv_path):
        with open(csv_path, "w", newline="", encoding="utf-8") as f:
            writer = csv.writer(f)
            # BUG fix: the header previously had 6 columns while the data
            # rows below write 7 values (the tweet text had no column).
            writer.writerow(["Screen name", "Created At", "Text", "Location",
                             "Lat", "Long", "Media link"])
    # BUG fix: the append handle now uses the same utf-8 encoding the file
    # was created with (tweets routinely contain non-ASCII text).
    with open(csv_path, "a", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        try:
            lat = status.coordinates["coordinates"][0]
            lng = status.coordinates["coordinates"][1]
        except (TypeError, KeyError, IndexError):
            # status.coordinates is None for non-geotagged tweets.
            lat = ""
            lng = ""
        try:
            geo = status.place.name
        except AttributeError:
            # status.place is None when no place is attached.
            geo = ""
        media = status.entities.get("media", [])
        if media:
            media = media[0]["media_url"]
            # name=str(status.created_at)+'_'+status.author.screen_name
            # name += self.extensionFinder(media)
            wgetter.download(media, outdir="TerrorAttachment")
            writer.writerow([status.author.screen_name, status.created_at,
                             status.text, geo, lat, lng, media])
def on_status(self, status):
    """Stream callback: log tweets mentioning @terrorbgone to the tracking
    CSV and download any attached media.
    """
    csv_path = 'csv-files/terrortracking.csv'
    # Create the CSV with a header row on first use.
    if not os.path.isfile(csv_path):
        with open(csv_path, 'w', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            # BUG fix: header previously had 6 columns while the data rows
            # below write 7 values (tweet text had no column).
            writer.writerow(['Screen name', 'Created At', 'Text', 'Location',
                             'Lat', 'Long', 'Media link'])
    # BUG fix: append with the same utf-8 encoding used at creation time.
    with open(csv_path, 'a', newline="", encoding='utf-8') as f:
        if '@terrorbgone' in status.text.lower():  # hack to filter
            writer = csv.writer(f)
            try:
                lat = status.coordinates['coordinates'][0]
                lng = status.coordinates['coordinates'][1]
            except (TypeError, KeyError, IndexError):
                # status.coordinates is None for non-geotagged tweets.
                lat = ''
                lng = ''
            try:
                geo = status.place.name
            except AttributeError:
                geo = ''
            media = status.entities.get('media', [])
            if media:
                media = media[0]['media_url']
                # name=str(status.created_at)+'_'+status.author.screen_name
                # name += self.extensionFinder(media)
                wgetter.download(media, outdir="TerrorAttachment")
                writer.writerow([status.author.screen_name, status.created_at,
                                 status.text, geo, lat, lng, media])
def download_from_url(url, destination_path, force=False, aspera=False, silent=False):
    """Download file from remote server.

    If the file is already downloaded and ``force`` flag is on the file
    will be removed and downloaded again.

    Args:
        url (:obj:`str`): Path to the file on remote server (including file name)
        destination_path (:obj:`str`): Path to the file on local machine (including file name)
        force (:obj:`bool`): If file exist force to overwrite it. Defaults to False.
        aspera (:obj:`bool`): Download with Aspera Connect. Defaults to False.
        silent (:obj:`bool`): Do not print any message. Defaults to False.
    """
    def _fetch():
        # Shared download path: wgetter (with console progress) when not
        # silent, otherwise a plain urlopen stream copy.
        if not silent:
            logger.info("Downloading %s to %s" % (url, destination_path))
            wgetter.download(url, outdir=os.path.dirname(destination_path))
        else:
            with closing(urlopen(url, timeout=30)) as r:
                with open(destination_path, mode='wb') as f:
                    copyfileobj(r, f)

    try:
        if os.path.isfile(destination_path):
            if force:
                try:
                    os.remove(destination_path)
                except Exception:
                    if not silent:
                        logger.error("Cannot delete %s" % destination_path,
                                     exc_info=True)
                _fetch()
            else:
                # BUG fix: this message previously ignored the `silent` flag.
                if not silent:
                    logger.info(
                        ("File %s already exist. Use force=True if you"
                         " would like to overwrite it.") % destination_path)
        else:
            if aspera:
                download_aspera(url, destination_path)
            else:
                _fetch()
    except URLError:
        logger.error("Cannot find file %s" % url)
def download_vot(years=None, out_dir="../Data"):
    """Download VOT datasets for the given years into *out_dir*.

    :param years: iterable of years to fetch; defaults to [2013, 2014, 2015]
    :param out_dir: destination directory for the downloaded archives
    """
    # BUG fix: the previous signature used a mutable default argument
    # (years=[2013, 2014, 2015]); use None and build the list per call.
    if years is None:
        years = [2013, 2014, 2015]
    for year in years:
        url = vot_url(year)
        print("Downloading VOT {0}".format(year))
        wgetter.download(url, outdir=out_dir)
def get_PDFs(version, source, pdfsets):
    """Download and unpack LHAPDF PDF sets that are not yet installed.

    :param version: release version (path component on the remote server)
    :param source: base URL of the remote source
    :param pdfsets: iterable of PDF set names to fetch

    A ``<pdfset>.stamp`` file in ``build_path`` marks a set as done, so
    repeated runs skip already-installed sets.
    """
    pdf_source = source + 'pdfsets/' + version + '/'
    for pdfset in pdfsets:
        stamp = pdfset + '.stamp'
        if not os.path.isfile(os.path.join(build_path, stamp)):
            tfile = pdfset + '.tar.gz'
            print('Getting ' + pdfset)
            wgetter.download(pdf_source + tfile, outdir=build_path)
            # BUG fix: the tar archive was never closed (file-descriptor
            # leak); the context manager guarantees closure on any path.
            with tarfile.open(name=os.path.join(build_path, tfile),
                              mode='r:gz') as tar:
                tar.extractall(path=os.path.join(install_path, 'share', 'LHAPDF'))
            os.chdir(build_path)
            # Touch the stamp file to mark completion.
            with open(stamp, 'a'):
                os.utime(stamp, None)
def on_status(self, status): global api #Check if CSV exists. Else, create it. if os.path.exists(LOG_FILE) == False: create_logging_file() with open(LOG_FILE, 'a', newline="") as f: if TWITTER_ACCOUNT in status.text.lower(): #hack to filter writer = csv.writer(f) try: lat = status.coordinates['coordinates'][1] long = status.coordinates['coordinates'][0] except: lat = '' long = '' try: geo = status.place.name except: geo = '' media = status.entities.get('media', []) if (len(media) > 0): media = media[0]['media_url'] #name=str(status.created_at)+'_'+status.author.screen_name #name += self.extensionFinder(media) wgetter.download(media, outdir=TWITTER_PIC_DIRECTORY) writer.writerow([ status.author.screen_name, status.created_at, status.text, geo, lat, long, media ]) print("Downloaded! Running classifier..") else: return True #ci.imageClassify("TerrorAttachment") last_image = get_last_image(TWITTER_PIC_DIRECTORY) print("Picture URL is", last_image) with open(last_image, 'rb') as f: r = requests.get("http://127.0.0.1:5000/upload", files={"file": f}) response = r.json() f.close() name = status.user.screen_name tweet_id = status.id_str disease = response["Disease"] est_time = response["Average Duration"] symptoms = response["Symptoms"].replace('\r', '').replace('\n', ' ') return_reply = "Hey @{}, Its likely {}, lasting {}, with symptoms {}".format( name, disease, est_time, symptoms) return_reply = return_reply[:140] #140 char api.update_status(return_reply, tweet_id) print("Done!")
def download_album(self, album):
    """Fetch every track of *album* into its target directory, tag it,
    then fetch the cover art.

    Returns False as soon as a track download fails, True otherwise.
    """
    tracks = album['tracks']
    for idx, item in enumerate(tracks):
        meta = {
            "artist": album['artist'],
            "album": album['title'],
            "title": item['title'],
            "track": item['track'],
            "date": album['date']
        }
        print("Accessing track " + str(idx + 1) + " of " + str(len(tracks)))

        target = self.template_to_path(meta).encode('utf-8')
        folder = self.create_directory(target)

        # Skip tracks already on disk unless overwriting was requested.
        if not self.overwrite and os.path.isfile(target):
            title_bytes = item['title'].encode('utf-8')
            print(
                "Skipping track {} - {} as it's already downloaded, use --overwrite to overwrite existing files".format(
                    item['track'], title_bytes))
            continue

        # Tracks with no stream URL cannot be fetched at all.
        if not item.get('url'):
            title_bytes = item['title'].encode('utf-8')
            print("Skipping track {} - {} as it is not available".format(
                item['track'], title_bytes))
            continue

        try:
            link = item['url']
            # Check and see if HTTP is in the track url; protocol-relative
            # URLs need a scheme prefixed.
            if 'http' not in link:
                link = 'http:%s' % link
            fetched = wgetter.download(link, outdir=folder)
            os.rename(fetched, target)
            self.write_id3_tags(target, meta)
        except Exception as e:
            print(e)
            print("Downloading failed..")
            return False

    try:
        art = wgetter.download(album['art'], outdir=folder)
        os.rename(art, folder + "/cover.jpg")
    except Exception as e:
        print(e)
        print("Couldn't download album art.")
    return True
def download_album(self, album):
    """Download every track of *album* (dict with 'tracks', 'artist',
    'title', 'date', 'art' keys) plus the cover art.

    Returns False as soon as one track download fails, True otherwise.
    """
    for track_index, track in enumerate(album['tracks']):
        # ID3 metadata for the current track.
        track_meta = {
            "artist": album['artist'],
            "album": album['title'],
            "title": track['title'],
            "track": track['track'],
            "date": album['date']
        }
        print("Accessing track " + str(track_index + 1) + " of " +
              str(len(album['tracks'])))
        # NOTE(review): the path is encoded to UTF-8 bytes here; os.* calls
        # accept bytes paths, but confirm create_directory handles bytes.
        filename = self.template_to_path(track_meta).encode('utf-8')
        dirname = self.create_directory(filename)

        # Skip tracks already on disk unless --overwrite was given.
        if not self.overwrite and os.path.isfile(filename):
            re_encoded_track_title = track['title'].encode('utf-8')
            print "Skipping track {} - {} as it's already downloaded, use --overwrite to overwrite existing files".format(
                track['track'], re_encoded_track_title)
            continue
        # Tracks without a stream URL cannot be fetched.
        if not track.get('url'):
            re_encoded_track_title = track['title'].encode('utf-8')
            print "Skipping track {} - {} as it is not available".format(
                track['track'], re_encoded_track_title)
            continue
        try:
            track_url = track['url']
            # Check and see if HTTP is in the track_url; protocol-relative
            # URLs ("//host/...") need an explicit scheme.
            if 'http' not in track_url:
                track_url = 'http:%s' % track_url
            # wgetter writes to a temporary name; move it to the final path.
            tmp_file = wgetter.download(track_url, outdir=dirname)
            os.rename(tmp_file, filename)
            self.write_id3_tags(filename, track_meta)
        except Exception as e:
            print e
            print "Downloading failed.."
            return False
    try:
        # Album art is stored next to the tracks as cover.jpg.
        tmp_art_file = wgetter.download(album['art'], outdir=dirname)
        os.rename(tmp_art_file, dirname + "/cover.jpg")
    except Exception as e:
        print e
        print "Couldn't download album art."
    return True
def download_tracks(track_info, track_title, directory):
    """Downloads the track that was sent to it.

    Skips over tracks that have already been downloaded.

    :param track_info: track dict containing ['file']['mp3-128'], a
        protocol-relative stream URL
    :param track_title: title used for the output file name
    :param directory: destination directory
    :return: None
    """
    download_url = track_info['file']['mp3-128']
    # The URL is protocol-relative ("//host/..."); prepend the scheme.
    download_url = "https://" + download_url[2:]

    # os.path.join picks the right separator on every platform, replacing
    # the old explicit platform.system() check.
    file_name = os.path.join(directory, track_title + ".mp3")

    # BUG fix: the file was previously downloaded BEFORE the existence
    # check, and the temporary file was left behind when skipping. Check
    # first so no bandwidth is wasted and no temp file leaks.
    if os.path.isfile(file_name):
        print("Skipping file: " + file_name + " already exists.")
        return

    # Downloads the mp3 from the URL into a temporary file in `directory`,
    # then renames it to the final "<track_title>.mp3" name.
    tmp_file = wgetter.download(download_url, outdir=directory)
    os.rename(tmp_file, file_name)
    print("\nDone downloading track\n")
def messenger(path):
    """Endpoint handler: download the image whose URL is embedded in the
    request path, run the classifier on it, and return the prediction
    details as JSON.
    """
    # path2 = request.headers["Auth_Token"]
    # Recover the image URL by slicing off everything up to and including
    # "api/" from the full request URL (acknowledged hack).
    url = request.url
    image_url = url[url.find("api/") + 4:]
    # end hacky.
    folder_name = "messengerpics"
    check_or_make_folder(folder_name)
    wgetter.download(image_url, outdir=folder_name)
    # NOTE(review): glob()[0] picks an arbitrary .jpg from the folder, not
    # necessarily the file just downloaded -- confirm the folder is cleared
    # between requests, or this may classify a stale image.
    file = glob.glob(folder_name + '/' + '*.jpg')[0]
    pic = preprocess_single_image(file)
    pred_class = model.predict_classes(pic)[0]
    pred_class_name = get_pred_class_name(pred_class)
    pred_class_extra_details_dic = get_pred_class_extra_details(
        pred_class_name)
    # Include the class label itself alongside the extra details.
    pred_class_extra_details_dic["class"] = pred_class_name
    return jsonify(pred_class_extra_details_dic)
def download_OSM(args):
    """Download the OSM extract described by the MapZen settings file and
    move it to its configured file name under the data directory.
    """
    settings = MapZenSettings(args.settings).get_settings()
    sources = settings.get('sources')
    dest = sources.get('data_dir')
    url = sources.get('data_url')
    filename = sources.get('filename')

    downloaded = wgetter.download(url, outdir=dest)
    # wgetter returns the path it wrote; rename only if a file came back.
    if downloaded:
        os.rename(downloaded, os.path.join(dest, filename))
def download_album(self, album):
    """Download every track of *album* plus the cover art into the
    directory derived from the filename template.

    Returns False as soon as one track download fails, True otherwise.
    """
    for track in album["tracks"]:
        # ID3 metadata for the current track.
        track_meta = {
            "artist": album["artist"],
            "album": album["title"],
            "title": track["title"],
            "track": track["track"],
            "date": album["date"],
        }
        filename = self.template_to_path(track_meta)
        dirname = self.create_directory(filename)

        # Skip tracks already on disk unless --overwrite was given.
        if not self.overwrite and os.path.isfile(filename):
            print "Skipping track {} - {} as it's already downloaded, use --overwrite to overwrite existing files".format(
                track["track"], track["title"]
            )
            continue
        # Tracks without a stream URL cannot be fetched.
        if not track.get("url"):
            print "Skipping track {} - {} as it is not available".format(track["track"], track["title"])
            continue
        try:
            # wgetter writes to a temporary name; move it to the final path.
            tmp_file = wgetter.download(track["url"], outdir=dirname)
            os.rename(tmp_file, filename)
            self.write_id3_tags(filename, track_meta)
        except Exception as e:
            print e
            print "Downloading failed.."
            return False
    try:
        # Album art is stored next to the tracks as cover.jpg.
        tmp_art_file = wgetter.download(album["art"], outdir=dirname)
        os.rename(tmp_art_file, dirname + "/cover.jpg")
    except Exception as e:
        print e
        print "Couldn't download album art."
    return True
def download_album(self, album):
    """Fetch all tracks of *album* and its cover art.

    Returns True when the album was processed, False on the first failed
    track download.
    """
    for item in album['tracks']:
        meta = {
            "artist": album['artist'],
            "album": album['title'],
            "title": item['title'],
            "track": item['track'],
            "date": album['date']
        }
        target = self.template_to_path(meta)
        folder = self.create_directory(target)

        # Existing files are kept unless overwriting was requested.
        if not self.overwrite and os.path.isfile(target):
            print("Skipping track {} - {} as it's already downloaded, use --overwrite to overwrite existing files".format(item['track'], item['title']))
            continue
        # A track with no stream URL cannot be downloaded.
        if not item.get('url'):
            print("Skipping track {} - {} as it is not available".format(item['track'], item['title']))
            continue

        try:
            fetched = wgetter.download(item['url'], outdir=folder)
            os.rename(fetched, target)
            self.write_id3_tags(target, meta)
        except Exception as e:
            print(e)
            print("Downloading failed..")
            return False

    try:
        art = wgetter.download(album['art'], outdir=folder)
        os.rename(art, folder + "/cover.jpg")
    except Exception as e:
        print(e)
        print("Couldn't download album art.")
    return True
def handle_start(message):
    """Bot handler: given a bandcamp track URL, download the song and its
    cover, post both to GROUP_ID, and clean up the local files.
    """
    # check if url is valid
    if re.match(r'https:\/\/*.*\.bandcamp\.com\/track\/*.*', message.text):
        try:
            url = message.text
            # get_songs returns a metadata dict; 'songs' is a nested list of
            # (name, url) pairs -- presumably [page][track], TODO confirm.
            meta = get_songs(url, message.chat.id)
            artist = meta['artist']
            cover_url = meta['cover']
            album = meta['album']
            song_url = meta['songs'][0][0][1]
            song = meta['songs'][0][0][0]
            wgetter.download(song_url, outdir=download_dir)
            wgetter.download(cover_url, outdir=download_dir)
            # Reconstruct the local file names wgetter produced from the
            # URL components.
            audio_file = download_dir + '/' + song_url.split(
                '/mp3-128/')[-1].split('?')[0]
            cover_file = download_dir + '/' + cover_url.split('img/')[1]
            cover = open(cover_file, 'rb')
            bot.send_photo(GROUP_ID,
                           cover,
                           caption='{} - {}'.format(artist, album),
                           disable_notification=True)
            os.remove(cover_file)
            audio = open(audio_file, 'rb')
            bot.send_audio(GROUP_ID,
                           audio,
                           performer=artist,
                           title=song,
                           disable_notification=False)
            os.remove(audio_file)
        except Exception as e:
            # Report any failure back to the requesting chat.
            bot.send_message(message.chat.id, e)
def install(package):
    """Download, unpack, configure, build, install and check *package*.

    :param package: dict with 'package', 'version', 'source' and
        'configure_options' keys

    A ``<name>-<version>.stamp`` file in ``build_path`` marks the package
    as installed, so repeated runs skip it.
    """
    package_name = package['package'] + '-' + package['version']
    stamp = package_name + '.stamp'
    if not os.path.isfile(os.path.join(build_path, stamp)):
        tfile = package_name + '.tar.gz'
        url = package['source'] + tfile
        bt.show_variable('url', url)
        try:
            wgetter.download(url, outdir=build_path)
        except urllib2.HTTPError:
            # Some servers reject urllib's user agent; fall back to wget.
            print('Got blocked. Trying manual wget')
            os.chdir(os.path.join(build_path))
            bt.call_verbose(['wget', url])
        # BUG fix: the tar archive was never closed (fd leak); the context
        # manager guarantees closure on any path.
        with tarfile.open(name=os.path.join(build_path, tfile),
                          mode='r:gz') as tar:
            tar.extractall(path=build_path)
        os.chdir(os.path.join(build_path, package_name))
        prefix = '--prefix=' + os.path.join(install_path)
        bt.call_verbose(['./configure', prefix] + package['configure_options'])
        bt.call_verbose(['make', '-j', jobs])
        bt.call_verbose(['make', 'install'])
        bt.call_verbose(['make', '-j', jobs, 'check'])
        os.chdir(build_path)
        # Touch the stamp file to mark completion.
        with open(stamp, 'a'):
            os.utime(stamp, None)
def install(package, source, configure_options):
    """Download, unpack, configure, build, install and check *package*.

    :param package: package name (also the tarball base name)
    :param source: base URL of the remote source
    :param configure_options: extra arguments for ./configure

    A ``<package>.stamp`` file in ``build_path`` marks the package as
    installed, so repeated runs skip it.
    """
    stamp = package + '.stamp'
    if not os.path.isfile(os.path.join(build_path, stamp)):
        tfile = package + '.tar.gz'
        url = source + tfile
        show_variable('url', url)
        # The returned local file name was assigned but never used.
        wgetter.download(url, outdir=build_path)
        # BUG fix: the tar archive was never closed (fd leak); the context
        # manager guarantees closure on any path.
        with tarfile.open(name=os.path.join(build_path, tfile),
                          mode='r:gz') as tar:
            tar.extractall(path=build_path)
        os.chdir(os.path.join(build_path, package))
        prefix = '--prefix=' + os.path.join(install_path)
        call_verbose(['./configure', prefix] + configure_options)
        call_verbose(['make'])
        # NOTE(review): 'make -j' with no job count spawns unlimited jobs,
        # and rebuilding after the plain 'make' above looks redundant --
        # kept as-is since the build sequence may be deliberate.
        call_verbose(['make', '-j'])
        call_verbose(['make', '-j', 'install'])
        call_verbose(['make', '-j', 'check'])
        os.chdir(build_path)
        # Touch the stamp file to mark completion.
        with open(stamp, 'a'):
            os.utime(stamp, None)
def download_album_cover(self):
    """Downloads album cover from URL.

    Stores the final path in ``self.__album_data['cover']``; if a cover
    already exists there, the freshly downloaded temp file is discarded.
    """
    print("\nDownloading album cover...\n")
    tmp_file = wgetter.download(self.__album_data['cover_url'],
                                outdir=self.__file_path)

    # os.path.join picks the right separator on every platform, replacing
    # the previous explicit platform.system() == "Windows" branching.
    self.__album_data['cover'] = os.path.join(self.__file_path, "cover.jpg")

    # if file already exists, we skip that file and delete the tmp_file
    if os.path.isfile(self.__album_data['cover']):
        os.remove(tmp_file)
        print("Skipping file: " + self.__album_data['cover_url'] +
              " already exists.")
    else:
        os.rename(tmp_file, self.__album_data['cover'])
    print("\nDone downloading album cover!\n")
# Write a CSV-ish header line, then one metadata row per photo in the
# group pool, downloading each photo as it is processed.
# Depends on outer-scope names: `file` (open handle), `flickr` (API
# client), `group_pool_photos`, `group_id`.
file.write(
    'PICID, PICURL, TAKEN, LOCATION, REALNAME, TITLE, DESCRIPTION, PATH_ALIAS'
)
file.write('\r\n')
for line in group_pool_photos:
    photoinfo = flickr.photos.getInfo(photo_id=line['id'])
    # Strip commas so the value does not break the comma-separated row.
    description = (
        photoinfo['photo']['description']['_content']).replace(
            ",", "").encode("utf-8")
    if not description:
        description = 'na'
    taken = photoinfo['photo']['dates']['taken']
    path_alias = photoinfo['photo']['owner']['path_alias']
    if not path_alias:
        path_alias = 'na'
    title = photoinfo['photo']['title']['_content'].replace(
        ";", "").replace(",", "").encode("utf-8")
    location = photoinfo['photo']['owner']['location'].replace(
        ";", "").replace(",", "").encode("utf-8")
    # NOTE(review): the chained replace(' ', ' ') calls look like collapsed
    # multi-space -> single-space cleanup; confirm against the original
    # source, as written they are no-ops.
    realname = photoinfo['photo']['owner']['realname'].replace(
        ";", "").replace(",", "").replace(' ', ' ').replace(
            ' ', ' ').replace(' ', ' ').encode("utf-8")
    picsize = flickr.photos.getSizes(photo_id=line['id'])
    # Last entry in 'size' is the largest available rendition.
    picurl = (picsize['sizes']['size'][-1]['source'])
    file.write(line['id'] + "," + picurl + "," + taken + "," + location +
               "," + realname + "," + title + "," + description + "," +
               path_alias)
    file.write('\r\n')
    filename = wgetter.download(picurl, outdir=group_id)
    # Throttle requests to stay under the API rate limit.
    time.sleep(0.5)
def download(url, output_dir):
    """Fetch *url* into *output_dir* and return the local file path
    reported by wgetter (falsy when nothing was downloaded)."""
    local_path = wgetter.download(url, outdir=output_dir)
    if local_path:
        LOG.info('Downloaded file is at %s ...', local_path)
    return local_path
import wgetter, gzip, tarfile

# Fetch the CIFAR-10 binary archive into the current directory.
fileName = wgetter.download(
    'https://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz')

# BUG fix: the streams were previously opened and closed by hand, leaking
# on any error between open() and close(). Context managers guarantee
# closure. The intermediate .tar file is still produced on disk, exactly
# as before.
with gzip.open('cifar-10-binary.tar.gz', 'rb') as gzip_file:
    with open('cifar-10-binary.tar', 'wb') as ungzip_file:
        ungzip_file.write(gzip_file.read())

# Extract the dataset files into the current directory.
with tarfile.open('cifar-10-binary.tar', 'r') as tar_file:
    tar_file.extractall('.')
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 12 08:52:53 2016

@author: waffleboy

Downloads monthly daily-data CSVs from weather.gov.sg for each station in
LOCATION_RANGE over YEAR_RANGE x MONTH_RANGE.
"""
import wgetter, os

# URL template: station code + "_" + YYYYMM, e.g. DAILYDATA_S44_201201.csv
BASE_URL = "http://weather.gov.sg/files/dailydata/DAILYDATA_{}_{}.csv"
wd = "/home/waffleboy/Desktop/NEA/weather_data"

if not os.path.exists(wd):
    os.mkdir(wd)

YEAR_RANGE = [2012, 2013, 2014, 2015, 2016]
MONTH_RANGE = list(range(1, 13))
# Zero-pad months to two digits so they concatenate into YYYYMM.
MONTH_RANGE = ['0' + str(x) if x < 10 else str(x) for x in MONTH_RANGE]
LOCATION_RANGE = {"Jurong West": "S44", "Clementi": "S50"}

for location in LOCATION_RANGE.values():
    for year in YEAR_RANGE:
        for month in MONTH_RANGE:
            # BUG fix: `year` is an int and `month` a str, so the original
            # `year + month` raised TypeError on the very first iteration.
            # Build the YYYYMM string explicitly instead.
            duration = str(year) + month
            wgetter.download(BASE_URL.format(location, duration), outdir=wd)
# Download every feed entry not yet recorded in the shelf-style `db`,
# then pack all downloaded comic pages into a single CBZ archive.
# Depends on outer-scope names: `d` (parsed feed), `db`, `COMIC_DIR`,
# `getint`, `skipped`.
for entry in d['entries']:
    # Titles are stored UTF-16LE-encoded as the db keys.
    if entry['title'].encode("utf-16le") not in db.keys():
        # The comic image URL is embedded in the entry's summary HTML.
        url = re.search('src="([^"]+)"',
                        entry['summary_detail']['value']).group(1)
        entry_date = datetime.fromtimestamp(
            (mktime(entry['published_parsed'])))
        puts(
            colored.white("Downloading comic: " +
                          entry['title'].encode("utf-16le")))
        with indent(5, quote=">"):
            # NOTE(review): this prints the AGE of the entry (now - date),
            # not the publication date the label suggests.
            puts(
                colored.white("Published on:" +
                              str(datetime.now() - entry_date)))
        filename = wgetter.download(url, outdir=COMIC_DIR)
        # Record the entry under its encoded title so it is skipped next run.
        entry['title'] = entry['title'].encode("utf-16le")
        db[entry['title']] = entry
    else:
        skipped = skipped + 1
db.close()

# Collect the downloaded pages in numeric order and zip them up.
comicfiles = [f for f in os.listdir(COMIC_DIR)]
comicfiles.sort(key=getint)
puts(colored.white("Creating CBZ file"))
zf = zipfile.ZipFile("dist/jl8-comics.cbz", "w")
for f in comicfiles:
    if f not in zf.namelist():
        puts(colored.yellow("Adding " + f + " to CBZ"))
        zf.write(COMIC_DIR + f, f)
def get_GEO_file(geo, destdir=None, annotate_gpl=False, how="full",
                 include_data=False):
    """Given GEO accession download corresponding SOFT file

    :param geo: str -- GEO database identifier (GDS/GSE/GSM/GPL prefix)
    :param destdir: str -- directory to download data; a temporary
        directory is created when None
    :param annotate_gpl: bool -- for GPL only, try the NCBI annotation
        file first and fall back to the submitter GPL
    :param how: str -- GSM download mode: full ...
    :param include_data: bool -- full download of GPLs including series
        and samples
    :returns: tuple -- path to downloaded file, type of GEO object
    """
    geo = geo.upper()
    geotype = geo[:3]
    # FTP range directory layout: e.g. GSE12345 -> GSE12nnn.
    range_subdir = sub(r"\d{1,3}$", "nnn", geo)
    if destdir is None:
        tmpdir = mkdtemp()
        stderr.write("No destination directory specified."
                     " Temporary files will be downloaded at %s\n" % tmpdir)
    else:
        tmpdir = destdir
    # Build the remote URL and local file path per GEO record type.
    if geotype == "GDS":
        gseurl = "ftp://ftp.ncbi.nlm.nih.gov/geo/{root}/{range_subdir}/{record}/soft/{record_file}"
        url = gseurl.format(root="datasets",
                            range_subdir=range_subdir,
                            record=geo,
                            record_file="%s.soft.gz" % geo)
        filepath = path.join(tmpdir, "{record}.soft.gz".format(record=geo))
    elif geotype == "GSE":
        gseurl = "ftp://ftp.ncbi.nlm.nih.gov/geo/{root}/{range_subdir}/{record}/soft/{record_file}"
        url = gseurl.format(root="series",
                            range_subdir=range_subdir,
                            record=geo,
                            record_file="%s_family.soft.gz" % geo)
        filepath = path.join(tmpdir,
                             "{record}_family.soft.gz".format(record=geo))
    elif geotype == "GSM":
        gsmurl = "http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?targ=self&acc={record}&form=text&view={how}"
        url = gsmurl.format(record=geo, how=how)
        filepath = path.join(tmpdir, "{record}.txt".format(record=geo))
    elif geotype == "GPL":
        if annotate_gpl:
            # Preferred path: NCBI-generated annotation file. On failure
            # (URLError) we fall through to the submitter GPL below.
            gplurl = "ftp://ftp.ncbi.nlm.nih.gov/geo/{root}/{range_subdir}/{record}/annot/{record_file}"
            url = gplurl.format(root="platforms",
                                range_subdir=range_subdir,
                                record=geo,
                                record_file="%s.annot.gz" % geo)
            filepath = path.join(tmpdir,
                                 "{record}.annot.gz".format(record=geo))
            if not path.isfile(filepath):
                try:
                    stderr.write("Downloading %s to %s\n" % (url, filepath))
                    fn = wgetter.download(url, outdir=path.dirname(filepath))
                    return filepath, geotype
                except URLError:
                    stderr.write(
                        "Annotations for %s are not available, trying submitter GPL\n"
                        % geo)
            else:
                stderr.write("File already exist: using local version.\n")
                return filepath, geotype
        # Submitter GPL: either the full family archive or the text record.
        if include_data:
            url = "ftp://ftp.ncbi.nlm.nih.gov/geo/platforms/{0}/{1}/soft/{1}_family.soft.gz".format(
                range_subdir, geo)
            filepath = path.join(tmpdir,
                                 "{record}_family.soft.gz".format(record=geo))
        else:
            gplurl = "http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?targ=self&acc={record}&form=text&view={how}"
            url = gplurl.format(record=geo, how=how)
            filepath = path.join(tmpdir, "{record}.txt".format(record=geo))
        if not path.isfile(filepath):
            stderr.write("Downloading %s to %s\n" % (url, filepath))
            fn = wgetter.download(url, outdir=path.dirname(filepath))
            stderr.write("\n")
        else:
            stderr.write("File already exist: using local version.\n")
        return filepath, geotype
    else:
        raise UnknownGEOTypeException("%s type is not known" % geotype)
    # Common download path for GDS/GSE/GSM (GPL returns earlier).
    if not path.isfile(filepath):
        stderr.write("Downloading %s to %s\n" % (url, filepath))
        fn = wgetter.download(url, outdir=path.dirname(filepath))
        stderr.write("\n")
    else:
        stderr.write("File already exist: using local version.\n")
    return filepath, geotype
def handle_start(message):
    """Bot handler: scrape a bandcamp track page, download the audio and
    cover into a per-artist directory, tag the mp3, send both to BOT_TAG,
    and clean the local files up afterwards.
    """
    # check if url is valid
    if re.match(r'https:\/\/*.*\.bandcamp\.com\/track\/*.*', message.text):
        url = message.text
        try:
            response = requests.get(url)
            tree = html.fromstring(response.content)
            # get song cover
            cover_url = tree.xpath('//img[@itemprop="image"]/@src')[0]
            # get song and artist from metadata
            meta = tree.xpath('//meta[@name="title"]/@content')[0]
            response = response.text
            # get audio url by slicing the embedded player JSON out of the
            # raw page text
            audio_url = response.split('"file":{"mp3-128":"')[-1].split(
                '"}')[0]
            meta = meta.split(", by ")
            track = meta[0]  # track name
            artist = meta[1]  # artist name
            # Local file name wgetter will produce for the audio URL.
            tmp_file = response.split('/mp3-128/')[-1].split('?')[0]
        except:
            # NOTE(review): sys.exit() kills the whole bot process on any
            # scrape failure -- confirm this is intended.
            sys.exit()
        dirc = UPLOAD_DIR + artist  # per-artist directory
        try:
            os.mkdir(dirc)
            # download audio to created dir
            wgetter.download(audio_url, outdir=dirc)
        except:
            bot.send_message(message.chat.id,
                             'error occured while downloading track')
        old_file = '%s/%s' % (dirc, tmp_file)
        # create new name for downloaded file:
        new_file = '%s/%s - %s.mp3' % (dirc, artist, track)
        try:
            # download song cover
            wgetter.download(cover_url, outdir=dirc)
        except:
            bot.send_message(message.chat.id,
                             'error occured while downloading image')
        # rename audio file
        os.rename(old_file, new_file)
        # Write ID3 tags; files without an existing ID3 header need tags
        # added first.
        try:
            audiofile = EasyID3(new_file)
            audiofile['artist'] = artist
            audiofile['title'] = track
            audiofile.save()
        except mutagen.id3.ID3NoHeaderError:
            audiofile = mutagen.File(new_file, easy=True)
            audiofile.add_tags()
            audiofile['artist'] = artist
            audiofile['title'] = track
            audiofile.save()
        # send files to chat
        files = os.listdir(dirc)
        for file_path in files:
            # Anything that is not the mp3 is treated as the cover image.
            if '.mp3' not in file_path:
                img_url = '%s/%s' % (dirc, file_path)
                # send cover
                try:
                    file = open(img_url, 'rb')
                    bot.send_photo(BOT_TAG, file)
                    # delete cover after sending
                    os.remove(img_url)
                    # send audio
                    file = open(new_file, 'rb')
                    bot.send_audio(BOT_TAG, file)
                    # delete audio and folder after sending
                    os.remove(new_file)
                    os.rmdir(dirc)
                except:
                    try:
                        # delete all files if upload fails
                        os.remove(img_url)
                        os.remove(new_file)
                        os.rmdir(dirc)
                    except:
                        print('upload failed')
    else:
        bot.send_message(message.chat.id, 'url is not valid')
import wgetter, gzip

# The four files making up the MNIST dataset (train/test images + labels).
file_names = [
    'train-images-idx3-ubyte', 'train-labels-idx1-ubyte',
    't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte'
]

# Iterate directly over the names instead of range(len(...)), and use
# context managers so the streams are closed even when a step fails
# (previously they were opened and closed by hand).
for file_name in file_names:
    wgetter.download('http://yann.lecun.com/exdb/mnist/%s.gz' % file_name)
    with gzip.open(file_name + '.gz', 'rb') as compressed:
        with open(file_name, 'wb') as out:
            out.write(compressed.read())
def downloadFile(self, fileID, folder_path):
    """Download a single IVLE workbin file into *folder_path* using the
    credentials stored on this instance (API_KEY / TOKEN)."""
    # Assemble the IVLE download endpoint from the stored credentials and
    # the requested file id.
    url = ('https://ivle.nus.edu.sg/api/downloadfile.ashx?APIKey='
           + self.API_KEY
           + '&AuthToken=' + self.TOKEN
           + '&ID=' + fileID
           + '&target=workbin')
    wgetter.download(url, outdir=folder_path)
# Read URLs from "<nameofpage>csv.csv" (first column, header skipped) and
# download each file into the "<nameofpage>/" directory, skipping files
# that are already present locally.
import wgetter
import csv
import os.path
import nameofpage as n

with open(n.nameofpage + 'csv.csv', 'r') as csvfile:
    spamreader = csv.reader(csvfile)
    for index, i in enumerate(spamreader):
        # Skip the header row.
        if index > 0:
            print i[0]
            # Local file name: last path component, query string stripped.
            l = i[0].split("/")[-1]
            if '?' in l:
                l = l.split("?")[0]
            print n.nameofpage + '/' + l
            if os.path.exists(n.nameofpage + '/' + l):
                print "already exists"
            else:
                wgetter.download(i[0], outdir=n.nameofpage + '/')
def setup_button(self):
    """Apply a full environment setup on Windows: set the desktop
    background, copy a files payload, install fonts system-wide, and
    download a list of software installers. Errors in each section are
    reported through qtObjects.error_message with a section-specific code.
    """
    ########## Background ##########
    try:
        # Pointer size distinguishes 64-bit (wide-char API) from 32-bit.
        if struct.calcsize('P') * 8 == 64:
            ctypes.windll.user32.SystemParametersInfoW(
                20, 0, src.paths.importBackgroundPath, 3)
        else:
            ctypes.windll.user32.SystemParametersInfoA(
                20, 0, src.paths.importBackgroundPath, 3)
    except:
        src.qtObjects.error_message("BGx03")
    ########## Files ##########
    shutil.copytree(src.paths.importFilesPath, src.paths.files_save_path)
    ########## Fonts ##########
    try:
        from ctypes import wintypes
        try:
            import winreg
        except ImportError:
            # Python 2 fallback name for the registry module.
            import _winreg as winreg
        user32 = ctypes.WinDLL('user32', use_last_error=True)
        gdi32 = ctypes.WinDLL('gdi32', use_last_error=True)
        FONTS_REG_PATH = r'Software\Microsoft\Windows NT\CurrentVersion\Fonts'
        HWND_BROADCAST = 0xFFFF
        SMTO_ABORTIFHUNG = 0x0002
        WM_FONTCHANGE = 0x001D
        GFRI_DESCRIPTION = 1
        GFRI_ISTRUETYPE = 3
        # Older ctypes versions lack LPDWORD; define it when missing.
        if not hasattr(wintypes, 'LPDWORD'):
            wintypes.LPDWORD = ctypes.POINTER(wintypes.DWORD)
        user32.SendMessageTimeoutW.restype = wintypes.LPVOID
        user32.SendMessageTimeoutW.argtypes = (wintypes.HWND, wintypes.UINT,
                                               wintypes.LPVOID,
                                               wintypes.LPVOID, wintypes.UINT,
                                               wintypes.UINT, wintypes.LPVOID)
        gdi32.AddFontResourceW.argtypes = (wintypes.LPCWSTR,)
        gdi32.GetFontResourceInfoW.argtypes = (wintypes.LPCWSTR,
                                               wintypes.LPDWORD,
                                               wintypes.LPVOID,
                                               wintypes.DWORD)
        for file_ in os.listdir(src.paths.importFontsPath):
            if file_.endswith('.ttf') or file_.endswith('.otf'):
                # Copy into the system Fonts folder, then register it.
                src_path = os.path.join(src.paths.importFontsPath, file_)
                dst_path = os.path.join(os.environ['SystemRoot'], 'Fonts',
                                        os.path.basename(src_path))
                shutil.copy(src_path, dst_path)
                if not gdi32.AddFontResourceW(dst_path):
                    # Registration failed: clean the copy up and abort.
                    os.remove(dst_path)
                    raise WindowsError(
                        'AddFontResource failed to load "%s"' % src_path)
                # Broadcast the font change to all top-level windows.
                user32.SendMessageTimeoutW(HWND_BROADCAST, WM_FONTCHANGE, 0,
                                           0, SMTO_ABORTIFHUNG, 1000, None)
                filename = os.path.basename(dst_path)
                fontname = os.path.splitext(filename)[0]
                cb = wintypes.DWORD()
                # Two-call pattern: first query the buffer size, then fetch
                # the font's display name.
                if gdi32.GetFontResourceInfoW(filename, ctypes.byref(cb),
                                              None, GFRI_DESCRIPTION):
                    buf = (ctypes.c_wchar * cb.value)()
                    if gdi32.GetFontResourceInfoW(filename, ctypes.byref(cb),
                                                  buf, GFRI_DESCRIPTION):
                        fontname = buf.value
                is_truetype = wintypes.BOOL()
                cb.value = ctypes.sizeof(is_truetype)
                gdi32.GetFontResourceInfoW(filename, ctypes.byref(cb),
                                           ctypes.byref(is_truetype),
                                           GFRI_ISTRUETYPE)
                if is_truetype:
                    fontname += ' (TrueType)'
                # Make the installation permanent via the Fonts registry key.
                with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, FONTS_REG_PATH,
                                    0, winreg.KEY_SET_VALUE) as key:
                    winreg.SetValueEx(key, fontname, 0, winreg.REG_SZ,
                                      filename)
    except:
        src.qtObjects.error_message("FNTx05")
    ########## Software ##########
    try:
        if not os.path.exists(src.paths.software_save_path):
            os.makedirs(src.paths.software_save_path)
        with open(src.paths.importSoftwareLinksPath, 'r') as r:
            software_list = r.readlines()
        software_list = [x.strip() for x in software_list]
        for software in software_list:
            # Scrape the real download URL from each page's
            # data-qa-download-url script attribute.
            link = software + 'post_download'
            request = requests.get(link)
            source = request.text
            soup = BeautifulSoup(source, 'html.parser')
            download_link = soup.find('script',
                                      type='text/javascript',
                                      attrs={'data-qa-download-url': True})
            download_link = download_link.get('data-qa-download-url')
            filename = wgetter.download(download_link,
                                        outdir=src.paths.software_save_path)
    except:
        src.qtObjects.error_message("SOFx02")
    src.qtObjects.success_message()
# Optionally remove a leftover temp file (debug mode), then download every
# feed entry not yet recorded in the shelf-style `db`, and finally pack all
# downloaded comic pages into a single CBZ archive. Depends on outer-scope
# names: `debug`, `f`, `d`, `db`, `COMIC_DIR`, `getint`.
if debug:
    with indent(5, quote=">"):
        puts(colored.red("Deleting temp file:" + f))
        os.remove(COMIC_DIR + f)
skipped = 0
for entry in d['entries']:
    # Titles are stored UTF-16LE-encoded as the db keys.
    if entry['title'].encode("utf-16le") not in db.keys():
        # The comic image URL is embedded in the entry's summary HTML.
        url = re.search('src="([^"]+)"',
                        entry['summary_detail']['value']).group(1)
        entry_date = datetime.fromtimestamp(
            (mktime(entry['published_parsed'])))
        puts(colored.white("Downloading comic: " +
                           entry['title'].encode("utf-16le")))
        with indent(5, quote=">"):
            # NOTE(review): this prints the AGE of the entry (now - date),
            # not the publication date the label suggests.
            puts(colored.white("Published on:" +
                               str(datetime.now() - entry_date)))
        filename = wgetter.download(url, outdir=COMIC_DIR)
        # Record the entry under its encoded title so it is skipped next run.
        entry['title'] = entry['title'].encode("utf-16le")
        db[entry['title']] = entry
    else:
        skipped = skipped + 1
db.close()

# Collect the downloaded pages in numeric order and zip them up.
comicfiles = [f for f in os.listdir(COMIC_DIR)]
comicfiles.sort(key=getint)
puts(colored.white("Creating CBZ file"))
zf = zipfile.ZipFile("dist/jl8-comics.cbz", "w")
for f in comicfiles:
    if f not in zf.namelist():
        puts(colored.yellow("Adding " + f + " to CBZ"))
        zf.write(COMIC_DIR + f, f)