def test_custom_headers(self):
    """A custom User-Agent passed via request_args must reach the server,
    and an empty request_args dict must not break a download."""
    agent = "pySmartDL/1.3.2"
    dl = pySmartDL.SmartDL(
        "http://httpbin.org/headers",
        request_args={"headers": {"User-Agent": agent}},
        progress_bar=False,
    )
    dl.start()
    echoed = dl.get_json()
    self.assertTrue(echoed['headers']['User-Agent'] == agent)

    # passing empty request_args
    dl = pySmartDL.SmartDL("http://httpbin.org/headers", request_args={}, progress_bar=False)
    dl.start()
    self.assertTrue(dl.isSuccessful())
def test_hash(self):
    """A correct sha256 digest passes verification; a bogus one fails and
    a HashFailedException appears among the recorded errors."""
    good = pySmartDL.SmartDL(self.res_7za920_mirrors, progress_bar=False,
                             connect_default_logger=self.enable_logging)
    good.add_hash_verification('sha256', self.res_7za920_hash)  # good hash
    good.start(blocking=False)  # no exceptions
    good.wait()
    self.assertTrue(good.isSuccessful())

    bad = pySmartDL.SmartDL(self.res_7za920_mirrors, progress_bar=False,
                            connect_default_logger=self.enable_logging)
    bad.add_hash_verification('sha256', 'a' * 64)  # bad hash
    bad.start(blocking=False)  # no exceptions
    bad.wait()
    self.assertFalse(bad.isSuccessful())
    self.assertTrue(any(isinstance(err, pySmartDL.HashFailedException)
                        for err in bad.get_errors()))
def download(dlurl, getjson=False, reuse=False):
    """Wrapper for pySmartDL.

    Uses locally cached files if the size is correct.
    Delete %TEMP%/pySmartDL if you get a corrupted download!

    Returns either a local file path (normalized), or a json dict when
    getjson is True. Falls back to backupdownload() on failure.

    Raises:
        Exception: when getjson is True and the fetch fails.
    """
    try:
        import pySmartDL
    except ImportError:
        # Best-effort, as before: if the module is missing, the NameError
        # below lands in the except branch and triggers the backup path.
        pass
    check = check_existing_download(dlurl, reuse)
    if check is not None:
        return check
    try:
        dlobj = pySmartDL.SmartDL(dlurl, timeout=20, request_args=request_args)
        dlobj.fetch_hash_sums()
        dlobj.start(blocking=True)
        if getjson:
            return dlobj.get_json()
        return os.path.normpath(dlobj.get_dest())
    except Exception:
        if getjson:
            # BUG FIX: the original message lacked spaces around the URL
            # ("Fetching JSON fromhttp://...failed!").
            raise Exception("Fetching JSON from " + dlurl + " failed!")
        print("Download failed! Trying backup method...")
        print(" ")
        return os.path.normpath(backupdownload(dlurl))
def test_basic_auth(self):
    """Credentials set via add_basic_authentication() must authenticate."""
    endpoint = "http://httpbin.org/basic-auth/user/passwd"
    dl = pySmartDL.SmartDL(endpoint, progress_bar=False)
    dl.add_basic_authentication('user', 'passwd')
    dl.start()
    payload = json.loads(dl.get_data())
    self.assertTrue(payload['authenticated'])
def download_video(self, url, name, destination):
    """Download *url* into *destination* as *name*, printing progress once
    per second, and return the final file path.

    Args:
        url: source URL for the video.
        name: target file name (also used in the progress line).
        destination: directory to place the file in (created if missing).
    """
    # BUG FIX: the old os.stat + bare `except:` swallowed every error and
    # also raced with other creators; exist_ok covers both cases cleanly.
    os.makedirs(destination, exist_ok=True)
    # BUG FIX: naive `destination + filename` produced a broken path when
    # the destination lacked a trailing separator.
    path = os.path.join(destination, name)
    obj = pySmartDL.SmartDL(url, destination, progress_bar=False, fix_urls=True)
    obj.start(blocking=False)
    location = obj.get_dest()
    while not obj.isFinished():
        # e.g. "*episode name*   0.38% done at 2.9 MB/s"
        percent = float("{0:.2f}".format(float(obj.get_progress()) * 100))
        print(name + "\t " + str(percent) + "% done at "
              + pySmartDL.utils.sizeof_human(obj.get_speed(human=False)) + "/s")
        time.sleep(1)
    # BUG FIX: isFinished() is always True after the loop, so the failure
    # branch was unreachable; isSuccessful() is the real completion check.
    if obj.isSuccessful():
        time.sleep(3)
        os.rename(location, path)
    else:
        print("Download of " + name + " failed")
    return path
def test_pause_unpause(self, testfile=None):
    """Pausing must freeze the downloaded byte count; unpausing must resume it."""
    source = testfile if testfile else self.res_7za920_mirrors
    obj = pySmartDL.SmartDL(source, dest=self.dl_dir, progress_bar=False,
                            connect_default_logger=self.enable_logging)
    obj.start(blocking=False)
    while not obj.get_dl_size():
        time.sleep(0.1)

    # pause
    obj.pause()
    time.sleep(0.5)
    if obj.get_status() == "finished":
        # Too bad: the file was small enough to complete before we paused.
        # Retry once with a bigger file.
        if self.res_testfile_100mb == testfile:
            self.fail("The download got completed before we could stop it, even though we've used a big file. Are we on a 100GB/s internet connection or somethin'?")
        return self.test_pause_unpause(testfile=self.res_testfile_100mb)

    frozen = obj.get_dl_size()
    time.sleep(2.5)  # verify the download has really stopped
    self.assertEqual(frozen, obj.get_dl_size())

    # continue
    obj.unpause()
    time.sleep(2.5)
    self.assertNotEqual(frozen, obj.get_dl_size())
    obj.wait()
    self.assertTrue(obj.isSuccessful())
def test_timeout(self):
    """A 3s timeout against a 10s delay must raise socket.timeout; a 15s
    timeout against a 3s delay must succeed."""
    self.assertRaises(
        socket.timeout, pySmartDL.SmartDL, "https://httpbin.org/delay/10",
        progress_bar=False, timeout=3,
        connect_default_logger=self.enable_logging)
    dl = pySmartDL.SmartDL("https://httpbin.org/delay/3", progress_bar=False,
                           timeout=15,
                           connect_default_logger=self.enable_logging)
    dl.start(blocking=False)
    dl.wait()
    self.assertTrue(dl.isSuccessful())
def download(self, urls, dest=None, file_name=None, debug=None):
    """Download method for managing the downloads.

    Args:
        urls: a single URL or a list of URLs; each is downloaded separately.
        dest: destination directory; defaults to self.dest.
        file_name: optional fixed file name; when omitted it is derived from
            each URL's path.
        debug: when truthy, connect pySmartDL's default logger.

    Returns:
        A list with one entry per URL: the downloaded path, or None on failure.
    """
    if not dest:
        dest = self.dest
    if not debug:
        debug = False
    if not isinstance(urls, list):
        urls = [urls]
    return_list = list()
    for url in urls:
        # BUG FIX: derive the name per-URL. Previously file_name was only
        # computed for the first URL and then reused, so every later URL
        # overwrote the same destination path.
        if file_name:
            target_name = file_name
        else:
            urlparser = urlparse.urlparse(url)
            target_name = urlparser.path.split("/")[-1]
        return_path = os.path.join(dest, target_name)
        # BUG FIX: download the current url only; the old code passed the
        # entire list to SmartDL on every loop iteration, re-downloading the
        # whole mirror set once per URL.
        download_obj = pySmartDL.SmartDL(urls=url,
                                         progress_bar=self.progress_bar,
                                         dest=return_path,
                                         connect_default_logger=debug)
        download_obj.start(blocking=True)
        if download_obj.isSuccessful():
            return_list.append(return_path)
        else:
            return_list.append(None)
    return return_list
def test_mirrors(self):
    """A dead first mirror must fall through to the working one."""
    mirrors = [
        "http://totally_fake_website/7za.zip",
        "http://mirror.ufs.ac.za/7zip/9.20/7za920.zip",
    ]
    dl = pySmartDL.SmartDL(mirrors, dest=self.dl_dir, progress_bar=False)
    dl.start()
    self.assertTrue(dl.isSuccessful())
def test_download(self):
    """The downloaded archive must start with the zip 'PK' magic bytes."""
    dl = pySmartDL.SmartDL(self.res_7za920_mirrors, dest=self.dl_dir,
                           progress_bar=False,
                           connect_default_logger=self.enable_logging)
    dl.start()
    magic = dl.get_data(binary=True, bytes=2)
    self.assertEqual(magic, b'PK')
def test_stop(self):
    """Stopping an in-flight download must leave it unsuccessful."""
    dl = pySmartDL.SmartDL(self.res_testfile_100mb, dest=self.dl_dir,
                           progress_bar=False,
                           connect_default_logger=self.enable_logging)
    dl.start(blocking=False)
    while not dl.get_dl_size():
        time.sleep(0.1)
    dl.stop()
    dl.wait()
    self.assertFalse(dl.isSuccessful())
def test_download(self):
    """A completed download shows a full progress bar, yields 'PK' magic
    bytes, and refuses to be started again."""
    dl = pySmartDL.SmartDL(self.res_7za920_mirrors, dest=self.dl_dir,
                           progress_bar=False,
                           connect_default_logger=self.enable_logging)
    dl.start()
    self.assertEqual(dl.get_progress_bar(), '[##################]')
    magic = dl.get_data(binary=True, bytes=2)
    self.assertEqual(magic, b'PK')

    # attempt to start a completed task
    with self.assertRaises(RuntimeError) as ctx:
        dl.start()
def test_mirrors(self):
    """A dead first mirror must fall through to the working GitHub one."""
    mirrors = [
        "http://totally_fake_website/7za.zip",
        "https://github.com/iTaybb/pySmartDL/raw/master/test/7za920.zip",
    ]
    dl = pySmartDL.SmartDL(mirrors, dest=self.dl_dir, progress_bar=False,
                           connect_default_logger=self.enable_logging)
    dl.start()
    self.assertTrue(dl.isSuccessful())
def test_download(self):
    """The zip magic is b'PK' on Python 3 and 'PK' (str) on Python 2."""
    dl = pySmartDL.SmartDL(self.default_7za920_mirrors, dest=self.dl_dir,
                           progress_bar=False)
    dl.start()
    magic = dl.get_data(binary=True, bytes=2)
    expected = b'PK' if sys.version_info >= (3, ) else 'PK'
    self.assertEqual(magic, expected)
def test_hash(self):
    """A correct sha256 passes verification; a wrong one fails and leaves a
    HashFailedException as the most recent error."""
    good = pySmartDL.SmartDL(self.default_7za920_mirrors, progress_bar=False)
    good.add_hash_verification(
        'sha256',
        '2a3afe19c180f8373fa02ff00254d5394fec0349f5804e0ad2f6067854ff28ac'
    )  # good hash
    good.start(blocking=False)  # no exceptions
    good.wait()
    self.assertTrue(good.isSuccessful())

    bad = pySmartDL.SmartDL(self.default_7za920_mirrors, progress_bar=False)
    bad.add_hash_verification(
        'sha256',
        'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'
    )  # bad hash
    bad.start(blocking=False)  # no exceptions
    bad.wait()
    self.assertFalse(bad.isSuccessful())
    self.assertTrue(
        isinstance(bad.get_errors()[-1], pySmartDL.HashFailedException))
def test_speed_limiting(self):
    """After ~30s capped at 1 MB/s, the download size should be ~30 MB."""
    dl = pySmartDL.SmartDL(self.res_testfile_1gb, dest=self.dl_dir,
                           progress_bar=False,
                           connect_default_logger=self.enable_logging)
    dl.limit_speed(1024**2)  # 1MB per sec
    dl.start(blocking=False)
    while not dl.get_dl_size():
        time.sleep(0.1)
    time.sleep(30)
    expected_dl_size = 30 * 1024**2
    # Because we sample only 30s the tolerance must be generous; sampling
    # 60s would allow a much smaller delta.
    allowed_delta = 0.6
    diff = math.fabs(expected_dl_size - dl.get_dl_size()) / expected_dl_size
    dl.stop()
    dl.wait()
    self.assertLessEqual(diff, allowed_delta)
def _unzip_temporary(url: str, member: str, keep: bool = False) -> Iterator[IO[str]]:
    """Yield a decoded text reader for *member* inside the zip archive at
    *url*, downloading the archive first if it is not already cached locally.

    The archive is deleted after use unless it was already cached or *keep*
    is True.
    """
    if url.endswith('zip/allCountries.zip'):
        archive = P.join(pygeons.db.DEFAULT_SUBDIR, 'zip/allCountries.zip')
    else:
        archive = P.join(pygeons.db.DEFAULT_SUBDIR, P.basename(url))

    freshly_downloaded = not P.isfile(archive)
    if freshly_downloaded:
        pySmartDL.SmartDL(url, dest=archive).start()
        assert P.isfile(archive)

    with zipfile.ZipFile(archive) as fin_zip:
        yield codecs.getreader(_ENCODING)(fin_zip.open(member))

    if freshly_downloaded and not keep:
        os.unlink(archive)
def download(directory: os.path, resolution: str = 'low', texture_pack: int = 0,
             update_environment_variables=True):
    """Downloads MineRLv0 to specified directory. If directory is None, attempts to
    download to $MINERL_DATA_ROOT. Raises ValueError if both are undefined.

    Args:
        directory (os.path): destination root for downloading MineRLv0 datasets
        resolution (str, optional): one of [ 'low', 'high' ] corresponding to video
            resolutions of [ 64x64, 256,128 ] respectively (note: high resolution is
            not currently supported). Defaults to 'low'.
        texture_pack (int, optional): 0: default Minecraft texture pack,
            1: flat semi-realistic texture pack. Defaults to 0.
        update_environment_variables (bool, optional): enables / disables exporting of
            MINERL_DATA_ROOT environment variable (note: for some os this is only for
            the current shell). Defaults to True.

    Returns:
        The destination directory on success, or None when the download failed
        its hash check or was canceled.
    """
    if directory is None:
        if 'MINERL_DATA_ROOT' in os.environ and len(os.environ['MINERL_DATA_ROOT']) > 0:
            directory = os.environ['MINERL_DATA_ROOT']
        else:
            raise ValueError("Provided directory is None and $MINERL_DATA_ROOT is not defined")
    elif update_environment_variables:
        os.environ['MINERL_DATA_ROOT'] = os.path.expanduser(
            os.path.expandvars(os.path.normpath(directory)))

    # TODO pull JSON defining dataset URLS from webserver instead of hard-coding
    # TODO add hashed to website to verify downloads for mirrors
    filename = "data_texture_{}_{}_res.tar.gz".format(texture_pack, resolution)
    hashname = "data_texture_{}_{}_res.md5".format(texture_pack, resolution)
    urls = ["https://router.sneakywines.me/minerl/" + filename]
    hash_url = "https://router.sneakywines.me/minerl/" + hashname

    response = requests.get(hash_url)
    md5_hash = response.text

    obj = pySmartDL.SmartDL(urls, progress_bar=True, logger=logging.getLogger(__name__))
    obj.add_hash_verification('md5', md5_hash)
    try:
        obj.start()
    except pySmartDL.HashFailedException:
        # BUG FIX: the original 'finally' block ran extraction even after a
        # failed/canceled download, crashing on the incomplete archive.
        print("Hash check failed!")
        return None
    except pySmartDL.CanceledException:
        print("Download canceled by user")
        return None

    logging.info('Extracting downloaded files ... ')
    # Use a context manager so the tar handle is always closed.
    with tarfile.open(obj.get_dest(), mode="r:*") as tf:
        tf.extractall(path=directory)
    return directory
def test_pause_unpause_stop(self):
    """Pause freezes the byte count, unpause resumes it, and a subsequent
    stop leaves the download unsuccessful."""
    dl = pySmartDL.SmartDL(self.default_7za920_mirrors, dest=self.dl_dir,
                           progress_bar=False)
    dl.start(blocking=False)
    while not dl.get_dl_size():
        time.sleep(0.2)
    time.sleep(1)

    dl.pause()
    time.sleep(0.5)
    frozen = dl.get_dl_size()
    time.sleep(2.5)
    self.assertEqual(frozen, dl.get_dl_size())

    dl.unpause()
    time.sleep(0.5)
    self.assertNotEqual(frozen, dl.get_dl_size())

    dl.stop()
    dl.wait()
    self.assertFalse(dl.isSuccessful())
def download_video(link, filename, destination):
    """Download *link* into destination/filename with a one-line animated
    progress display, and return the final path.

    Args:
        link: source URL.
        filename: target file name (also shown in the progress line).
        destination: directory the file ends up in.
    """
    # BUG FIX: naive `destination + filename` produced a broken path when
    # destination lacked a trailing separator.
    path = os.path.join(destination, filename)
    obj = pySmartDL.SmartDL(link, destination, progress_bar=False, fix_urls=True)
    obj.start(blocking=False)
    location = obj.get_dest()
    dots_max = 10
    dots = 1
    while not obj.isFinished():
        pad = dots_max - dots
        percent = float("{0:.2f}".format(float(obj.get_progress()) * 100))
        print(filename + ' ' * 5 + '[DOWNLOADING ' + '.' * dots + ' ' * pad + ' '
              + str(percent) + '% '
              + pySmartDL.utils.sizeof_human(obj.get_speed(human=False))
              + '/s]', end='\r', flush=True)
        sys.stdout.write("\033[K")  # erase to end of line before next frame
        time.sleep(1)
        dots = 1 if dots == dots_max else dots + 1
    # BUG FIX: isFinished() is always True after the loop, so the failure
    # branch was unreachable; isSuccessful() is the real completion check.
    if obj.isSuccessful():
        print(filename + ' ' * 5 + '[DONE]')
        os.rename(location, path)
    else:
        print("DOWNLOAD OF " + filename + " FAILED")
    return path
def run(self):
    """Worker loop: pull a host URL off self.queue, match it against the
    global episode_list, and download the matching episode.

    episode_list rows are indexed positionally throughout:
      [0] page/source URL, [1] file name, [2] destination dir,
      [3] episode id (download_list key), [4] optional extra headers.
    NOTE(review): nesting below was reconstructed from whitespace-mangled
    source — confirm against upstream before relying on exact structure.
    """
    global count
    global download_list
    while True:
        host = self.queue.get()
        #print('url',host)
        # rows whose source URL contains this host
        nestlist = [x for x in episode_list if host in x[0]]
        if(nestlist):
            count = count + 1
            # skip episodes already downloaded to their final path
            if not os.path.isfile(nestlist[0][2] + nestlist[0][1]):
                episode = str(nestlist[0][3])
                #print("Download", episode)
                #urlretrieve(str(host).replace(" ","%20"), str(nestlist[0][2] + "temp/" + nestlist[0][1]))
                #path=nestlist[0][2] + "temp/" + nestlist[0][1]
                #location=obj.get_dest()
                if "9xbuddy" in nestlist[0][0]:
                    # 9xbuddy sources: plain streamed requests download
                    url = str(host).replace(" ", "%20")
                    file_name = nestlist[0][2] + nestlist[0][1]
                    console_output = "Downloading " + nestlist[0][1]
                    print(console_output)
                    #add to download list
                    try:
                        download_list[nestlist[0][3]] = console_output  # update
                    except KeyError:
                        download_list[nestlist[0][3]].append(console_output)  # initial
                    except Exception as e:
                        utils.slog(e)
                        utils.log("=E Download logic error")
                    response = requests.get(url, stream=True)
                    with open(file_name, 'wb') as out_file:
                        shutil.copyfileobj(response.raw, out_file)
                    del response
                    #remove from download list
                    try:
                        del download_list[nestlist[0][3]]  # remove
                    except Exception as e:
                        utils.slog(e)
                        utils.log("=E Unable to remove episode" + str(nestlist[0][3]))
                    print("Finished Downloading " + nestlist[0][1])
                else:
                    # everything else: pySmartDL into a temp/ staging dir,
                    # then moved into place on completion
                    try:
                        obj = pySmartDL.SmartDL(str(host).replace(" ", "%20"), nestlist[0][2] + "temp/" + nestlist[0][1], progress_bar=False, fix_urls=True)
                        if nestlist[0][4]:
                            obj.headers = nestlist[0][4]
                        obj.start(blocking=False)
                        while True:
                            if obj.isFinished():
                                break
                            progress = obj.get_progress() * 100
                            if obj.get_eta() > 0 and (obj.get_progress() * 100) < 100:
                                # publish a human-readable status line keyed by episode id
                                console_output = str(nestlist[0][1] + "\t " + str(float("{0:.2f}".format((float(obj.get_progress()) * 100)))) + "% done at " + pySmartDL.utils.sizeof_human(obj.get_speed(human=False)) + "/s, ETA: " + obj.get_eta(human=True))
                                try:
                                    download_list[nestlist[0][3]] = console_output  # update
                                except KeyError:
                                    download_list[nestlist[0][3]].append(console_output)  # initial
                                except Exception as e:
                                    utils.slog(e)
                                    utils.log("=E Download logic error")
                            time.sleep(1)
                            if progress == 100 and obj.get_eta() == 0:
                                time.sleep(1)
                        if obj.isFinished():
                            try:
                                del download_list[nestlist[0][3]]  # remove
                            except Exception as e:
                                utils.slog(e)
                                utils.log("=E Unable to remove episode" + str(nestlist[0][3]))
                            try:
                                if os.path.isfile(nestlist[0][2] + "temp/" + nestlist[0][1]):
                                    os.rename(nestlist[0][2] + "temp/" + nestlist[0][1], nestlist[0][2] + nestlist[0][1])  # move on download complete
                                    utils.log("Completed " + str(episode))
                                else:
                                    utils.log("=E Failed " + str(episode))
                            except Exception as e:
                                utils.slog(e)
                                utils.log("=E Failed moving " + str(nestlist[0][2] + "temp/" + nestlist[0][1]) + " to " + str(nestlist[0][2] + nestlist[0][1]))
                    except socket.timeout as e:
                        # NOTE(review): 'episode' may be unbound here if the
                        # timeout fires before it is assigned — verify upstream.
                        utils.log("=E Episode " + str(episode) + " timeout")
            count = count - 1
        self.queue.task_done()
def test_unicode(self):
    """A URL containing non-ASCII characters must download without raising."""
    target = "https://he.wikipedia.org/wiki/ג'חנון"
    dl = pySmartDL.SmartDL(target, progress_bar=False,
                           connect_default_logger=self.enable_logging)
    dl.start()
def rip(album_id, isTrack, isDiscog, isPlist, session, comment, formatId,
        alcovs, downloadDir, keep_cover, folderTemplate, filenameTemplate,
        albumNumber, albumTotal, appSecret):
    """Download a Qobuz album (or single track) with tags and cover art.

    High-level flow: fetch metadata -> build the per-album tag dict and
    download directory -> per track: build tags, sign a getFileUrl request,
    download with pySmartDL, tag (mp3 or flac), rename into place -> finish
    the cover file and optional PDF booklet.

    NOTE(review): this body was reconstructed from whitespace-mangled source;
    nesting and string continuations should be confirmed against upstream.
    formatId "5" means 320kbps MP3; anything else is treated as FLAC.
    alcovs selects the cover size ("-1" disables cover download entirely).
    """
    # choose output extension from the requested format id
    if formatId == "5":
        fext = ".mp3"
    else:
        fext = ".flac"
    if isTrack:
        # single-track mode: the track payload carries its album metadata
        response = session.get(
            "https://www.qobuz.com/api.json/0.2/track/get?",
            params={
                "track_id": album_id,
            },
        )
        albumMetadata = response.json()["album"]
        album_url = "https://play.qobuz.com/album/" + albumMetadata["id"]
        tracks = [response.json()]
    else:
        response = session.get(
            "https://www.qobuz.com/api.json/0.2/album/get?",
            params={
                "album_id": album_id,
            },
        )
        album_url = "https://play.qobuz.com/album/" + album_id
        albumMetadata = response.json()
        if albumMetadata.get("code") == 404 or not albumMetadata["streamable"]:
            print(
                "Album does not appear to be streamable, and so we cannot download it. Try searching the album name on the web player \
and if it's available, use the link there. Otherwise, you may be able to use a proxy or VPN to another region."
            )
            time.sleep(5)
            return
        try:
            tracks = [track for track in albumMetadata["tracks"]["items"]]
        except KeyError:
            print(
                "Could not fetch track information. This usually means that the album (or track if you put in one) is unavailable for your region. Please use a proxy or a VPN in another region \
or search the album name on the web player and use the link there.")
            time.sleep(5)
            return
    # resolve the cover-art URL for the requested size ("-1" = no cover)
    if alcovs == "3":
        album_cover_url = albumMetadata["image"]["large"][:-7] + "max.jpg"
    elif alcovs == "-1":
        pass
    else:
        album_cover_url = albumMetadata["image"][("thumbnail", "small",
                                                  "large")[alcovs]]
    download_headers = {
        "range": "bytes=0-",
        "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_16) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15",
        "referer": album_url
    }
    base_download_dir = Path(downloadDir)
    # album-level tags shared by every track
    parsedAlbumMetadata = {
        "ALBUM": getMetadata(albumMetadata, "Album", "title"),
        "ALBUMARTIST": getMetadata(albumMetadata, "Album Artist", "artist", "name"),
        "COMMENT": comment,
        "GENRE": getMetadata(albumMetadata, "Genre", "genre", "name"),
        "ORGANIZATION": getMetadata(albumMetadata, "Record Label", "label", "name"),
    }
    if isPlist:
        if getConfig("plistAlbumTags", True, "Tags").lower() == "y":
            parsedAlbumMetadata['TRACKTOTAL'] = str(
                getMetadata(albumMetadata, "Album", "tracks_count")).zfill(2)
        else:
            parsedAlbumMetadata['TRACKTOTAL'] = str(albumTotal).zfill(2)
    else:
        parsedAlbumMetadata['TRACKTOTAL'] = str(len(tracks)).zfill(2)
    # optionally swap () for [] around the {YEAR} part of the folder template
    if "(" in parsedAlbumMetadata['ALBUM'] and ")" in parsedAlbumMetadata[
            'ALBUM']:
        changeYearBrackets = getConfig('changeYearBrackets', False, 'Main')
        if changeYearBrackets and changeYearBrackets.lower() == "y":
            folderTemplate = re.sub(
                r"([^)]*{YEAR}[^(]*)",
                lambda matchobj: matchobj.group(0).replace("(", "[").replace(
                    ")", "]"), folderTemplate)
    if not comment:
        parsedAlbumMetadata.pop('COMMENT')
    elif comment.lower() == "url":
        parsedAlbumMetadata["COMMENT"] = albumMetadata['url']
    # take the year from the first release-date field the API returned
    date_fields = [
        "release_date_original", "release_date_stream", "release_date_download"
    ]
    date_field = 0
    while not parsedAlbumMetadata.get("YEAR"):
        try:
            parsedAlbumMetadata["YEAR"] = albumMetadata[
                date_fields[date_field]].split("-")[0]
        except KeyError:
            pass
        date_field += 1
        if date_field == 3:
            print("The API didn't return a year. Tag will be left empty, \
and you may want to report this on the GitHub with the album URL.")
            parsedAlbumMetadata["YEAR"] = ""
            break
    if not isPlist:
        album_download_dir = base_download_dir / sanitizeFilename(
            folderTemplate.format(**parsedAlbumMetadata))
    else:
        album_download_dir = base_download_dir
    if alcovs != "-1":
        coverobj = pySmartDL.SmartDL(album_cover_url,
                                     str(album_download_dir / "cover.jpg"),
                                     progress_bar=False,
                                     threads=1)
        coverobj.start()
    if isDiscog:
        print(
            f'Album {albumNumber} of {albumTotal}: {getMetadata(albumMetadata, "Album Artist", "artist", "name")} - {getMetadata(albumMetadata, "Album", "title")}:'
        )
    elif not isTrack:
        print(
            f'{getMetadata(albumMetadata, "Album Artist", "artist", "name")} - {getMetadata(albumMetadata, "Album", "title")}:'
        )
    for track in tracks:
        if isTrack:
            ver = tracks[0].get("version", str())
            if not isDiscog and not isPlist:
                if ver:
                    print(
                        f'{getMetadata(albumMetadata, "Album Artist", "artist", "name")} - {getMetadata(albumMetadata, "Album", "title")} ({ver}):'
                    )
                else:
                    print(
                        f'{getMetadata(albumMetadata, "Album Artist", "artist", "name")} - {getMetadata(albumMetadata, "Album", "title")}:'
                    )
        else:
            ver = track.get("version", str())
        if not isPlist:
            track_number = str(tracks.index(track) + 1).zfill(2)
        else:
            if getConfig("plistAlbumTags", True, "Tags").lower() == "y":
                track_number = str(track['track_number']).zfill(2)
            else:
                track_number = str(albumNumber).zfill(2)
        if not track["streamable"]:
            print(
                f"Track {track_number} is restricted by right holders. Can't download."
            )
            continue
        # per-track tags; album tags merged in afterwards
        metadata = {
            "ARTIST": getMetadata(track, "Artist", "performer", "name"),
            "COMPOSER": getMetadata(track, "Composer", "composer", "name"),
            "COPYRIGHT": getMetadata(track, "Copyright", "copyright"),
            "TITLE": getMetadata(track, "Title", "title"),
            "TRACKNUMBER": str(track_number),
            "ISRC": getMetadata(track, "ISRC", "isrc")
        }
        metadata.update(parsedAlbumMetadata)
        metadata["DATE"] = metadata.pop("YEAR")
        if getConfig("versionInTitle", True, "Tags").lower() == "y" \
                and ver \
                and ver not in metadata["TITLE"]:
            metadata['TITLE'] = f"{metadata['TITLE']} ({ver})"
        # drop any tag the user configured off
        metadata_keys = [key for key in metadata.keys()]
        for field in metadata_keys:
            try:
                if getConfig(field, False, "Tags").lower() == "n":
                    del metadata[field]
            except AttributeError:
                pass
        if getConfig('extendedMetadata', True, 'Tags').lower(
        ) == "y" and formatId != "5" and "performers" in track:
            performers = dict()
            # removing control characters; qobuz likes to put carriage returns in their extended metadata
            qobuzPerformers = re.sub(r'[\x00-\x1f\x7f-\x9f]', '',
                                     track["performers"])
            for performerItem in qobuzPerformers.split(" - "):
                if len(performerItem.split(", ")[:2]) >= 2:
                    person, role = performerItem.split(", ")[:2]
                else:
                    continue
                role = role.upper()
                if role != "UNKNOWN":
                    if performers.get(role, False):
                        performers[role].append(person)
                    else:
                        performers[role] = [person]
            for role, people in performers.items():
                if len(people) <= 1:
                    metadata[role] = people[0]
                elif len(people) > 1:
                    metadata[role + "s"] = ", ".join(people)
        # sign the getFileUrl request: md5 over a fixed concatenation plus a
        # known app secret (magic string)
        current_time = time.time()
        magic_strings = [
            "979549437fcc4a3faad4867b5cd25dcb",  # h.initialSeed("OTc5NTQ5NDM3ZmNjNGEzZmFhZDQ4Nj", window.utimezone.berlin)
            "75bad70145953840a998ddadc9bb1c03",  # h.initialSeed("NzViYWQ3MDE0NTk1Mzg0MGE5OThkZG", window.utimezone.dublin)
            "10b251c286cfbf64d6b7105f253d9a2e",  # h.initialSeed("MTBiMjUxYzI4NmNmYmY2NGQ2YjcxMD", window.utimezone.london)
            "2ab7131d383623cf403cf3d4676c56b6",  # h.initialSeed("MmFiNzEzMWQzODM2MjNjZjQwM2NmM2", window.utimezone.algier)
            "9b6f8d1e34febcd1fef94004026582fe",  # h.initialSeed("OWI2ZjhkMWUzNGZlYmNkMWZlZjk0MD", window.utimezone.paris)
            appSecret
        ]
        magic_string = magic_strings[
            0]  # TODO: iterate over each string and check if it works or not
        reqsigt = f"trackgetFileUrlformat_id{formatId}intentstreamtrack_id{track['id']}{current_time}{magic_string}"
        reqsighst = hashlib.md5(reqsigt.encode('utf-8')).hexdigest()
        responset = session.get(
            "https://www.qobuz.com/api.json/0.2/track/getFileUrl?",
            params={
                "request_ts": current_time,
                "request_sig": reqsighst,
                "track_id": track["id"],
                "format_id": formatId,
                "intent": "stream"
            })
        tr = responset.json()
        if isPlist:
            track_number = str(albumNumber).zfill(2)
        # a missing url, an explicit restriction, or a sample-only stream all
        # mean the track cannot be downloaded
        isRes = False
        try:
            finalurltr = tr['url']
        except KeyError:
            isRes = True
        else:
            if "restrictions" in tr:
                if "TrackRestrictedByRightHolders" in tr['restrictions']:
                    isRes = True
        if 'sample' in tr and tr['sample']:
            isRes = True
        if isRes:
            print(
                f"Track {track_number} is restricted by right holders. Can't download."
            )
            continue
        # download to a numeric temp name; renamed to the template name below
        temporary_filename = album_download_dir / f"{track_number}{fext}"
        songobj = pySmartDL.SmartDL(finalurltr,
                                    str(temporary_filename),
                                    request_args={"headers": download_headers})
        if formatId == "5":
            albumFormat = "320kbps MP3"
        else:
            try:
                albumFormat = f"{tr['bit_depth']} bits / {tr['sampling_rate']} kHz - {track['maximum_channel_count']} channels"
            except KeyError:
                albumFormat = "Unknown"
        if not isPlist:
            trTot = str(len(tracks)).zfill(2)
        else:
            trTot = albumTotal
        if ver:
            print(
                f"Downloading track {track_number} of {trTot}: {track['title']} ({ver}) - {albumFormat}"
            )
        else:
            print(
                f"Downloading track {track_number} of {trTot}: {track['title']} - {albumFormat}"
            )
        songobj.start()
        if alcovs != "-1":
            albumArt = (album_download_dir / "cover.jpg").open(mode='rb').read()
        else:
            albumArt = ""
        # tag according to container format
        if fext == ".mp3":
            add_mp3_tags(temporary_filename, metadata)
            if alcovs != "-1":
                add_mp3_cover(temporary_filename, albumArt)
        else:
            add_flac_tags(temporary_filename, metadata)
            if alcovs != "-1":
                add_flac_cover(temporary_filename,
                               album_download_dir / 'cover.jpg')
        filename = album_download_dir / sanitizeFilename(
            filenameTemplate.format(**metadata) + fext)
        if filename.exists():
            os.remove(filename)
        try:
            os.rename(temporary_filename, filename)
        except OSError:
            print(
                "Failed to rename track. Maybe it exceeds the max path length for your OS."
            )
    # cover housekeeping: delete it, or keep it renamed to folder.jpg
    if alcovs != "-1":
        if keep_cover.lower() == "n":
            if (album_download_dir / "cover.jpg").exists():
                os.remove(album_download_dir / "cover.jpg")
        else:
            if not (album_download_dir / "folder.jpg").exists():
                os.rename(album_download_dir / "cover.jpg",
                          album_download_dir / "folder.jpg")
            else:
                os.remove(album_download_dir / "cover.jpg")
    # optional PDF booklet ("goodie" with file_format_id 21)
    if not isPlist:
        if "goodies" in albumMetadata:
            if albumMetadata["goodies"][0]["file_format_id"] == 21:
                print("Booklet available, downloading...")
                bookletobj = pySmartDL.SmartDL(
                    albumMetadata["goodies"][0]["original_url"],
                    str(album_download_dir / "booklet.pdf"),
                    request_args={"headers": download_headers})
                bookletobj.start()
def test_unicode(self):
    """A URL containing non-ASCII characters must download without raising."""
    target = u"http://he.wikipedia.org/wiki/ג'חנון"
    dl = pySmartDL.SmartDL(target, progress_bar=False)
    dl.start()