def _audio_tdat(atuple):
    """Attach a TDAT (date, DDMM) frame to the audio object in *atuple*.

    *atuple* is (audio, atag, advanced, _, _).  In "advanced" mode *atag*
    is a Python-literal string whose second element holds the date text;
    otherwise *atag* itself is the date text.  Encoding 3 == UTF-8.
    """
    audio, atag, advanced = atuple[0], atuple[1], atuple[2]
    date_text = ast.literal_eval(atag)[1] if advanced else atag
    audio.add(TDAT(3, date_text))
def add_mp3_tags(fileobj, tags, cover=None, lyrics=None, image_mimetype='image/png'):
    """Write ID3 frames from the *tags* dict into the MP3 in *fileobj*.

    Recognised keys: artist, title, album, albumartist, genre, tracknumber,
    year, date, bpm, isrc, explicit.  Optional *lyrics* (USLT), *cover*
    bytes (APIC, with *image_mimetype*).  Saves in place and rewinds the
    file object so callers can re-read it.
    """
    handle = MP3(fileobj=fileobj)
    # Simple key -> text-frame mapping; genre needs `genres=`, handled below.
    simple_frames = (
        ('artist', 'TPE1', TPE1),
        ('title', 'TIT2', TIT2),
        ('album', 'TALB', TALB),
        ('albumartist', 'TPE2', TPE2),
        ('tracknumber', 'TRCK', TRCK),
        ('year', 'TYER', TYER),
        ('date', 'TDAT', TDAT),
        ('bpm', 'TBPM', TBPM),
        ('isrc', 'TSRC', TSRC),
        ('explicit', 'TXXX', TXXX),
    )
    for tag_key, frame_id, frame_cls in simple_frames:
        if tag_key in tags:
            handle[frame_id] = frame_cls(text=tags[tag_key])
    if 'genre' in tags:
        handle['TCON'] = TCON(genres=[tags['genre']])
    if lyrics:
        handle['USLT'] = USLT(text=lyrics)
    if cover:
        handle['APIC'] = APIC(data=cover, mime=image_mimetype)
    handle.save(fileobj)
    fileobj.seek(0)
def test_tyer_tdat(self):
    """TYER (year) + TDAT (DDMM) from a v2.3 tag merge into one v2.4 TDRC."""
    id3 = ID3()
    id3.version = (2, 3)
    id3.add(TYER(encoding=0, text="2006"))
    id3.add(TDAT(encoding=0, text="0603"))  # day 06, month 03
    id3.update_to_v24()
    # FIX: failUnlessEqual is a deprecated alias removed in Python 3.12;
    # use the canonical assertEqual instead.
    self.assertEqual(id3["TDRC"], "2006-03-06")
def test_multiple_tyer_tdat_time(self):
    """Multi-valued TYER/TDAT/TIME merge positionally into v2.4 TDRC stamps;
    unparsable entries ('19xx', 'foo', '1111bar', '1111quux') are dropped."""
    id3 = ID3()
    id3.version = (2, 3)
    id3.add(TYER(text=['2000', '2001', '2002', '19xx', 'foo']))
    id3.add(TDAT(text=['0102', '0304', '1111bar']))
    id3.add(TIME(text=['1220', '1111quux', '1111']))
    id3.update_to_v24()
    expected = ['2000-02-01 12:20:00', '2001-04-03', '2002']
    assert [str(stamp) for stamp in id3['TDRC']] == expected
def update_to_v23(self, join_with="/"):
    """Convert older (and newer) tags into an ID3v2.3 tag.

    This updates incompatible ID3v2 frames to ID3v2.3 ones. If you
    intend to save tags as ID3v2.3, you must call this function at
    some point.

    :param join_with: separator used when collapsing multi-valued
        text frames into the single value v2.3 allows.
    """
    # v2.2 raw unknown frames use 3-byte IDs and cannot be carried forward.
    if self.version < (2, 3, 0):
        del self.unknown_frames[:]

    # TMCL, TIPL -> TIPL
    # v2.4 splits people into musician (TMCL) and involved-people (TIPL);
    # v2.3 has only IPLS, so pool both lists into one frame.
    if "TIPL" in self or "TMCL" in self:
        people = []
        if "TIPL" in self:
            f = self.pop("TIPL")
            people.extend(f.people)
        if "TMCL" in self:
            f = self.pop("TMCL")
            people.extend(f.people)
        if "IPLS" not in self:
            # `f` is whichever frame was popped last; its encoding is reused.
            self.add(IPLS(encoding=f.encoding, people=people))

    # TODO:
    #  * EQU2 -> EQUA
    #  * RVA2 -> RVAD

    # TDOR (original release time) -> TORY (original release year)
    if "TDOR" in self:
        f = self.pop("TDOR")
        if f.text:
            d = f.text[0]
            if d.year and "TORY" not in self:
                self.add(TORY(encoding=f.encoding, text="%04d" % d.year))

    # TDRC (recording time) -> TYER, TDAT, TIME; only fields the timestamp
    # actually carries are emitted, and existing frames are never clobbered.
    if "TDRC" in self:
        f = self.pop("TDRC")
        if f.text:
            d = f.text[0]
            if d.year and "TYER" not in self:
                self.add(TYER(encoding=f.encoding, text="%04d" % d.year))
            if d.month and d.day and "TDAT" not in self:
                # v2.3 TDAT is DDMM — day first.
                self.add(
                    TDAT(encoding=f.encoding,
                         text="%02d%02d" % (d.day, d.month)))
            if d.hour and d.minute and "TIME" not in self:
                self.add(
                    TIME(encoding=f.encoding,
                         text="%02d%02d" % (d.hour, d.minute)))

    if "TCON" in self:
        # Self-assignment round-trips the genres through the property
        # setter, presumably to re-normalize the stored genre values.
        self["TCON"].genres = self["TCON"].genres

    if self.version < (2, 3):
        # ID3v2.2 PIC frames are slightly different.
        pics = self.getall("APIC")
        mimes = {"PNG": "image/png", "JPG": "image/jpeg"}
        self.delall("APIC")
        for pic in pics:
            # v2.2 stores a 3-char image format; map it to a MIME type.
            newpic = APIC(encoding=pic.encoding,
                          mime=mimes.get(pic.mime, pic.mime),
                          type=pic.type, desc=pic.desc, data=pic.data)
            self.add(newpic)

        # ID3v2.2 LNK frames are just way too different to upgrade.
        self.delall("LINK")

    # leave TSOP, TSOA and TSOT even though they are officially defined
    # only in ID3v2.4, because most applications use them also in ID3v2.3

    # New frames added in v2.4 — drop them entirely.
    for key in [
            "ASPI", "EQU2", "RVA2", "SEEK", "SIGN", "TDRL", "TDTG", "TMOO",
            "TPRO"
    ]:
        if key in self:
            del (self[key])

    for frame in self.values():
        # ID3v2.3 doesn't support UTF-8 (and WMP can't read UTF-16 BE),
        # so cap every frame's encoding at UTF-16 with BOM (1).
        if hasattr(frame, "encoding"):
            if frame.encoding > 1:
                frame.encoding = 1
        # ID3v2.3 doesn't support multiple values: join them, or — when the
        # values aren't join-able strings (TypeError) — keep only the first.
        if isinstance(frame, mutagen.id3.TextFrame):
            try:
                frame.text = [join_with.join(frame.text)]
            except TypeError:
                frame.text = frame.text[:1]
def __init_id3_tags(id3, major=3):
    """Populate *id3* with a fixed set of reference frames (test fixture).

    Attributes:
        id3      ID3 Tag object, mutated in place
        major    ID3 major version, e.g.: 3 for ID3v2.3

    NOTE(review): `major` is also passed as every frame's text *encoding*
    (3 happens to mean UTF-8); version number and text encoding are
    distinct concepts — confirm this reuse is intentional.
    """
    from mutagen.id3 import TRCK, TPOS, TXXX, TPUB, TALB, UFID, TPE2, \
        TSO2, TMED, TIT2, TPE1, TSRC, IPLS, TORY, TDAT, TYER
    id3.add(TRCK(encoding=major, text="1/10"))
    id3.add(TPOS(encoding=major, text="1/1"))
    id3.add(
        TXXX(encoding=major, desc="MusicBrainz Release Group Id",
             text="e00305af-1c72-469b-9a7c-6dc665ca9adc"))
    id3.add(TXXX(encoding=major, desc="originalyear", text="2011"))
    id3.add(
        TXXX(encoding=major, desc="MusicBrainz Album Type", text="album"))
    id3.add(
        TXXX(encoding=major, desc="MusicBrainz Album Id",
             text="e7050302-74e6-42e4-aba0-09efd5d431d8"))
    id3.add(TPUB(encoding=major, text="J&R Adventures"))
    id3.add(TXXX(encoding=major, desc="CATALOGNUMBER", text="PRAR931391"))
    id3.add(TALB(encoding=major, text="Don\'t Explain"))
    id3.add(
        TXXX(encoding=major, desc="MusicBrainz Album Status",
             text="official"))
    id3.add(TXXX(encoding=major, desc="SCRIPT", text="Latn"))
    id3.add(
        TXXX(encoding=major, desc="MusicBrainz Album Release Country",
             text="US"))
    id3.add(TXXX(encoding=major, desc="BARCODE", text="804879313915"))
    id3.add(
        TXXX(encoding=major, desc="MusicBrainz Album Artist Id",
             text=[
                 "3fe817fc-966e-4ece-b00a-76be43e7e73c",
                 "984f8239-8fe1-4683-9c54-10ffb14439e9"
             ]))
    id3.add(TPE2(encoding=major, text="Beth Hart & Joe Bonamassa"))
    id3.add(TSO2(encoding=major, text="Hart, Beth & Bonamassa, Joe"))
    id3.add(TXXX(encoding=major, desc="ASIN", text="B005NPEUB2"))
    id3.add(TMED(encoding=major, text="CD"))
    id3.add(
        UFID(encoding=major, owner="http://musicbrainz.org",
             data=b"f151cb94-c909-46a8-ad99-fb77391abfb8"))
    id3.add(TIT2(encoding=major, text="Sinner's Prayer"))
    id3.add(
        TXXX(encoding=major, desc="MusicBrainz Artist Id",
             text=[
                 "3fe817fc-966e-4ece-b00a-76be43e7e73c",
                 "984f8239-8fe1-4683-9c54-10ffb14439e9"
             ]))
    id3.add(TPE1(encoding=major, text=["Beth Hart & Joe Bonamassa"]))
    id3.add(
        TXXX(encoding=major, desc="Artists",
             text=["Beth Hart", "Joe Bonamassa"]))
    id3.add(TSRC(encoding=major, text=["NLB931100460", "USMH51100098"]))
    id3.add(
        TXXX(encoding=major, desc="MusicBrainz Release Track Id",
             text="d062f484-253c-374b-85f7-89aab45551c7"))
    # FIX: removed the stray trailing comma that previously turned this
    # statement into an accidental one-element tuple expression.
    id3.add(
        IPLS(encoding=major,
             people=[["engineer", "James McCullagh"],
                     ["engineer", "Jared Kvitka"],
                     ["arranger", "Jeff Bova"],
                     ["producer", "Roy Weisman"],
                     ["piano", "Beth Hart"],
                     ["guitar", "Blondie Chaplin"],
                     ["guitar", "Joe Bonamassa"],
                     ["percussion", "Anton Fig"],
                     ["drums", "Anton Fig"],
                     ["keyboard", "Arlan Schierbaum"],
                     ["bass guitar", "Carmine Rojas"],
                     ["orchestra", "The Bovaland Orchestra"],
                     ["vocals", "Beth Hart"],
                     ["vocals", "Joe Bonamassa"]]))
    id3.add(TORY(encoding=major, text="2011"))
    id3.add(TYER(encoding=major, text="2011"))
    id3.add(TDAT(encoding=major, text="2709"))  # DDMM: 27 September
def update_id3(self, path: str, track: beatport.Track):
    """Write Beatport *track* metadata into the ID3 tag of the file at *path*.

    Supports .mp3 (tag loaded/saved directly) and .aiff/.aif (tag lives
    inside the AIFF container).  Which frames are touched depends on
    self.config: update_tags selects the fields, overwrite forces
    replacement, id3v23 picks v2.3 (TYER+TDAT) vs v2.4 (TDRC/TDRL)
    date frames, replace_art re-downloads the cover.
    """
    # AIFF Check — AIFF keeps its ID3 tag inside the container
    aiff = None
    if path.endswith('.aiff') or path.endswith('.aif'):
        aiff = AIFF(path)
        f = aiff.tags
    else:
        f = ID3()
        f.load(path, v2_version=3, translate=True)
    # Update tags
    if UpdatableTags.title in self.config.update_tags and self.config.overwrite:
        f.setall('TIT2', [TIT2(text=track.title)])
    if UpdatableTags.artist in self.config.update_tags and self.config.overwrite:
        f.setall('TPE1', [
            TPE1(text=self.config.artist_separator.join(
                [a.name for a in track.artists]))
        ])
    if UpdatableTags.album in self.config.update_tags and (
            self.config.overwrite or len(f.getall('TALB')) == 0):
        f.setall('TALB', [TALB(text=track.album.name)])
    if UpdatableTags.label in self.config.update_tags and (
            self.config.overwrite or len(f.getall('TPUB')) == 0):
        f.setall('TPUB', [TPUB(text=track.label.name)])
    if UpdatableTags.bpm in self.config.update_tags and (
            self.config.overwrite or len(f.getall('TBPM')) == 0):
        f.setall('TBPM', [TBPM(text=str(track.bpm))])
    if UpdatableTags.genre in self.config.update_tags and (
            self.config.overwrite or len(f.getall('TCON')) == 0):
        f.setall('TCON',
                 [TCON(text=', '.join([g.name for g in track.genres]))])
    # Dates
    if UpdatableTags.date in self.config.update_tags:
        # ID3 v2.3: year in TYER, day+month as DDMM in TDAT; clear TDRC
        if self.config.id3v23 and (self.config.overwrite or
                                   (len(f.getall('TYER')) == 0 and
                                    len(f.getall('TDAT')) == 0)):
            date = track.release_date.strftime('%d%m')
            f.setall('TDRC', [])
            f.setall('TDAT', [TDAT(text=date)])
            f.setall('TYER', [TYER(text=str(track.release_date.year))])
        # ID3 v2.4: single ISO TDRC timestamp; clear the v2.3 pair
        if not self.config.id3v23 and (self.config.overwrite or
                                       len(f.getall('TDRC')) == 0):
            date = track.release_date.strftime('%Y-%m-%d')
            f.setall('TDAT', [])
            f.setall('TYER', [])
            f.setall('TDRC', [TDRC(text=date)])
    if UpdatableTags.key in self.config.update_tags and (
            self.config.overwrite or len(f.getall('TKEY')) == 0):
        f.setall('TKEY', [TKEY(text=track.id3key())])
    if UpdatableTags.publishdate in self.config.update_tags and (
            self.config.overwrite or len(f.getall('TDRL')) == 0):
        # f.setall('TORY', [TORY(text=str(track.publish_date.year))])
        # TDRL (release time) only exists in ID3v2.4
        if not self.config.id3v23:
            date = track.publish_date.strftime('%Y-%m-%d')
            f.setall('TDRL', [TDRL(text=date)])
    # Other keys
    if UpdatableTags.other in self.config.update_tags:
        f.add(TXXX(desc='WWWAUDIOFILE', text=track.url()))
        f.add(TXXX(desc='WWWPUBLISHER', text=track.label.url('label')))
    # Redownload cover
    if self.config.replace_art:
        try:
            url = track.art(self.config.art_resolution)
            r = requests.get(url)
            data = APIC(encoding=3,
                        mime='image/jpeg',
                        type=3,
                        desc=u'Cover',
                        data=r.content)
            f.delall('APIC')
            f['APIC:cover.jpg'] = data
        except Exception:
            # best-effort: keep existing art if the download fails
            logging.warning('Error downloading cover for file: ' + path)
    # FIX: was `aiff == None`; identity comparison is the correct idiom
    if aiff is None:
        if self.config.id3v23:
            f.save(path, v2_version=3, v1=0)
        else:
            f.save(path, v2_version=4, v1=0)
    else:
        aiff.save()
def download(broadcast, targetDir, reliveUrlTemplate):
    """Download one radio broadcast from the relive stream into *targetDir*
    as an MP3, then write ID3v2.3 metadata (station, title, chapters, cover).

    :param broadcast: broadcast descriptor dict (start/end, trackingInfos,
        publicationOf, items).
    :param targetDir: destination directory for the MP3 file.
    :param reliveUrlTemplate: URL template used to locate audio segments.
    """
    broadcastStartDT = parse(broadcast['start'])
    broadcastEndDT = parse(broadcast['end'])

    # build filename from channel, show title and broadcast datetime, while
    # escaping "bad" characters
    # FIX: raw string — '\w', '\s' etc. are invalid escapes in a plain
    # string literal and raise DeprecationWarning on modern Python.
    filename = os.path.join(
        targetDir,
        re.sub(
            r'[^\w\s\-\.\[\]]', '_',
            broadcast['trackingInfos']['pageVars']['broadcast_service'] +
            ' ' + broadcastStartDT.astimezone(
                pytz.timezone('Europe/Berlin')).strftime("%Y-%m-%d %H:%M") +
            ' ' + broadcast['trackingInfos']['pageVars']['topline']) +
        ".mp3")

    # skip broadcast if the file already exists
    if os.path.isfile(filename) and os.path.getsize(filename) > 0:
        print("%s already exists, skipping." % filename, flush=True)
        return

    # get links to all audio segments of this broadcast
    segmentUrls = getSegmentUrls(broadcastStartDT, broadcastEndDT,
                                 reliveUrlTemplate)
    if segmentUrls is None:
        # skip broadcast if no segments available
        print("Skipping %s, not yet in relive" % filename)
        return

    # download all ts segments, and convert them to mp3
    print("Downloading %s ..." % filename, end=" ", flush=True)
    try:
        sound = AudioSegment.empty()
        for i in segmentUrls:
            sound += AudioSegment.from_file(BytesIO(urlopen(i).read()))
        sound.export(filename, format="mp3")
    # FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; best-effort behavior (print + return) is kept.
    except Exception:
        print("failed.", flush=True)
        return
    else:
        print("done.", flush=True)

    # ID3: remove all tags
    try:
        tags = ID3(filename)
        tags.delete()
    except ID3NoHeaderError:
        tags = ID3()

    # ID3: save as much information as possible in the ID3 tags
    tags.add(
        TRSN(text=[
            broadcast['trackingInfos']['pageVars']['broadcast_service']
        ]))
    tags.add(
        TPE1(text=[
            broadcast['trackingInfos']['pageVars']['broadcast_service']
        ]))
    tags.add(
        TALB(text=[
            " - ".join(
                list(
                    dict.fromkeys([
                        broadcast['trackingInfos']['pageVars']['topline'],
                        broadcast['trackingInfos']['pageVars']['title']
                    ])))
        ]))
    tags.add(TRCK(text=['1/1']))
    #tags.add(TIT2(text=[broadcastStartDT.astimezone(pytz.timezone('Europe/Berlin')).strftime("%Y-%m-%d %H:%M")]))
    tags.add(TIT2(text=[broadcast['publicationOf']['title']]))
    tags.add(
        COMM(lang="deu", desc="desc",
             text=[broadcast['publicationOf']['description']]))
    tags.add(
        TYER(text=[
            broadcastStartDT.astimezone(pytz.timezone(
                'Europe/Berlin')).strftime("%Y")
        ]))
    tags.add(
        TDAT(text=[
            broadcastStartDT.astimezone(pytz.timezone(
                'Europe/Berlin')).strftime("%d%m")
        ]))
    tags.add(
        TIME(text=[
            broadcastStartDT.astimezone(pytz.timezone(
                'Europe/Berlin')).strftime("%H%M")
        ]))
    tags.add(
        TLEN(text=[
            int((broadcastEndDT - broadcastStartDT).total_seconds() * 1000)
        ]))
    tags.add(WOAS(url=broadcast['publicationOf']['canonicalUrl']))
    tags.add(WORS(url="https://www.br.de/radio/"))

    # ID3: chapters
    chapterNr = 0
    for chapter in broadcast['items']:
        chapterStartDT = parse(chapter['start'])
        if 'duration' in chapter and chapter['duration'] is not None:
            chapterEndDT = chapterStartDT + timedelta(
                seconds=chapter['duration'])
        else:
            chapterEndDT = broadcastEndDT
        artists = []
        for i in ['performer', 'author']:
            if i in chapter and chapter[i] is not None and len(chapter[i]) > 0:
                artists.append(chapter[i])
        titles = []
        for i in ['title']:
            if i in chapter and chapter[i] is not None and len(chapter[i]) > 0:
                titles.append(chapter[i])
        tags.add(
            CHAP(element_id=chapterNr,
                 start_time=floor(
                     (chapterStartDT - broadcastStartDT).total_seconds() *
                     1000),
                 end_time=ceil(
                     (chapterEndDT - broadcastStartDT).total_seconds() *
                     1000),
                 sub_frames=[
                     TIT2(text=[
                         " - ".join([" ".join(artists), " ".join(titles)])
                     ])
                 ]))
        chapterNr += 1
    tocList = ",".join([str(i) for i in range(0, chapterNr)])
    # NOTE(review): child_element_ids receives a single comma-joined string
    # instead of one id per list entry — confirm players accept this.
    tags.add(
        CTOC(element_id="toc",
             flags=CTOCFlags.TOP_LEVEL | CTOCFlags.ORDERED,
             child_element_ids=[tocList],
             sub_frames=[TIT2(text=["Table Of Contents"])]))

    # ID3: cover image
    response = requests.get(
        broadcast['publicationOf']['defaultTeaserImage']['url'], timeout=5)
    if response.status_code == 200:
        tags.add(
            APIC(mime=response.headers['content-type'],
                 desc="Front Cover",
                 data=response.content))

    # save ID3 tags
    tags.save(filename, v2_version=3)
def main(self):
    """Process every entry of self.inputList: download the video and/or
    audio with youtube-dl, tag the MP3 with ID3 metadata, and loudness-
    normalize the result into the output folder tree
    (output/<albumartist>/<album>/<title>).

    NOTE(review): indentation was reconstructed from a collapsed source
    dump — confirm block nesting against the original file.
    """
    # Options shared by both the video and the audio download runs.
    ydl_opts = {
        # 'rm_cachedir' : True, # delete by self.rm_cache_dir() before download
        'verbose' : True,
        'cachedir' : self.cacheFolder,
        'keepvideo' : True,
        'logger' : self.logger,
        'noprogress' : True,
        'progress_hooks': [self.ydl_hook]
    }
    #
    if self.rm_cache_dir:
        if os.path.isdir(self.cacheFolder):
            # self.cacheFolder will be recreated by youtube-dl later during download
            self.logger.info('INFO:: Removing cache folder: {}'.format(self.cacheFolder))
            shutil.rmtree(self.cacheFolder, ignore_errors=True)
        else:
            self.logger.debug('DEBUG:: Not removing cache folder: {}. It does not exist!!'.format(self.cacheFolder))
    #
    for od in self.inputList:
        # Each row: download link plus the tag metadata for the track.
        dlink = od[self.DLINK].strip().strip('"')              # download link
        albumartist = od[self.ALBUMARTIST].strip().strip('"')  # album artist
        album = od[self.ALBUM].strip().strip('"')              # album name
        song = od[self.TITLE].strip().strip('"')               # song name
        artist = od[self.ARTIST].strip().strip('"')            # artist
        genre = od[self.GENRE].strip().strip('"')              # genre
        # date = od[self.DATE].strip().strip('"') # year
        year = od[self.YEAR].strip().strip('"')                # year
        cover = od[self.PICTURE].strip().strip('"')            # cover picture
        # get download type
        getaudio, getvideo = self.get_dl_type(od[self.DLTYPE].strip().strip('"'))
        # non-alphanumeric change to underscore
        albumartist_fname = re.sub('[^0-9a-zA-Z]+', '_', albumartist)
        # albumartist_fpath: output/albumartist_fname/
        albumartist_fpath = os.path.join(self.outputFolder, albumartist_fname)
        album_fname = re.sub('[^0-9a-zA-Z]+', '_', album)
        # album_fpath: output/albumartist_fname/album_fname/
        album_fpath = os.path.join(albumartist_fpath, album_fname)
        song_fname = re.sub('[^0-9a-zA-Z]+', '_', song)  # title (without file ext)
        song_fpath = os.path.join(album_fpath, song_fname)  # title full path (without file ext)
        #
        audio_tmp_albumartist_fpath = os.path.join(self.tempAudioFolder, albumartist_fname)  # folder
        audio_tmp_album_fpath = os.path.join(audio_tmp_albumartist_fpath, album_fname)  # folder
        audio_tmp_fpath = os.path.join(audio_tmp_album_fpath, song_fname)  # folder
        video_tmp_albumartist_fpath = os.path.join(self.tempVideoFolder, albumartist_fname)  # folder
        video_tmp_album_fpath = os.path.join(video_tmp_albumartist_fpath, album_fname)  # filename, not folder
        video_tmp_fpath = os.path.join(video_tmp_album_fpath, song_fname)  # filename, not folder
        #
        # Create the whole target/temp folder tree up front.
        for d in (video_tmp_albumartist_fpath, video_tmp_album_fpath,
                  audio_tmp_albumartist_fpath, audio_tmp_album_fpath,
                  albumartist_fpath, album_fpath):
            try:
                os.makedirs(d)
            except OSError:
                # NOTE(review): `continue` silently skips a folder that
                # could not be created AND does not exist — later file
                # writes into it will fail; `raise` looks intended.
                if not os.path.isdir(d):
                    continue
        #
        isyoutube = self.isYoutubeLink(dlink)
        # Non-YouTube sources are kept as-is; YouTube (or forced) -> MKV.
        if self.convert_to_mkv is True or isyoutube is True:
            converttomkv = True
        else:
            converttomkv = False
        #
        try:
            # VIDEO (including its audio, of course)
            self.downloaded_fname = None  # set by ydl_hook during download
            if getvideo:
                self.logger.info('')
                self.logger.info('INFO:: Processing Video "{}"'.format(song))
                # best guess if we already downloaded video file from previous runs
                videotmpfpath = self.downloaded_video_file_exist(video_tmp_fpath)
                # best guess if we already have target normalized video file
                videofpath = self.downloaded_video_file_exist(song_fpath)
                #
                # 1. Download video. Input: youtube link, output: file in
                #    videotmpfpath folder
                #
                if videofpath is not None:
                    self.logger.debug('DEBUG:: Skip: Target video file "{}" already exist!'.format(videofpath))
                else:
                    if videotmpfpath is not None:
                        self.logger.debug('DEBUG:: Skip downloading: Video file "{}" already exist!'.format(videotmpfpath))
                    else:
                        self.logger.info('INFO:: Downloading video: {} ...'.format(song))
                        vpostprocessors = []
                        ydl_video_opts = {
                            'outtmpl' : video_tmp_fpath + '.%(ext)s'
                        }
                        #
                        if converttomkv:
                            vpostprocessors.append({
                                'key' : 'FFmpegVideoConvertor',
                                'preferedformat': 'mkv'
                            })
                        #
                        if isyoutube:
                            # YouTube: best streams + embedded English subs.
                            ydl_video_opts.update({'format' : 'bestvideo+bestaudio'})
                            ydl_video_opts.update({'writesubtitles' : True})   # --write-sub
                            ydl_video_opts.update({'embedsubtitles' : True})   # --embed-subs
                            # ydl_video_opts.update({'subtitlesformat' : 'srt'})
                            ydl_video_opts.update({'subtitleslangs' : ['en']})
                            vpostprocessors.append({'key' : 'FFmpegEmbedSubtitle'})
                        #
                        if vpostprocessors:
                            ydl_video_opts.update({'postprocessors' : vpostprocessors})
                        #
                        self.logger.debug('DEBUG:: Downlink: {}'.format(dlink))
                        self.logger.debug('DEBUG:: Options: {}'.format({**ydl_opts, **ydl_video_opts}))
                        try:
                            with youtube_dl.YoutubeDL({**ydl_opts, **ydl_video_opts}) as ydl:
                                ydl.download([dlink])
                        except youtube_dl.utils.YoutubeDLError:
                            # wrap in the project error; caught at loop level
                            raise DLYoutubeDLError('Abort downloading Video "{}"'.format(song))
                        except Exception:
                            raise
                    # we now know the actual downloaded filename including the extension
                    if converttomkv:
                        # youtube downloaded filename: *.f251.mp4 and *.f251.webm
                        videotmpfpath = video_tmp_fpath + '.mkv'
                    else:
                        # non youtube downloaded filename: *.mp4
                        videotmpfpath = self.downloaded_fname  # output of YoutubeDL, as input for FFmpegNormalize
                    #
                    # _ , ext = os.path.splitext(videotmpfpath)
                    ext = '.mkv' if converttomkv else os.path.splitext(videotmpfpath)[1]
                    videofpath = song_fpath + ext  # target output of FFmpegNormalize
                    # Delete existing target normalized video, if any
                    # try:
                    #     os.remove(videofpath)
                    # except OSError:
                    #     pass
                    #
                    # 2. Normalize video. Input video from videotmpfpath,
                    #    output video to videofpath
                    #
                    if os.path.isfile(videofpath):
                        self.logger.debug('DEBUG:: Skip normalizing: Target video file "{}" already exist!'.format(videofpath))
                    else:
                        if not isyoutube:
                            # no need normalize if it's not youtube
                            shutil.copyfile(videotmpfpath, videofpath)
                            self.logger.info('INFO:: Copying done. Output file: {}'.format(videofpath))
                        else:
                            self.logger.info('INFO:: Normalizing file {} ...'.format(videotmpfpath))
                            ffmpeg_normalize = FFmpegNormalize(
                                dual_mono = True,
                                progress = True,
                                audio_codec = 'libmp3lame',   # -c:a libmp3lame
                                audio_bitrate = '320k',       # -b:a 320k
                                target_level = -14.0          # -t -14
                            )
                            ffmpeg_normalize.add_media_file(videotmpfpath, videofpath)
                            ffmpeg_normalize.run_normalization()
                            self.logger.info('INFO:: Normalizing done. Output file: {}'.format(videofpath))
            # AUDIO (MP3)
            self.downloaded_fname = None
            if getaudio and isyoutube:
                self.logger.info('')
                self.logger.info('INFO:: Processing Audio "{}"'.format(song))
                # 3. Download Audio
                audiotmpfpath = audio_tmp_fpath + '.mp3'
                audiofpath = song_fpath + '.mp3'
                if os.path.isfile(audiofpath):
                    self.logger.debug('DEBUG:: Skip: Target audio file "{}" already exist!'.format(audiofpath))
                else:
                    if os.path.isfile(audiotmpfpath):
                        self.logger.debug('DEBUG:: Skip downloading: Audio file "{}" already exist!'.format(audiotmpfpath))
                    else:
                        self.logger.info('INFO:: Downloading audio "{}" ...'.format(song))
                        ydl_audio_opts = {
                            'format' : 'bestaudio',
                            'verbose' : True,
                            'postprocessors' : [{
                                'key' : 'FFmpegExtractAudio',
                                'preferredcodec' : 'mp3',
                                'preferredquality': '320',
                                'nopostoverwrites': False
                            }],
                            'outtmpl' : audio_tmp_fpath + '.%(ext)s'
                        }
                        pp = pprint.PrettyPrinter(indent=2)
                        self.logger.debug('DEBUG:: YoutubeDL Options: ' + pp.pformat({**ydl_opts, **ydl_audio_opts}))
                        try:
                            with youtube_dl.YoutubeDL({**ydl_opts, **ydl_audio_opts}) as ydl:
                                ydl.download([dlink])
                        except youtube_dl.utils.YoutubeDLError:
                            raise DLYoutubeDLError('Abort downloading Audio "{}"'.format(song))
                        except Exception:
                            raise
                    # 4. Update ID3 tag
                    self.logger.info('INFO:: Updating MP3 ID3 tag ...')
                    audio = ID3(audiotmpfpath)
                    # encoding=3 is UTF-8; class constants double as frame keys
                    audio[self.ALBUMARTIST] = TPE2(encoding=3, text=albumartist)
                    audio[self.ALBUM] = TALB(encoding=3, text=album)
                    audio[self.TITLE] = TIT2(encoding=3, text=song)
                    audio[self.ARTIST] = TPE1(encoding=3, text=artist)
                    audio[self.GENRE] = TCON(encoding=3, text=genre)
                    audio[self.YEAR] = TYER(encoding=3, text=year)
                    # NOTE(review): TDAT gets the *year* value, but v2.3 TDAT
                    # is a DDMM day+month field — confirm this is intended.
                    audio[self.DATE] = TDAT(encoding=3, text=year)
                    audio[self.DLINK] = LINK(encoding=3, url=dlink)
                    try:
                        with open(os.path.join(self.coverFolder, cover), 'rb') as albumart:
                            audio['APIC'] = APIC(encoding=3,
                                                 mime='image/jpg',
                                                 type=3,
                                                 desc=u'Cover',
                                                 data=albumart.read())
                    except Exception as e:
                        # cover art is optional; missing file is tolerated
                        self.logger.debug('DEBUG:: Skipped album art file error: {}'.format(e))
                    #
                    try:
                        audio.save()
                        self.logger.info('INFO:: Audio ID3 Tag completed on file: {}'.format(audiotmpfpath))
                        self.logger.info('INFO:: Title        : ' + audio[self.TITLE].text[0])
                        self.logger.info('INFO:: Album        : ' + audio[self.ALBUM].text[0])
                        self.logger.info('INFO:: Album Artist : ' + audio[self.ALBUMARTIST].text[0])
                        self.logger.info('INFO:: Artist       : ' + audio[self.ARTIST].text[0])
                        self.logger.info('INFO:: Genre        : ' + audio[self.GENRE].text[0])
                        self.logger.info('INFO:: Year         : ' + audio[self.YEAR].text[0])
                        self.logger.info('INFO:: Date         : ' + audio[self.DATE].text[0])
                        self.logger.info('INFO:: Link         : ' + audio[self.DLINK].url)
                    except Exception:
                        self.logger.error('ERROR:: Error on saving ID3 tag!')
                    #
                    # Since we've updated the metadata, we need to normalize
                    # again. Hence, delete target
                    try:
                        os.remove(audiofpath)
                    except OSError:
                        pass
                    # 5. Normalize audio
                    if os.path.isfile(audiofpath):
                        self.logger.debug('DEBUG:: Skip normalizing: Audio file "{}" already exist!'.format(audiofpath))
                    else:
                        self.logger.info('INFO:: Normalizing file: {}'.format(audiotmpfpath))
                        ffmpeg_normalize = FFmpegNormalize(
                            dual_mono = True,
                            progress = True,
                            audio_codec = 'libmp3lame',   # -c:a libmp3lame
                            audio_bitrate = '320k',       # -b:a 320k
                            target_level = -14.0          # -t -14
                        )
                        ffmpeg_normalize.add_media_file(audiotmpfpath, audiofpath)
                        ffmpeg_normalize.run_normalization()
                        self.logger.info('INFO:: Normalizing done. Output file: {}'.format(audiofpath))
        # Per-track error handling: log and continue with the next entry.
        except DLYoutubeDLError as e:
            self.logger.error('ERROR:: DLYoutubeDLError {}'.format(e))
        except Exception as e:
            self.logger.error('ERROR:: {}'.format(e))
# set ID3 tags try: tags = ID3(showInfo['parts'][partNo]['filepath'] + ".part") tags.delete() except ID3NoHeaderError: tags = ID3() tags.add(TRSN(text=[stationInfo['name']])) tags.add(TPE1(text=[stationInfo['name']])) tags.add(TALB(text=[showInfo['name']])) tags.add( TRCK(text=[str(partNo + 1) + "/" + str(len(showInfo['parts']))])) tags.add(TIT2(text=[showInfo['parts'][partNo]['title']])) tags.add(COMM(lang="deu", desc="desc", text=[showInfo['description']])) tags.add(TYER(text=[showInfo['start_dt'].strftime("%Y")])) tags.add(TDAT(text=[showInfo['start_dt'].strftime("%d%m")])) tags.add(TIME(text=[showInfo['start_dt'].strftime("%H%M")])) tags.add(TLEN(text=[showInfo['parts'][partNo]['duration_ms']])) tags.add(WOAS(url=showInfo['website'])) tags.add(WORS(url=stationInfo['website'])) for chapter in showInfo['parts'][partNo]['chapters']: tags.add( CHAP(element_id=chapter["id"], start_time=chapter["start_ms"], end_time=chapter["end_ms"], sub_frames=[TIT2(text=[chapter["title"]])])) tocList = ",".join([ chapter["id"] for chapter in showInfo['parts'][partNo]['chapters'] ])