def sendMail(to=MailToList, subject=None, contents=None, attachments=None, cc=None, bcc=None, preview_only=False, headers=None, newline_to_break=True):
    """Send an email using SMTP credentials from the [Email] config section.

    Parameters mirror the underlying ``email.SMTP.send`` call; ``to`` defaults
    to the module-level ``MailToList``. Failures are logged, never raised.
    """
    # Credentials/endpoint come from the [Email] section of the config file.
    user = config.read(config_section='Email', config_option='user')
    password = config.read(config_section='Email', config_option='password')
    host = config.read(config_section='Email', config_option='host')
    port = config.read(config_section='Email', config_option='port')
    try:
        # Context manager guarantees the SMTP connection is closed.
        with email.SMTP(user=user, password=password, host=host, port=port) as m:
            m.send(to=to, subject=subject, contents=contents,
                   attachments=attachments, cc=cc, bcc=bcc,
                   preview_only=preview_only, headers=headers,
                   newline_to_break=newline_to_break)
            logger.info(f'邮件发送成功!to:{to}')
    except Exception as e:
        # BUG FIX: logger.error(e) discarded the traceback; logger.exception
        # records the full stack so send failures can actually be diagnosed.
        logger.exception(e)
def run(input_file, c, meta):
    """Parse a Hypatia-format export and persist its stars to the database.

    input_file -- path to the custom Hypatia dump file
    c          -- database connection/cursor (closed before returning);
                  a falsy ``c`` skips all work
    meta       -- dict; ``meta['normalization']`` selects the solar normalization
    """
    p = HypatiaExoParser(input_file, meta['normalization'])
    if c:
        try:
            for star, elements, planets in p.next():
                try:
                    star.upsert(c)
                    logger.info('Saved star, "%s"\n', star.columns['hip'])
                    for catalogue, composition in elements:
                        # Assumption 251024: Because of the way the records are updated, if a catalogue is updated say
                        # from 'FeH 0.3 [Adamow et al. (2015)]' to 'FeH 0.3 [Adamow et al. (2016)]' a new catalogue is
                        # added and the particular composition for that catalogue will still be present with
                        # old catalogue in the composition table. For the above example the table composition will have
                        # 2 entries, one with 2015 catalogue and one with 2016
                        # Fix: Delete the star completely and add it again. Deleting a star, also deletes the corresponding
                        # composition elements, but catalogues are retained as other stars may still use it!
                        cid = catalogue.upsert(c)
                        composition.set('hip', star.columns['hip'])
                        composition.set('cid', cid)
                        composition.upsert(c)
                    for planet in planets:
                        planet.set('hip', star.columns['hip'])
                        planet.upsert(c)
                except Exception:
                    # BUG FIX: was a bare except, which also swallowed
                    # KeyboardInterrupt/SystemExit. One bad star must not
                    # abort the whole import, so log and continue.
                    logger.exception('Saving star failed: "%s"', star.columns['hip'])
        finally:
            # BUG FIX: close the connection even if the parser itself raises
            # mid-iteration; previously a parse error leaked the connection.
            c.close()
def getAlbum(savefilename, playlistindex, tracklist, flags, genre='', reset=True):
    """Record one album by driving Audacity + Grooveshark via synthetic mouse input.

    savefilename  -- base name (no extension) for the dump files under dumpdir
    playlistindex -- 1-based playlist slot to play (validated against maxplaylists)
    tracklist     -- first row is the album metadata tuple, remaining rows are
                     (title, length-in-seconds, ...) track entries
    flags         -- per-album option flags, written into the extended tracklist
    genre         -- appended to the metadata row before it is written out
    reset         -- when True, export the recording and reset the UI afterwards
    """
    # Row 0 is metadata; the rest are the actual tracks. Genre rides along.
    metadata, tracklist = tracklist[0], tracklist[1:]
    metadata = list(metadata) + [genre]
    s = ParseTables.writeextendedtracklist("%s.txt" % os.path.join(dumpdir, savefilename), flags, metadata, tracklist)
    logger.info("\n%s" % s)
    playlistindex = int(playlistindex)
    # Total album length: sum of per-track lengths (column 1).
    albumlen = sum((s[1] for s in tracklist))
    logger.info("Length: %d" % albumlen)
    if playlistindex > vs['Constants']['maxplaylists']:
        logger.error("Playlist index must be %d or below" % vs['Constants']['maxplaylists'])
        sys.exit(2)
    # Start recording in Audacity (window 1), then start playback (window 0).
    MouseDo(' '.join([Wait(2), Window(1), Move(*vs['Audacity']['record']), Click(),
                      Wait(5), Window(0), Play(playlistindex), Wait(2),
                      Move(*vs['Grooveshark']['safespot'])]))
    # Wait out the album plus 30s of slack; long albums use the remote Idle
    # command instead of blocking locally — TODO confirm why 3600s is the cutoff.
    if albumlen > 3600:
        Idle(albumlen + 30)
    else:
        time.sleep(albumlen + 30)
    # Stop the Audacity recording.
    MouseDo(' '.join([Move(100,100), Wait(1), Window(1), Move(*vs['Audacity']['stop']), Click()]))
    if reset:
        savepath = os.path.join(dumpdir, "%s.wav" % savefilename)
        # Export the WAV (File > Export, type path, confirm twice), undo the
        # recording (Ctrl+Z) so Audacity is clean, then clear the playlist.
        # Wait(albumlen / 35) gives the export time proportional to length.
        MouseDo(' '.join([Move(*vs['Audacity']['file']), Click(),
                          Move(*vs['Audacity']['export']), Click(),
                          Type(savepath), Enter(), Enter(),
                          Move(*vs['Audacity']['middle']), Wait(albumlen / 35), Click(),
                          Toggle('c'), Type('z'), Toggle('c'),
                          Window(0), Move(*vs['Grooveshark']['empty']), Click(),
                          Wait(1), Window(2)]))
    # Module-level flag: optionally split the freshly recorded WAV right away.
    if split:
        splitTrack(savefilename)
def checkcommands(c, pr=True):
    """Check that every command's metadata table can be read.

    c  -- iterable of (savefilename, genre, playlistindex, metadata, flags) tuples
    pr -- when True, also log the folded tracklist of each readable table
    Returns True only if ALL metadata locations are readable.
    """
    # BUG FIX: 'good' was re-initialized to True inside the loop, so the
    # return value only reflected the LAST command — any earlier failure
    # was forgotten. Initialize once, before the loop.
    good = True
    for savefilename, genre, playlistindex, metadata, flags in c:
        status, e = ParseTables.canread(metadata)
        if not status:
            logger.error("Can't read metadata from %s. (%s)" % (metadata, str(e)))
            good = False
        elif pr:
            # e is the parsed tracklist when the read succeeded.
            Util.foldtracklist(e, genre, flags)
            logger.info("%s\n%s\n" % (playlistindex, '\n'.join(['\t'.join(map(str, row)) for row in e])))
    return good
def __init__(self, filename):
    """Open *filename* as a WAV stream and prepare the splitter's state."""
    self.fileloc = filename
    self.wavfile = wave.open(filename, 'r')
    # First four wave params: nchannels, sampwidth, framerate, nframes.
    params = self.wavfile.getparams()
    self.numchannels, self.sampwidth, self.framerate, self.numframes = params[:4]
    # Little-endian struct format for one frame: one code per channel.
    self.structformat = '<' + formats[self.sampwidth] * self.numchannels
    self.framesize = self.sampwidth * self.numchannels
    logger.info("Channels: %d\tSample Width: %d\tFrames: %d"
                % (self.numchannels, self.sampwidth, self.numframes))
    # Accumulators populated later by parsewav().
    self.silences = []
    self.samples = np.empty((self.numframes, 2), dtype=np.int16)
    self.tracks = []
    self.trackmaxes = []
def main():
    """Top-level driver: record every queued album, or split one existing WAV.

    Reads module-level flags ``record``, ``split``, ``noreset`` and the parsed
    ``commands`` list; in split-only mode the album name comes from sys.argv[1].
    """
    if record:
        # Validate all metadata up front so a bad table aborts before recording.
        CheckCommands.checkcommands(commands, pr=False)
        # BUG FIX: the manual counter `i` was never incremented inside the loop
        # (the `i += 1` sat after the if/elif), so the noreset last-command
        # check below could never fire. enumerate() makes it correct.
        for i, (alname, genre, playlistindex, metadata, flags) in enumerate(commands):
            savefilename = Util.removepunctuation(alname.strip().replace(' ', '').lower())
            logger.info("Saving to: %s.wav" % os.path.join(dumpdir, savefilename))
            tracklist = ParseTables.gettracklist(metadata)
            logger.info("Metadata found at %s" % metadata)
            # Skip the post-recording export/reset only for the final album
            # when --noreset was requested.
            reset = not (noreset and i == len(commands) - 1)
            getAlbum(savefilename, playlistindex, tracklist, flags, genre, reset)
    elif split:
        savefilename = Util.removepunctuation(sys.argv[1].strip().replace(' ', '').lower())
        splitTrack(savefilename)
def addmetadata(folder, tracklist, metadata, num, discno):
    """Tag and rename the audio files in *folder* from a parsed tracklist.

    folder    -- directory containing the ripped tracks
    tracklist -- rows where row[0] is the title; indexed by (tracknumber - 1)
    metadata  -- [artist, album, year] or [artist, album, year, genre]
    num       -- when truthy, prefix filenames with the zero-padded track number
    discno    -- when truthy, prefix the disc number too: "<disc>-<nn> <title>"
    """
    files = os.listdir(folder)
    trackcount = len(tracklist)
    for f in files:
        pre, suf = f.rsplit('.', 1)
        # Does the filename already begin with a track number?
        numeric = numre.match(pre)
        if suf in endings:  # only touch recognized audio extensions
            fullname = os.path.join(folder, f)
            audio = MP3(fullname, ID3=EasyID3)
            if numeric:
                # Trust the number embedded in the filename.
                tn = int(numeric.group(1))
                audio['tracknumber'] = str(tn)
            else:
                # Fall back to the existing tag; "3/12" -> 3.
                tn = int(audio["tracknumber"][0].split('/')[0])
            title = tracklist[tn-1][0]
            audio['title'] = unicode(title, 'utf-8')
            audio['artist'] = unicode(metadata[0], 'utf-8')
            audio['album'] = unicode(metadata[1], 'utf-8')
            audio['date'] = unicode(str(metadata[2]), 'utf-8')
            audio['tracknumber'] = unicode("%d/%d" % (tn, trackcount), 'utf-8')
            if len(metadata) == 4:
                audio['genre'] = metadata[3]
            # Build the new filename from the canonical title.
            newf = title + os.extsep + suf
            newf = Util.filterchars(newf)
            if num:
                if discno:
                    newf = "%d-%s %s" % (discno, str(tn).zfill(2), newf)
                else:
                    newf = "%s %s" % (str(tn).zfill(2), newf)
            newpath = os.path.join(folder, newf)
            if len(newpath) > 255:
                # Path exceeds the filesystem limit: shorten the title to fit.
                # NOTE(review): this branch rebuilds the name WITHOUT the
                # disc-number prefix even when discno is set — confirm whether
                # dropping the disc prefix here is intentional.
                title = title[:len(title) - (len(newpath) - 255)]
                newf = title + os.extsep + suf
                newf = Util.filterchars(newf)
                if num:
                    newf = "%s %s" % (str(tn).zfill(2), newf)
            audio.save()
            logger.info(newf)
            os.rename(os.path.join(folder, f), os.path.join(folder, newf))
def testsplit(realtracks, wavfilename):
    """Compare WAVSplitter's detected track boundaries against known-good data.

    realtracks  -- flat list with two entries per track: the start interval at
                   index 2*i and the end interval at 2*i+1. Each interval's
                   elements [0] and [-2] bound the acceptable frame range and
                   the last element is a marker char ('|' = sharp boundary,
                   '<' = smooth start, '>' = smooth end).
    wavfilename -- WAV file to parse and check.
    Results are reported through the logger only; nothing is returned.
    """
    tracks = WAVSplitter(wavfilename).parsewav()
    for i, t in enumerate(tracks):
        # ---- start boundary ----
        if t['sharpstart']:
            if t['tstart'] < realtracks[2*i][0] or t['tstart'] > realtracks[2*i][-2]:
                # Out of range: also flag a kind mismatch unless expected sharp.
                if not realtracks[2*i][-1] == '|':
                    logger.error("Track %d NOT smooth start as expected." % i)
                logger.error("Track %d sharp start (%d) NOT found in expected interval [%d, %d]" % (i, t['tstart'], realtracks[2*i][0], realtracks[2*i][-2]))
            else:
                logger.info("Track %d start found successfully." % i)
        else:
            # Smooth start: only the upper bound applies.
            failed = False
            if t['tstart'] > realtracks[2*i][-2]:
                logger.error("Track %d smooth start (%d) NOT found in expected interval [, %d]" % (i, t['tstart'], realtracks[2*i][0]))
                failed = True
            if not realtracks[2*i][-1] == '<':
                logger.error("Track %d NOT sharp start as expected." % i)
                failed = True
            if not failed:
                logger.info("Track %d start found successfully." % i)
        # ---- end boundary ---- (tend is exclusive, hence the -1)
        end = t['tend'] - 1
        if t['sharpend']:
            if end < realtracks[2*i+1][0] or end > realtracks[2*i+1][-2]:
                if not realtracks[2*i+1][-1] == '|':
                    logger.error("Track %d NOT smooth end as expected." % i)
                logger.error("Track %d sharp end (%d) NOT found in expected interval [%d, %d]" % (i, end, realtracks[2*i+1][0], realtracks[2*i+1][-2]))
            else:
                logger.info("Track %d end found successfully." % i)
        else:
            # Smooth end: only the lower bound applies.
            failed = False
            if end < realtracks[2*i+1][0]:
                logger.error("Track %d smooth end (%d) NOT found in expected interval [%d, ]" % (i, end, realtracks[2*i+1][0], realtracks[2*i+1][-2]))
                failed = True
            if not realtracks[2*i+1][-1] == '>':
                logger.error("Track %d NOT sharp end as expected." % i)
                failed = True
            if not failed:
                logger.info("Track %d end found successfully." % i)
import ParseTables
import sys
import Util
from Config import logger, getcommands


def checkcommands(c, pr=True):
    """Check that every command's metadata table can be read.

    c  -- iterable of (savefilename, genre, playlistindex, metadata, flags) tuples
    pr -- when True, also log the folded tracklist of each readable table
    Returns True only if ALL metadata locations are readable.
    """
    # BUG FIX: 'good' was re-initialized to True inside the loop, so the
    # return value only reflected the LAST command — any earlier failure
    # was forgotten. Initialize once, before the loop.
    good = True
    for savefilename, genre, playlistindex, metadata, flags in c:
        status, e = ParseTables.canread(metadata)
        if not status:
            logger.error("Can't read metadata from %s. (%s)" % (metadata, str(e)))
            good = False
        elif pr:
            # e is the parsed tracklist when the read succeeded.
            Util.foldtracklist(e, genre, flags)
            logger.info("%s\n%s\n" % (playlistindex, '\n'.join(['\t'.join(map(str, row)) for row in e])))
    return good


if __name__ == '__main__':
    if checkcommands(getcommands(sys.argv)):
        logger.info("All metadata successfully located.")
def next(self):
    """Generator over stars parsed from the custom Hypatia export format.

    Yields (star, elements, planets) per star, where ``elements`` is a list
    of (Catalogue, Composition) pairs and ``planets`` a list of planet rows.
    Parse errors are re-raised for the caller to handle.
    """
    with open(self.path) as f:
        raw_stars = f.read().strip().split("\n\n")  # Assumption 251020: Each star is separated by ONE blank line
    logger.info("%s stars found in the file\n", len(raw_stars))
    for i, raw_star in enumerate(raw_stars):
        try:
            logger.info("Started parsing star (%s)", i)
            s = Star(None)
            elements = []  # a list of catalogue and composition instances for the star
            planets = []
            raw_star_attrs = raw_star.split("\n")  # Assumption 251021: Each attribute of star is on a new line
            for raw_attr in raw_star_attrs:
                # Split attributes and values of the star. They are separated by a '=' or ':'
                # Once split, the last entity is value and the last but one is the key
                attr_value = re.split(r'=|:', raw_attr)
                if len(attr_value) > 1:
                    key = attr_value[-2].lower().strip()
                    # Handle Multiple Planets Key
                    if attr_value[0].find('Multiple planets') >= 0:
                        key = 'multiple planets'
                    # Extract planet information
                    if re.match(r'\[[a-z]\]', attr_value[0]):  # if a line starts with [a-z]
                        p = self._parse_planet(raw_attr, s.columns.get('hip', 'UNK'))
                        if p:
                            planets.append(p)
                    value = attr_value[-1].strip()
                    if key in self.column_map:
                        # Splitting UVW and Position attributes
                        if key == 'uvw':
                            u, v, w = map(float, re.search(r'\((-?[0-9.]+), (-?[0-9.]+), (-?[0-9.]+)\)', value).groups())
                            s.set('u', self.handle_nulls(key, u))
                            s.set('v', self.handle_nulls(key, v))
                            s.set('w', self.handle_nulls(key, w))
                        elif key == 'position':
                            x, y, z = map(float, re.search(r'\[(-?[0-9.]+), (-?[0-9.]+), (-?[0-9.]+)\]', value).groups())
                            s.set('x', self.handle_nulls(key, x))
                            s.set('y', self.handle_nulls(key, y))
                            s.set('z', self.handle_nulls(key, z))
                        else:
                            # Handling NULL values
                            value = self.handle_nulls(key, value)
                            s.set(self.column_map[key], value)
                else:
                    # No key/value separator: try to read the line as an
                    # element composition, e.g. "FeH 0.3 [Author (Year)]".
                    comp_cat = re.match(r'(\w+)(.*)\[(.+)\]', raw_attr)
                    if comp_cat:
                        element, value, author_year = comp_cat.groups()
                        if re.match(r'blank', element):  # ignore if element is marked as blank
                            continue
                        catalogue = Catalogue(author_year)
                        composition = Composition(self.solarnorm, None, None, element, value)
                        elements.append((catalogue, composition))
                    else:
                        logger.warning('Unknown composition of star, "%s". Line unable to parse: "%s"', s.columns.get('hip', 'UNK'), raw_attr)
            yield s, elements, planets
        except:
            # Pass the exception to be handled at the higher level
            raise
def next(self):
    """Generator over stars parsed from the custom Hypatia export format.

    Yields (star, elements, planets) per star, where ``elements`` is a list
    of (Catalogue, Composition) pairs and ``planets`` a list of planet rows.
    Parse errors are re-raised for the caller to handle.
    """
    with open(self.path) as f:
        raw_stars = f.read().strip().split(
            "\n\n")  # Assumption 251020: Each star is separated by ONE blank line
    logger.info("%s stars found in the file\n", len(raw_stars))
    for i, raw_star in enumerate(raw_stars):
        try:
            logger.info("Started parsing star (%s)", i)
            s = Star(None)
            elements = []  # a list of catalogue and composition instances for the star
            planets = []
            raw_star_attrs = raw_star.split(
                "\n")  # Assumption 251021: Each attribute of star is on a new line
            for raw_attr in raw_star_attrs:
                # Split attributes and values of the star. They are separated by a '=' or ':'
                # Once split, the last entity is value and the last but one is the key
                attr_value = re.split(r'=|:', raw_attr)
                if len(attr_value) > 1:
                    key = attr_value[-2].lower().strip()
                    # Handle Multiple Planets Key
                    if attr_value[0].find('Multiple planets') >= 0:
                        key = 'multiple planets'
                    # Extract planet information
                    if re.match(r'\[[a-z]\]', attr_value[0]):  # if a line starts with [a-z]
                        p = self._parse_planet(raw_attr, s.columns.get('hip', 'UNK'))
                        if p:
                            planets.append(p)
                    value = attr_value[-1].strip()
                    if key in self.column_map:
                        # Splitting UVW and Position attributes
                        if key == 'uvw':
                            u, v, w = map(
                                float,
                                re.search(r'\((-?[0-9.]+), (-?[0-9.]+), (-?[0-9.]+)\)', value).groups())
                            s.set('u', self.handle_nulls(key, u))
                            s.set('v', self.handle_nulls(key, v))
                            s.set('w', self.handle_nulls(key, w))
                        elif key == 'position':
                            x, y, z = map(
                                float,
                                re.search(r'\[(-?[0-9.]+), (-?[0-9.]+), (-?[0-9.]+)\]', value).groups())
                            s.set('x', self.handle_nulls(key, x))
                            s.set('y', self.handle_nulls(key, y))
                            s.set('z', self.handle_nulls(key, z))
                        else:
                            # Handling NULL values
                            value = self.handle_nulls(key, value)
                            s.set(self.column_map[key], value)
                else:
                    # No key/value separator: try to read the line as an
                    # element composition, e.g. "FeH 0.3 [Author (Year)]".
                    comp_cat = re.match(r'(\w+)(.*)\[(.+)\]', raw_attr)
                    if comp_cat:
                        element, value, author_year = comp_cat.groups()
                        if re.match(r'blank', element):  # ignore if element is marked as blank
                            continue
                        catalogue = Catalogue(author_year)
                        composition = Composition(self.solarnorm, None, None, element, value)
                        elements.append((catalogue, composition))
                    else:
                        logger.warning(
                            'Unknown composition of star, "%s". Line unable to parse: "%s"',
                            s.columns.get('hip', 'UNK'), raw_attr)
            yield s, elements, planets
        except:
            # Pass the exception to be handled at the higher level
            raise
def splitTrack(savefilename):
    """Split a recorded album WAV into per-track WAVs with dynamics processing.

    savefilename -- base name (no extension) of the dump files under dumpdir;
                    reads "<name>.wav" and "<name>.txt", writes per-track WAVs
                    and "<name>-notes.txt".

    Pipeline: detect candidate tracks by silence, map them onto the official
    tracklist (combining runs of detected tracks where needed), analyze each
    real track's dynamics, connect runs so adjoining tracks are processed
    alike, then amplify/limit and save each slice.

    NOTE(review): logger.debug/info/warn are called with ``notesfile`` as an
    extra positional argument throughout; with the stdlib logging module that
    argument would be treated as a %-format arg. Presumably ``logger`` here is
    a project wrapper that also writes to the notes file — confirm before
    changing any of those calls.
    """
    savepath = os.path.join(dumpdir, "%s.wav" % savefilename)
    wav = SplitTracks.WAVSplitter(savepath)
    tracks = wav.parsewav()
    notesfile = open(os.path.join(dumpdir, "%s-notes.txt" % savefilename), 'w')
    logger.debug(', '.join(["%f" % t['length'] for t in tracks]), notesfile)
    flags, metadata, tracklist = ParseTables.readextendedtracklist(os.path.join(dumpdir, "%s.txt" % savefilename))
    album, artist, year, genre = metadata
    maxnamelen = max((len(t[0]) for t in tracklist))
    #Match up the measured tracks with the "official" list, tracklist
    trackmappings = [None] * len(tracklist)
    tracksfailed = [False] * len(tracklist)
    lastmatched = 0
    #Match any with similar track lengths
    #for i, t in enumerate(tracklist):
    #    for j, track in enumerate(tracks[lastmatched:]):
    #        if abs(track['length'] - t[1]) < vs['Constants']['tracklentoleranceseconds']:
    #            trackmappings[i] = [lastmatched+j]
    #            lastmatched = j
    #See if runs of tracks can be combined to match with official ones
    #Simple case if numbers of found and official tracks match
    if len(tracks) == len(tracklist):
        for i in xrange(len(tracklist)):
            trackmappings[i] = [i]
    else:
        def trackmappingbounds(i):
            #Range of detected-track indices that could belong to official track i,
            #bounded by the nearest already-mapped neighbors.
            j = i + 1
            while j < len(trackmappings) and not trackmappings[j]:
                j += 1
            k = i - 1
            while k >= 0 and not trackmappings[k]:
                k -= 1
            #print "bounds=[%d, %d)" % (k, j)
            if k < 0:
                if j < len(trackmappings):
                    indexrange = range(trackmappings[j][0])
                else:
                    indexrange = range(len(tracks))
            elif j >= len(trackmappings):
                indexrange = range(trackmappings[k][-1] + 1, len(tracks))
            else:
                indexrange = range(trackmappings[k][-1]+1, trackmappings[j][0])
            return indexrange
        for i, t in enumerate(tracklist):
            if trackmappings[i] is None:
                indexrange = trackmappingbounds(i)
                matched = False
                #indexrange is the range of track indices in tracks that may be part of official track i
                for j1 in range(len(indexrange)):
                    for j2 in range(j1+1, len(indexrange)+1):
                        if abs(sum((tracks[k]['length'] for k in indexrange[j1:j2])) - t[1]) < vs['Constants']['tracklentoleranceseconds']:
                            trackmappings[i] = indexrange[j1:j2]
                            #print "Succeed", i, trackmappings[i]
                            matched = True
                            #A sharp end followed by a sharp start inside one
                            #official track indicates a playback skip to remove.
                            for k, tracki in enumerate(trackmappings[i][:-1]):
                                if tracks[trackmappings[i][k]]['sharpend'] and tracks[trackmappings[i][k+1]]['sharpstart']:
                                    track = tracks[tracki]
                                    logger.warn("Removing skip at %f in %s" % (float(track['tend'] - track['tstart']) / wav.framerate, tracklist[i][0]), notesfile)
                            break
                    if matched:
                        break
        #Anything still unmapped is a failure; take the whole candidate range.
        for i, t in enumerate(tracklist):
            if trackmappings[i] is None:
                indexrange = trackmappingbounds(i)
                trackmappings[i] = indexrange
                #print "Fail ", i, trackmappings[i]
                #print trackmappings
                logger.warn("Failed to locate track %d, %s" % (i+1, tracklist[i][0]), notesfile)
                tracksfailed[i] = True
    realtracks = list()
    for i, m in enumerate(trackmappings):
        failed = tracksfailed[i]
        if len(m) == 0:
            continue
        sharpstart = tracks[m[0]]['sharpstart']
        sharpend = tracks[m[-1]]['sharpend']
        startreason = tracks[m[0]]['startreason']
        endreason = tracks[m[-1]]['endreason']
        if sharpstart:
            startchar = '|'
        else:
            startchar = '<'
        if sharpend:
            endchar = '|'
        else:
            endchar = '>'
        trackslen = sum((tracks[t]['length'] for t in m))
        diff = trackslen - tracklist[i][1]
        diffint = int(abs(diff))
        #Visualize the length mismatch with a bar of +/- characters.
        if diff < 0:
            diffstr = "%.3f seconds %s" % (diff, '-' * diffint)
        else:
            diffstr = "%.3f seconds %s" % (diff, '+' * diffint)
        if failed:
            logger.warn("%d. %s (%d secs):" % (i+1, tracklist[i][0].ljust(maxnamelen), tracklist[i][1]) + " =/=\t%s%s Tracks %s, Length = %.3f secs (%s) %s%s" % (startchar, startreason, str(map(lambda x: x+1, m)), trackslen, diffstr, endreason, endchar), notesfile)
        else:
            logger.info("%d. %s (%d secs):" % (i+1, tracklist[i][0].ljust(maxnamelen), tracklist[i][1]) + " ==\t%s%s Tracks %s, Length = %.3f secs (%s) %s%s" % (startchar, startreason, str(map(lambda x: x+1, m)), trackslen, diffstr, endreason, endchar), notesfile)
        #Only remove actual skips and not intentional silence in tracks
        indexpairs = [ [tracks[m[0]]['tstart']] ]
        for j, t in enumerate(m[:-1]):
            if tracks[t]['sharpend'] and tracks[m[j+1]]['sharpstart']:
                indexpairs[-1].append(tracks[t]['tend'])
                indexpairs.append([tracks[m[j+1]]['tstart']])
        indexpairs[-1].append(tracks[m[-1]]['tend'])
        track = wav.compoundslice(indexpairs)
        realtrack = dict()
        realtrack['indexpairs'] = indexpairs
        realtrack['sharpstart'] = tracks[m[0]]['sharpstart']
        realtrack['sharpend'] = tracks[m[-1]]['sharpend']
        realtrack['startreason'] = tracks[m[0]]['startreason']
        realtrack['endreason'] = tracks[m[-1]]['endreason']
        realtrack['length'] = sum((tracks[t]['length'] for t in m))
        realtrack['tracknum'] = i+1
        #Dynamics analysis
        realtrack['maxlevel'] = max((tracks[t]['maxlevel'] for t in m))
        realtrack['rmsintervals'] = SplitTracks.rmsintervals(track, chunksize=wav.framerate, dbfs=True)
        realtrack['maxintervalrms'] = np.amax(realtrack['rmsintervals'])
        realtrack['dynamicrange'] = realtrack['maxlevel'] - realtrack['maxintervalrms']
        realtrack['rms'] = SplitTracks.todbfs(SplitTracks.rms(track))
        realtrack['dbtoamplify'] = 0.0
        realtrack['limitlevel'] = 0.0
        realtrack['residue'] = 0.0
        realtrack['postlimitamplify'] = 0.0
        #Skip if the track is slaved
        if ('p' in flags and i in flags['p']) or ('a' in flags and i in flags['a']):
            realtracks.append(realtrack)
            continue
        realtrack['hist'], bins = SplitTracks.peakhistogram(track, rangeratio=(0.5,1.0), proportional=True)
        realtrack['top10hist'], top10bins = SplitTracks.peakhistogram(track, rangeratio=(0.9,1.0), proportional=True)
        if np.amin(realtrack['top10hist']) > 0:
            realtrack['topratio'] = float(realtrack['top10hist'][-1]) / float(np.amax(realtrack['top10hist'][:-1]))
        else:
            realtrack['topratio'] = 0
        #Walk the histogram down from the loudest bucket until the chosen
        #proportion of peaks would be chopped; that bucket sets choplevel.
        #(Renamed from `i`, which clobbered the enclosing loop variable.)
        bucket = realtrack['hist'].shape[0]-1
        s = realtrack['hist'][bucket]
        while s <= vs['Constants']['proportionpeakstochop']:
            bucket -= 1
            s += realtrack['hist'][bucket]
        realtrack['choplevel'] = SplitTracks.todbfs(2**(8*wav.sampwidth-1) * (0.5 + float(bucket) / (2 * realtrack['hist'].shape[0])))
        logger.debug("\tMaxlevel: %.3f\tAverage RMS: %.3f\tMax RMS: %.3f\tTopratio: %.3f" % (realtrack['maxlevel'], realtrack['rms'], realtrack['maxintervalrms'], realtrack['topratio']), notesfile)
        realtracks.append(realtrack)
    #Can do analysis of the entire album's dynamics here
    maxlevel = max((rt['maxlevel'] for rt in realtracks))
    maxrms = max((rt['maxintervalrms'] for rt in realtracks))
    mindynamicrange = min((rt['dynamicrange'] for rt in realtracks))
    mindynamicrange = min(vs['Constants']['mindynamicrange'], mindynamicrange)
    #Dynamics analysis
    for i, rt in enumerate(realtracks):
        if ('p' in flags and i in flags['p']) or ('a' in flags and i in flags['a']):
            continue
        #We need either the dbtoamplify (for flat tracks) or the information for
        #limiting and post-limiting amplification
        #The simple case: the track has already been peak limited, so just amplify
        #it to the desired max level
        if rt['topratio'] >= vs['Constants']['topratioforbrickwalled'] or rt['dynamicrange'] < vs['Constants']['mindynamicrange']:
            rt['dbtoamplify'] = rt['maxlevel'] * -1 + vs['Constants']['dbfstoamplify']
        else:
            #Otherwise, set the limiting level to the level that
            #proportionpeakstochop occur above
            #Maximum amount of headroom we can lose from the peaks (will be negative)
            #Determined by the minimum dynamic range
            maxdbtochop = rt['dynamicrange'] - mindynamicrange
            minlimitlevel = 20 * math.log10((math.pow(10, float(-1 * maxdbtochop)/20) - 0.2)/0.8)
            limitlevel = max(rt['choplevel'], minlimitlevel)
            residue = vs['Constants']['baseresidue']
            lllfbr = vs['Constants']['lowestlimitlevelforbaseresidue']
            #Lower residual level if the chop is extreme enough
            if limitlevel <= lllfbr:
                residue *= max(1.0 - 0.75 * (lllfbr - limitlevel) / (-1 * lllfbr), 0.25)
            #Calculate level to amplify to reach full scale
            limit = float(SplitTracks.fromdbfs(limitlevel))
            newlimit = limit + (2**(8*wav.sampwidth - 1) - limit) * residue
            newmaxlevel = SplitTracks.todbfs(newlimit)
            postlimitamplify = -1 * newmaxlevel - rt['maxlevel']
            hllffs = vs['Constants']['highestlimitlevelforfullscale']
            if limitlevel >= hllffs:
                postlimitamplify += (hllffs - limitlevel)/(-1 * hllffs)
            rt['limitlevel'] = limitlevel
            rt['residue'] = residue
            rt['postlimitamplify'] = postlimitamplify
    #Build runs
    if continuous or 'c' in flags:
        #Treat the whole album as one run.
        runs = [0] * len(realtracks)
        runnum = 1
    else:
        runs = [None] * len(realtracks)
        runnum = 0
    for i, rt in enumerate(realtracks):
        #Skip if part of a run
        if runs[i] is not None:
            continue
        #Look for tracks that connect to this one (unless it is not like its
        #neighbors)
        j = i-1
        while j >= 0 and ((realtracks[j+1]['sharpstart'] and realtracks[j]['sharpend']) or ('p' in flags and j in flags['p']) or ('a' in flags and j+1 in flags['a'])):
            j -= 1
        lowerbound = j + 1
        j = i+1
        while j < len(realtracks) and (realtracks[j-1]['sharpend'] and realtracks[j]['sharpstart'] or ('p' in flags and j-1 in flags['p']) or ('a' in flags and j in flags['a'])):
            j += 1
        upperbound = j
        if upperbound - lowerbound > 1:
            for j in xrange(lowerbound,upperbound):
                runs[j] = runnum
            runnum += 1
    #Connect all runs: every track in a run gets the same dynamics settings.
    for i in xrange(runnum):
        run = [realtracks[j] for j in xrange(len(runs)) if runs[j] == i and ('q' not in flags or j not in flags['q'])]
        limitedrun = [t for t in run if t['dbtoamplify'] != 0.0]
        nonlimitedrun = [t for t in run if t['limitlevel'] != 0.0]
        if len(limitedrun) > 0:
            dbta = min((t['dbtoamplify'] for t in limitedrun))
            for tr in run:
                tr['dbtoamplify'] = dbta
        elif len(nonlimitedrun) > 0:
            #Pick the member with the highest non-zero limit level.
            mindex = 0
            maxl = nonlimitedrun[0]['limitlevel']
            for j, t in enumerate(nonlimitedrun[1:]):
                if (t['limitlevel'] > maxl and t['limitlevel'] != 0.0) or maxl == 0.0:
                    maxl = t['limitlevel']
                    mindex = j + 1
            for tr in run:
                tr['limitlevel'] = nonlimitedrun[mindex]['limitlevel']
                tr['residue'] = nonlimitedrun[mindex]['residue']
                tr['postlimitamplify'] = nonlimitedrun[mindex]['postlimitamplify']
        if len(run) > 0:
            ml = min((t['maxlevel'] for t in run))
            for tr in run:
                #BUG FIX: was `rt['maxlevel'] = ml`, which wrote to the stale
                #loop variable from the earlier enumerate loop instead of each
                #run member, so the shared max level never propagated.
                tr['maxlevel'] = ml
    #Apply dynamics changes and save tracks
    for i, rt in enumerate(realtracks):
        if rt['dbtoamplify'] != 0.0:
            logger.info("Amplifying track %d by %.3f dB" % (rt['tracknum'], rt['dbtoamplify']), notesfile)
        else:
            logger.info("Limiting track %d to %.3f dBfs (r=%.2f) and amplifying by %.3f dB" % (rt['tracknum'], rt['limitlevel'], rt['residue'], rt['postlimitamplify']), notesfile)
        if not test:
            savename = os.path.join(dumpdir, "%s%d-%s.wav" % (savefilename[:3], i, savefilename))
            wav.saveslice(rt['indexpairs'], savename, rt['sharpstart'], rt['sharpend'], rt['maxlevel'], rt['dbtoamplify'], rt['limitlevel'], rt['residue'], rt['postlimitamplify'])
            #Util.mp3encode(savename, tracklist[i][0], artist=artist, album=album, year=year, trackno=rt['tracknum'], trackcount=len(tracklist), genre=genre)
    notesfile.close()
import ParseTables import SplitTracks import Util import CheckCommands usage = "python %s [savefilename] [playlistindex] [metadataloc]" % sys.argv[0] from Config import vs, thisdir, dumpdir, windows, logger, getcommands if record: commands = getcommands(sys.argv) if windows: import subprocess if noreset: logger.info("Not resetting last track.") if not record: logger.info("Skipping record.") if not split: logger.info("Skipping split.") if test: logger.info("Testing, not saving any new tracks.") if continuous: logger.info("Mixing all tracks the same.") def MouseDo(cmdstring, debug=False): if debug: logger.debug(cmdstring) if windows: subprocess.call(["java", "-cp", os.environ["JAVA"], "RemoteControlRobot"] + cmdstring.split(' ')) else:
def parsewav(self):
    """Scan the WAV for silences and derive per-track boundary dicts.

    Pass 1 streams the file into ``self.samples``, recording silence spans
    (below ``silencethreshold``) and the running peak of each track into
    ``self.silences`` / ``self.trackmaxes``. Pass 2 refines each silence into
    precise track start/end frames via ``findtrackend``. Populates and
    returns ``self.tracks``: a list of dicts with keys tstart, tend, length,
    sharpstart, sharpend, startreason, endreason, maxlevel.
    """
    self.wavfile.setpos(0)
    now = time.time()
    silencestart = None
    firsttrack = True
    runningmax = 0
    #Contains readframes paired frames at a time
    framebuf = self.wavfile.readframes(readframes)
    framesread = 0
    #Size of buffer to check for silence (in paired frames)
    silencebufsize = silenceframes
    while framebuf:
        #framebuf size in paired frames
        framebufsize = len(framebuf) / (self.sampwidth * self.numchannels)
        for i in xrange(0, framebufsize, silencebufsize):
            imax = min(self.samples.shape[0], framesread+i+silencebufsize)
            #Copy data from framebuf into self.samples
            self.samples[framesread+i:imax] = np.reshape(np.frombuffer(framebuf, dtype=np.int16, count=min(silencebufsize * self.numchannels, (framebufsize - i) * self.numchannels), offset=i*self.sampwidth*self.numchannels), (-1,2))
            #Take absolute values into silence buffer
            silencebuf = np.absolute(self.samples[framesread+i:imax])
            #Check max level in silence buffer
            maxlevel = np.amax(silencebuf)
            runningmax = max(runningmax, maxlevel)
            if maxlevel < silencethreshold and (runningmax > mintracklevel or firsttrack) and silencestart is None:
                #Entering a silence: remember where it began and the peak of
                #the track that just ended.
                silencestart = framesread + i
                self.trackmaxes.append(runningmax)
                firsttrack = False
            elif maxlevel >= 2*silencethreshold and silencestart is not None:
                #Leaving a silence (hysteresis: exit threshold is 2x entry).
                self.silences.append((silencestart, framesread + i))
                silencestart = None
                runningmax = 0
        framesread += framebufsize
        framebuf = self.wavfile.readframes(readframes)
    #Trailing silence runs to the end of the file.
    if silencestart is not None:
        self.silences.append((silencestart, self.numframes))
        self.trackmaxes.append(runningmax)
    nnow = time.time()
    logger.info("Parsed in %f seconds." % (nnow - now))
    logger.info("Determining track splits...")
    trackindex = 0
    trackl = list(list())
    #Ensure there is a "next track" peak for the final silence.
    if len(self.trackmaxes) <= len(self.silences):
        self.trackmaxes.append(self.trackmaxes[-1])
    for sframe, eframe in self.silences:
        #Decide starting frame for checking end of previous track
        i = sframe
        trackmax = self.trackmaxes[trackindex]
        #Near-silence threshold scaled to this track's own peak level.
        levelthreshold = int(nearsilence * float(trackmax) / 2**(8*self.sampwidth-1))
        while i >= 0 and np.amax(np.absolute(self.samples[i:i+1])) < levelthreshold:
            i -= 1
        start = i - tracksnippadding
        sframe += tracksnippadding
        #Decide ending frame for checking beginning of next track
        i = eframe
        trackmax = self.trackmaxes[trackindex+1]
        levelthreshold = int(nearsilence * float(trackmax) / 2**(8*self.sampwidth-1))
        while i < self.numframes - 1 and np.amax(np.absolute(self.samples[i:i+1])) < levelthreshold:
            i += 1
        eframe -= tracksnippadding
        end = i + tracksnippadding
        trackindex += 1
        silencelen = eframe - sframe
        silencestart = None
        silenceend = None
        #print start, sframe, eframe, end
        if start > 0:
            #Examine the fade-out to find the exact end of the previous track.
            fadeout = self.slicewav(start, sframe)
            #plotarr(fadeout, seconds=False)
            silencestart, startjump, reason = findtrackend(fadeout, start, silencelen, direction=-1, framerate=self.framerate)
            silencestart += 1  #Upper bound of a track is exclusive
            trackl[-1].append((silencestart, startjump, reason))
        if end < self.numframes:
            #Examine the fade-in to find the exact start of the next track.
            fadein = self.slicewav(eframe, end)
            #plotarr(fadein, seconds=False)
            silenceend, endjump, reason = findtrackend(fadein, eframe, silencelen, direction=1, framerate=self.framerate)
            trackl.append([(silenceend, endjump, reason)])
    #If the last track never got an end boundary, it runs to EOF.
    if len(trackl[-1]) == 1:
        trackl[-1].append((self.numframes, True, "---"))
    now = time.time()
    logger.info("Split in %f seconds." % (now - nnow))
    for i, t in enumerate(trackl):
        self.tracks.append(dict())
        self.tracks[-1]['tstart'] = t[0][0]
        self.tracks[-1]['tend'] = t[1][0]
        self.tracks[-1]['length'] = float(t[1][0] - t[0][0]) / self.framerate
        self.tracks[-1]['sharpstart'] = t[0][1]
        self.tracks[-1]['sharpend'] = t[1][1]
        self.tracks[-1]['startreason'] = t[0][2]
        self.tracks[-1]['endreason'] = t[1][2]
        #trackmaxes[0] is the lead-in; track i's peak is at index i+1.
        self.tracks[-1]['maxlevel'] = todbfs(self.trackmaxes[i+1])
    return self.tracks