def lookup(url, tracks_filename):
    """Fetch *url*, scrape the first table with class 'tracklist', and
    write the parsed track times and titles to *tracks_filename*.

    Returns True on success, or None when the page has no tracklist
    table.
    """
    opener = build_opener()
    opener.addheaders = [('User-agent', 'Album-Splitter')]
    markup = opener.open(url).read()
    document = BeautifulSoup(markup, 'html.parser')
    tables = document.find_all(class_='tracklist')
    if not tables:
        return None
    all_rows = tables[0].find_all('tr')
    # First row is a header: use it to learn the column count, then
    # drop it before parsing the data rows.
    column_count = find_numberOfColumns(all_rows[0])
    track_titles = []
    track_times = []
    for data_row in all_rows[1:]:
        cells = data_row.find_all('td')
        # Skip rows that don't match the header's column count (e.g.
        # section separators or footers).
        if len(cells) != column_count:
            continue
        try:
            track_titles.append(cells[1].get_text())
        except TypeError:
            # A row whose title cell can't yield text marks the end of
            # the usable tracklist.
            break
        track_times.append(cells[column_count - 1].get_text())
    splitutil.writeTracksToFile(track_times, track_titles, tracks_filename)
    return True
def lookup(url):
    """Scrape the Amazon digital-music tracklist at *url* and record
    it via splitutil.writeTracksToFile.

    Returns True on success, or None when the tracklist container
    ('dmusic_tracklist_content') is missing from the page.
    """
    opener = build_opener()
    opener.addheaders = [('User-agent', 'Album-Splitter')]
    markup = opener.open(url).read()
    document = BeautifulSoup(markup, 'html.parser')
    tracklist = document.find(id='dmusic_tracklist_content')
    if not tracklist:
        return None
    # First <tr> is a header row; skip it.
    data_rows = tracklist.find_all('tr')[1:]
    # Compile once and reuse across rows.
    duration_id = re.compile("dmusic_tracklist_duration.*")
    track_titles = []
    track_times = []
    for data_row in data_rows:
        track_titles.append(data_row.find(attrs={'class': 'TitleLink'}).text.strip())
        track_times.append(data_row.find(id=duration_id).text.strip())
    splitutil.writeTracksToFile(track_times, track_titles)
    return True
def lookup(url):
    """Scrape a Wikipedia tracklist table from *url* and record the
    track titles and lengths via splitutil.writeTracksToFile.

    Returns True on success, or None when no element with class
    'tracklist' exists on the page.
    """
    opener = build_opener()
    opener.addheaders = [('User-agent', 'Album-Splitter')]
    markup = opener.open(url).read()
    document = BeautifulSoup(markup, 'html.parser')
    tables = document.find_all(class_='tracklist')
    if not tables:
        return None
    all_rows = tables[0].find_all('tr')
    # First row is a header: it tells us which column holds the track
    # length, then gets dropped before parsing the data rows.
    length_column = find_length_column(all_rows[0])
    track_titles = []
    track_times = []
    for data_row in all_rows[1:]:
        try:
            track_titles.append(extract_title(data_row))
        except TypeError:
            # A row without an extractable title marks the end of the
            # tracklist.
            break
        track_times.append(data_row.contents[length_column].contents[0])
    splitutil.writeTracksToFile(track_times, track_titles)
    return True