def get_episodes(id):
    """Scrape the episode/stream list for anime *id*; cached for 1 day.

    Returns a list of {'name', 'url', 'episode'} dicts; [] on any failure.
    """
    cache_url = common.cleanfilename(id)
    episodes = json_handle.load_json(site, cache_url+'episodes', cache_time=1)
    if episodes:
        return episodes
    else:
        episodes = []
        try:
            content = requests.get(base_url+id, timeout=timeout).text
            # first link of the "streams" list leads to the episode overview
            url = re.findall('<ul class="streams"><li><a href="(.*?)"', content, re.DOTALL)[0]
            url = url.replace('amp;','')
            content = requests.get(base_url+url, timeout=timeout).text
            streamlist = re.findall('<div class="title"><h2>Streams(.*?)</ul>', content, re.DOTALL)[0]
            items = re.findall('<stro(.*?)</li>', streamlist, re.DOTALL)
            for item in items:
                url = re.findall('href="(.*?)"', item)[0]
                title = re.findall('ng>(.*?)</strong>', item)[0]
                try:
                    # first number in the title is the episode; strip leading zeros
                    episode = re.findall(r'\d+', title)[0]
                    while episode.startswith('0'):
                        episode = episode[1:]
                except:
                    episode = 0
                episodes.append({'name': title, 'url': url, 'episode': episode})
            if episodes:
                json_handle.save_json(site, cache_url+'episodes', episodes)
        except:
            pass
        return episodes
def get_anime_list():
    """Scrape the paginated stream index into a list of anime dicts; 7-day cache."""
    cache_url = common.cleanfilename(streams_url)
    anime_list = json_handle.load_json(site, cache_url, cache_time=7)
    if anime_list:
        return anime_list
    else:
        anime_list = []
        try:
            content = requests.get(streams_url).text
            # total page count from the pager widget ("Seite 1 von N")
            re_pages = re.findall('<span class="pagenav-pages">Seite 1 von (\d+)</span>', content, re.DOTALL)[0]
            for i in range(1, int(re_pages)+1):
                url = streams_url+'&page='+str(i)
                content = requests.get(url, timeout=timeout).text
                index = re.findall('<table class="index-gallery">(.*?)</table>', content, re.DOTALL)[0]
                items = re.findall('<td><a(.*?)</td>', index, re.DOTALL)
                for item in items:
                    #if 'TV,' in item:
                    url = re.findall('href="(.*?)"', item, re.DOTALL)[0]
                    cover = re.findall('<img src="(.*?)"', item, re.DOTALL)[0]
                    # request the full-size cover instead of the thumbnail
                    cover = cover.replace('thumb','full')
                    name = re.findall('alt="(.+?)"', item, re.DOTALL)[0]
                    try:
                        year = re.findall('advinfo mtA.*?([0-9]{4})', item, re.DOTALL)[0]
                    except:
                        year = ''
                    try:
                        genre = re.findall('advinfoB.*?gt;(.*?)<', item, re.DOTALL)[0]
                    except:
                        genre = ''
                    anime_list.append({'site': site, 'original': name, 'id': url, 'cover': cover, 'year': year, 'genre': genre})
            if anime_list:
                json_handle.save_json(site, cache_url, anime_list)
        except:
            pass
        return anime_list
def get_artists(query):
    """Search last.fm for artists matching *query*.

    Only artists that have an image are kept; the result is deduplicated
    and sorted by listener count, highest first.
    """
    found = []
    try:
        payload = {
            'method': 'artist.search',
            'artist': query,
            'api_key': api_key,
            'format': 'json',
            'limit': '25',
        }
        data = requests.get(api_url, params=payload).json()
        for entry in data['results']['artistmatches']['artist']:
            picture = entry['image'][-1]['#text']
            if not picture:
                continue
            found.append({
                'artist': entry['name'],
                'image': picture,
                'listeners': entry['listeners'],
            })
    except:
        pass
    found = remove_duplicates(found)
    return sorted(found, key=lambda e: int(e['listeners']), reverse=True)
def get_similar_artists(artist):
    """Return artist names similar to *artist* from last.fm (cached).

    The cached list holds up to 100 names; the returned list is capped at
    common.limit_artists().
    """
    n = '100'
    names = cache.get_value(artist, n, lastfm=True)
    if names is None:
        names = []
        try:
            payload = {
                'method': 'artist.getsimilar',
                'artist': artist,
                'autocorrect': '1',
                'limit': n,
                'api_key': api_key,
                'format': 'json',
            }
            data = requests.get(api_url, params=payload).json()
            for entry in data['similarartists']['artist']:
                names.append(entry['name'])
            cache.save_value(artist, n, names, lastfm=True)
        except:
            pass
    capped = []
    for name in names:
        capped.append(name)
        if len(capped) == int(common.limit_artists()):
            break
    return capped
def get_artists_by_tag(tag):
    """Return the top last.fm artists for *tag* (cached).

    The cache holds up to 500 entries; the returned list is capped at
    common.limit_tag().
    """
    n = '500'
    cached = cache.get_value(tag, n, lastfm='tag')
    if cached is None:
        cached = []
        try:
            payload = {
                'method': 'tag.gettopartists',
                'tag': tag,
                'api_key': api_key,
                'format': 'json',
                'limit': n,
            }
            data = requests.get(api_url, params=payload).json()
            for entry in data['topartists']['artist']:
                try:
                    cached.append({'artist': entry['name']})
                except:
                    pass
            cache.save_value(tag, n, cached, lastfm='tag')
        except:
            pass
    capped = []
    for entry in cached:
        capped.append(entry)
        if len(capped) == int(common.limit_tag()):
            break
    return capped
def _load_json_script(self, java_script_url):
    """Fetch the player JavaScript at *java_script_url* and parse it.

    Relative URLs are anchored to youtube.com and forced to https before
    fetching; any download error yields an empty script.
    """
    request_headers = {
        'Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'DNT': '1',
        'Accept-Encoding': 'gzip, deflate',
        'Accept-Language': 'en-US,en;q=0.8,de;q=0.6'
    }
    target = java_script_url
    if 'youtube.com' not in target:
        target = 'https://www.youtube.com/' + target
    if not target.startswith('http'):
        target = 'https://' + target
    source = ''
    try:
        response = requests.get(target, headers=request_headers,
                                verify=False, allow_redirects=True)
        source = response.text
    except:
        pass
    return self._load_java_script(source)
def get_anime_info(id):
    """Scrape title, cover, description and genres for anime *id*; 7-day cache."""
    cache_url = common.cleanfilename(id)
    anime = json_handle.load_json(site, cache_url, cache_time=7)
    if anime:
        return anime
    else:
        anime = []
        try:
            content = requests.get(base_url+id, timeout=timeout).text
            # the numeric page id doubles as the cover image filename
            c = id.split('page=')[-1]
            name = re.findall('<article><h4>(.+?)</h4></article>', content, re.DOTALL)[0]
            cover = 'http://tavernakoma.net/images/player/%s.jpg' % c
            try:
                beschreibung = re.findall('<div class="anime-description">(.*?)</div>', content, re.DOTALL)[0]
            except:
                beschreibung = ''
            try:
                genres = []
                genrelist = re.findall('<ul id="genrelist">(.*?)</ul>', content, re.DOTALL)[0]
                match = re.findall('<li>(.*?)</li>', genrelist, re.DOTALL)
                for genre in match:
                    genres.append(genre)
            except:
                genres = []
            anime = ({'site': site, 'original': name, 'id': id, 'cover': cover, 'beschreibung': beschreibung, 'genre': genres})
            if anime:
                json_handle.save_json(site, cache_url, anime)
        except:
            pass
        return anime
def get_videos(artist):
    """Search the YouTube Data API for '<artist> video'.

    Follows up to two extra result pages as long as earlier pages produced
    matches. Returns the accumulated video list, or False when the initial
    request fails.
    """
    videos = []
    search_url = 'https://www.googleapis.com/youtube/v3/search'
    request_headers = {
        'Host': 'www.googleapis.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36',
        'Accept-Encoding': 'gzip, deflate',
    }
    query = {
        'part': 'snippet',
        'type': 'video',
        'maxResults': '50',
        'q': '%s video' % artist,
        'key': 'AIzaSyCky6iU_p2VjvpXwTSOpPVLsGFIdR51lQE',
    }
    try:
        json_data = requests.get(search_url, params=query, headers=request_headers).json()
    except:
        return False
    try:
        items = json_data['items']
        videos = add_videos(videos, items, artist)
        # fetch further pages only while earlier pages yielded matches
        if len(videos) > 1:
            json_data, items = get_more_items(json_data, search_url, query, request_headers)
            videos = add_videos(videos, items, artist)
            if len(videos) > 3:
                json_data, items = get_more_items(json_data, search_url, query, request_headers)
                videos = add_videos(videos, items, artist)
    except:
        pass
    return videos
def get_videos(artist):
    """Search clipfish for music videos whose title starts with *artist*.

    Returns a list of video dicts (newest inserted first), or False when
    the search request itself fails.
    """
    videos = []
    # the search endpoint only tolerates alphanumeric query strings
    q = re.sub('[^\s0-9a-zA-Z]+', '', artist)
    url = 'http://www.clipfish.de/devmobileapp/searchvideos/%s/mostrecent/1/100' % q
    try:
        json_data = requests.get(url).json()
    except:
        return False
    try:
        result = json_data['videos']
        for r in result:
            try:
                t = r['title']
                # titles come as "<artist> - <song>[ - <suffix>]"
                match = t.split(' - ')
                name = match[0].strip()
                if name.encode('utf-8').lower() == artist.lower():
                    title = match[1]
                    if len(match) > 2:
                        try:
                            title = title+match[2]
                        except:
                            pass
                    id = r['video_url_wifi_quality']
                    image = r['media_thumbnail']
                    duration = 0
                    try:
                        duration = r['media_length']
                    except:
                        pass
                    # skip clips shorter than 150s (presumably teasers — confirm)
                    if int(duration) > 150:
                        videos.insert(0, {'site':site, 'artist':[name], 'title':title, 'duration':duration, 'id':id, 'image':image})
            except:
                pass
    except:
        pass
    return videos
def get_anime_info(id):
    """Scrape name, cover, plot, genres and year for anime *id*; 7-day cache."""
    cache_url = common.cleanfilename(id)
    anime = json_handle.load_json(site, cache_url, cache_time=7)
    if anime:
        return anime
    else:
        anime = []
        try:
            content = requests.get(base_url+id, timeout=timeout).text
            # one tuple per match: (cover path, title) from the description image
            match = re.findall('<li id="animedescriptionimg">.*?<img src="(.*?)" alt="(.+?)"', content, re.DOTALL)
            name = match[0][1]
            cover = match[0][0]
            try:
                plot = re.findall('Inhalt.*?<p>(.*?)</p>', content, re.DOTALL)[0]
            except:
                plot = ''
            try:
                genres = re.findall('Genre:.*?<p>(.*?)</p>', content, re.DOTALL)[0]
                try:
                    genre = genres.split(',')
                except:
                    genre = [genres]
            except:
                genre = []
            try:
                year = re.findall('Release:.*?<p>(.*?)</p>', content, re.DOTALL)[0]
                # release looks dotted (dd.mm.yyyy); keep the last (year) part
                year = year.split('.')[-1]
            except:
                year = ''
            anime = ({'site': site, 'original': name, 'id': id, 'cover': base_url+cover, 'beschreibung': plot, 'genre': genre, 'year': year})
            if anime:
                json_handle.save_json(site, cache_url, anime)
        except:
            pass
        return anime
def get_episodes(id):
    """Scrape the 'Folgen' page for anime *id* into name/url/episode dicts."""
    episodes = []
    try:
        content = requests.get(base_url+id, timeout=timeout).text
        folgen = re.findall('<h2>Folgen</h2>.*?href="(.*?)"', content, re.DOTALL)[0]
        content = requests.get(base_url+folgen, timeout=timeout).text
        # playlist entries embedded as JS: file / title / description triples
        item = re.findall('file: "(.+?)".*?title: "(.+?)".*?description: "(.+?)"', content, re.DOTALL)
        for file, title, description in item:
            name = title+' - '+description
            # first number in the title is the episode; strip leading zeros
            episode = re.findall(r'\d+', title)[0]
            while episode.startswith('0'):
                episode = episode[1:]
            episodes.append({'name': name, 'url': file, 'episode': episode})
    except:
        pass
    return episodes
def get_artist_genre(artist):
    """Return up to five last.fm top tags for *artist*, skipping non-genre
    tags (cached under the 'genre' key).

    Fixes: the skip list previously shadowed the builtin `list` and was
    rebuilt on every loop iteration; it is now hoisted out of the loop and
    renamed. `== None` replaced by the idiomatic `is None`.
    """
    genre_list = cache.get_value(artist, 'genre', lastfm='tag')
    if genre_list is None:
        genre_list = []
        # tags that are not musical genres and should be ignored
        skip_tags = [
            'german', 'deutsch', 'seen live', 'made in germany',
            'japanese', 'anime', 'vocalists', 'songwriter'
        ]
        try:
            params = {
                'method': 'artist.gettoptags',
                'artist': artist,
                'api_key': api_key,
                'format': 'json'
            }
            json_data = requests.get(api_url, params=params).json()
            for tag in json_data['toptags']['tag']:
                genre = tag['name']
                if not any(x in genre.lower() for x in skip_tags):
                    genre_list.append(genre)
                    if len(genre_list) == 5:
                        break
            cache.save_value(artist, 'genre', genre_list, lastfm='tag')
        except:
            pass
    return genre_list
def get_hoster(id):
    """Return the hoster link list for series *id*, or [] on any failure."""
    result = []
    try:
        response = requests.get(series_url + id, timeout=timeout)
        result = response.json()['links']
    except:
        pass
    return result
def get_episodes(id):
    """Return the episode list ('epi') for series *id*, or [] on any failure."""
    result = []
    try:
        response = requests.get(series_url + id, timeout=timeout)
        result = response.json()['epi']
    except:
        pass
    return result
def get_video_url(id):
    """Resolve a playable stream URL for YouTube video *id*.

    Fetches the watch page, narrows it to the ytplayer config blob,
    deciphers the stream signature when required and returns the first
    usable URL, or None when nothing could be resolved.

    Fixes: `str.find` returns -1 when '</script>' is absent; the original
    `if pos:` treated -1 as truthy and sliced off the final character.
    Also guards `cipher` (None when no js URL was found) before calling
    get_signature, and drops the unused `cookie` local.
    """
    video_url = None
    headers = {
        'Host': 'www.youtube.com',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36',
        'Referer': 'https://www.youtube.com',
    }
    params = {'v': id}
    url = 'https://youtube.com/watch'
    html = ''
    try:
        html = requests.get(url, params=params, headers=headers).text
    except:
        pass
    # narrow html down to the "<script>var ytplayer ...</script>" blob
    pos = html.find('<script>var ytplayer')
    if pos >= 0:
        html2 = html[pos:]
        pos = html2.find('</script>')
        if pos >= 0:  # -1 means "not found"; keep the whole blob then
            html = html2[:pos]
    # locate the cipher JavaScript used for signed streams
    re_match_js = re.search(r'\"js\"[^:]*:[^"]*\"(?P<js>.+?)\"', html)
    js = ''
    cipher = None
    if re_match_js:
        js = re_match_js.group('js').replace('\\', '').strip('//')
        if not js.startswith('http'):
            js = 'http://www.youtube.com/%s' % js
        cipher = Cipher(java_script_url=js)
    re_match = re.search(
        r'\"url_encoded_fmt_stream_map\"\s*:\s*\"(?P<url_encoded_fmt_stream_map>[^"]*)\"',
        html)
    if re_match:
        url_encoded_fmt_stream_map = re_match.group(
            'url_encoded_fmt_stream_map')
        url_encoded_fmt_stream_map = url_encoded_fmt_stream_map.split(',')
        for value in url_encoded_fmt_stream_map:
            value = value.replace('\\u0026', '&')
            attr = dict(urlparse.parse_qsl(value))
            url = attr.get('url', None)
            if url:
                url = urllib.unquote(attr['url'])
                # already signed -> use as-is
                if 'signature' in url:
                    video_url = url
                    break
                signature = ''
                if attr.get('s', '') and cipher is not None:
                    signature = cipher.get_signature(attr['s'])
                elif attr.get('sig', ''):
                    signature = attr.get('sig', '')
                if signature:
                    url += '&signature=%s' % signature
                    video_url = url
                    break
    return video_url
def get_stream_link(id):
    """Resolve the full stream URL for *id* via the watch API; [] on failure."""
    result = []
    try:
        response = requests.get(api_url + 'watch/%s' % id)
        result = response.json()['fullurl']
    except:
        pass
    return result
def get_trailer(name):
    """Return the first YouTube trailer link for anime *name*, or None."""
    aid = search(name)
    try:
        content = requests.get(anime_url % str(aid), timeout=timeout).text
        match = re.findall('<a itemprop="trailer" href="(.*?)"', content, re.DOTALL)
        # only YouTube-hosted trailers are usable downstream
        for trailer in match:
            if 'youtube' in trailer:
                return trailer
    except:
        pass
def get_anime_list_html(url, cache_time=False):
    """Scrape a paginated HTML anime listing into dicts; 1-day cache.

    NOTE(review): the *cache_time* parameter is accepted but never used —
    the load below always caches for 1 day. Confirm intent.
    """
    cache_url = common.cleanfilename(url)
    anime_list = json_handle.load_json(site, cache_url, cache_time=1)
    if anime_list:
        return anime_list
    else:
        anime_list = []
        try:
            content = requests.get(base_url+url, headers=headers, timeout=timeout).text
            # collect the pager links; on failure fall back to the given url only
            re_pages = re.findall('<dt style=.*?</dt>', content, re.DOTALL)[0]
            re_pages = re.findall('href="(.*?)"', re_pages)
        except:
            re_pages = [url]
        try:
            for url in re_pages:
                content = requests.get(base_url+url, headers=headers, timeout=timeout).text
                items = re.findall('<li class="item">.*?</li>', content, re.DOTALL)
                for item in items:
                    url = re.findall('href="(.+?)"', item)[0]
                    id = url.split('id=')[-1]
                    id = id.split()[0]
                    name = re.findall('</span>">(.+?)</span>', item)[0]
                    try:
                        genres = re.findall('Genre</strong>: (.*?)<br/>', item)[0]
                        try:
                            genre = genres.split(',')
                        except:
                            genre = [genres]
                    except:
                        genre = []
                    try:
                        setting = re.findall('Setting</strong>: (.*?)<br/>', item)[0]
                    except:
                        setting = ''
                    plot = re.findall('<br/><br/>(.*?)</span>', item, re.DOTALL)[0]
                    # NOTE(review): '"' looks like a garbled '&quot;' entity — confirm
                    plot = plot.replace(""","'")
                    year = re.findall('<dt class="ayear">.*?([0-9]{4}).*?</dt>', item, re.DOTALL)[0]
                    # total episode count (after the '/') and aired count (before it)
                    episodes = re.findall('<dt class="aepisode">.*?\/.*?(\d+).*?</dt>', item, re.DOTALL)[0]
                    episodes_aired = re.findall('<dt class="aepisode">.*?(\d+).*?\/', item, re.DOTALL)[0]
                    languages = re.findall('<img title="(.*?)"', item)
                    languages = ', '.join(languages).replace('Enth\xc3\xa4lt ','')
                    anime_list.append({
                        'id': id,
                        'beschreibung': plot,
                        'folgenzahl': episodes,
                        'genre': genre,
                        'length': episodes_aired,
                        'original': name,
                        'setting': setting,
                        'languages': languages,
                        'year': year
                    })
            json_handle.save_json(site, cache_url, anime_list)
        except:
            pass
        return anime_list
def get_videos(artist):
    """Search YouTube for official videos by *artist*.

    The channel of the first accepted result is remembered as the 'trusted'
    channel and passed to status() for ranking later results. Returns a list
    of video dicts, or False when the search request fails.
    """
    videos = []
    trusted_channel = None
    url = 'https://www.googleapis.com/youtube/v3/search'
    headers = {'Host': 'www.googleapis.com',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36',
               'Accept-Encoding': 'gzip, deflate'}
    params = {'part':'snippet','type':'video','maxResults':'50',#'videoDefinition':'high',
              'q':'%s official' % artist,'key':'AIzaSyCky6iU_p2VjvpXwTSOpPVLsGFIdR51lQE',
              }
    if yt_proxy:
        try:
            json_data = pxy_request(url, params).json()
        except:
            return False
    else:
        try:
            json_data = requests.get(url, params=params, headers=headers).json()
        except:
            return False
    try:
        items = json_data['items']
        first = True
        for item in items:
            try:
                id = item['id']['videoId']
                snippet = item['snippet']
                t = snippet['title'].encode('utf-8')
                # normalize Japanese opening bracket and en-dash to ' - '
                try:
                    t = re.sub('「',' - ', t)
                except:
                    pass
                t = t.replace('–', '-')
                # titles come as "<artist> - <song>[ - <suffix>]"
                spl = t.split(' - ')
                name = spl[0].strip().decode('utf-8')
                title = spl[1].strip().decode('utf-8')
                if len(spl) > 2:
                    title = '%s - %s' % (title, spl[2].strip().decode('utf-8'))
                description = snippet['description'].lower().encode('utf-8')
                channel = snippet['channelTitle'].lower().replace(' ','').encode('utf-8')
                name = check_name(artist,name)
                if artist.lower() == name.encode('utf-8').lower():
                    if status(trusted_channel,channel,artist,title,description) == True:
                        image = snippet.get('thumbnails', {}).get('medium', {}).get('url', '')
                        duration = ''
                        title = clean_title(title)
                        videos.append({'site':site, 'artist':[name], 'title':title, 'duration':duration, 'id':id, 'image':image})
                        # first accepted result defines the trusted channel
                        if first == True:
                            trusted_channel = channel
            except:
                pass
            first = False
    except:
        pass
    return videos
def get_more_items(json_data, url, params, headers):
    """Follow the YouTube 'nextPageToken' of *json_data*.

    Returns (next page json, its items); ({}, []) when there is no next
    page or the request fails.
    """
    next_items = []
    next_page = {}
    try:
        token = json_data['nextPageToken']
        if token:
            params['pageToken'] = token
            next_page = requests.get(url, params=params, headers=headers).json()
            next_items = next_page['items']
    except:
        pass
    return next_page, next_items
def get_similar(name):
    """Scrape the recommendations section for anime *name*.

    Returns a list of {'original', 'cover'} dicts, or None on any failure.
    """
    anime_list = []
    aid = search(name)
    try:
        content = requests.get(anime_url % str(aid)+'/relations#similar', timeout=timeout).text
        similar = re.findall('<a id="recommendations">(.*?)</main>', content, re.DOTALL)[0]
        items = re.findall('<img src="(.*?)" alt="(.*?)"', similar, re.DOTALL)
        for cover, name in items:
            anime_list.append({'original': name.decode('utf-8'), 'cover': cover})
        return anime_list
    except:
        pass
def get_artist_id(artist):
    """Look up the numeric artist id for *artist* by exact title match.

    When several results match, the last one wins (no break, as before).
    Returns the id as a string, or None.
    """
    artist_id = None
    try:
        search_params = {'method': 'Asset.quickbarSearch', 'searchterm': artist}
        data = requests.get(base_url, headers=headers, params=search_params).json()
        for candidate in data['quickbar_search']['artists']:
            if candidate['title'].encode('utf-8').lower() == artist.lower():
                artist_id = str(candidate['id'])
    except:
        pass
    return artist_id
def get_anime_list(url):
    """Scrape a paginated '-N.html' anime listing into dicts; 1-day cache."""
    cache_url = common.cleanfilename(url)
    anime_list = json_handle.load_json(site, cache_url, cache_time=1)
    if anime_list:
        return anime_list
    else:
        anime_list = []
        abc_url = url
        try:
            # the last pager link text holds the page count; default to one page
            content = requests.get(url, timeout=timeout).text
            re_pages = re.findall('/><div><center>.*?</center></div><br></div></div>', content)[0]
            re_pages = re.findall('<a href=".*?">(.*?)</a>', re_pages)[-1]
        except:
            re_pages = 1
        try:
            for i in range(0, int(re_pages)):
                url = abc_url.replace('.html','-'+str(i+1)+'.html')
                content = requests.get(url, timeout=timeout).text
                items = re.findall('<div class="group">(.*?<div class="title">.*?<div class="meta_r".*?</div>)', content, re.DOTALL)
                for item in items:
                    url = re.findall('<a href=".(/anime.*?html)"', item)[0]
                    name = re.findall('title="(.+?)"', item)[0]
                    cover = re.findall('src="(.*?)"', item)[0]
                    try:
                        # escape spaces so the cover URL stays valid
                        cover = cover.replace(' ','%20')
                    except:
                        pass
                    try:
                        plot = re.findall('<div class="meta_r".*?>(.+)</div>', item, re.DOTALL)[0]
                        plot = plot.decode('utf-8')
                        plot = plot.replace('…', '...')
                    except:
                        plot = ''
                    try:
                        year = re.findall('Jahr</b>: ([0-9]{4})', item)[0]
                    except:
                        year = ''
                    anime_list.append({'site': site, 'original': name, 'id': url, 'cover': cover, 'year': year, 'beschreibung': plot})
            if anime_list:
                json_handle.save_json(site, cache_url, anime_list)
        except:
            pass
        return anime_list
def get_anime_json(id):
    """Load the series JSON for *id* from the 'burning' cache, fetching and
    caching it on a miss. Returns '' when the fetch fails."""
    site = 'burning'
    cached = json_handle.load_json(site, id)
    if cached:
        return cached
    json_data = ''
    try:
        json_data = requests.get(series_url + str(id) + '/1/', timeout=timeout).json()
        json_handle.save_json(site, id, json_data)
    except:
        pass
    return json_data
def get_video_url(id):
    """Return the 'High' quality mp4 stream URL for video *id*, or None."""
    video_url = None
    try:
        token = get_token()
        stream_url = 'https://apiv2.vevo.com/video/%s/streams/mp4' % str(id)
        streams = requests.get(stream_url, headers=headers,
                               params={'token': token}).json()
        for stream in streams:
            if stream['quality'] == 'High':
                video_url = stream['url']
                break
    except:
        pass
    return video_url
def get_episodes(id):
    """Scrape all episode pages for anime *id* into name/url/episode dicts;
    cached for 1 day."""
    cache_url = common.cleanfilename(id)
    episodes = json_handle.load_json(site, cache_url+'episodes', cache_time=1)
    if episodes:
        return episodes
    else:
        episodes = []
        try:
            content = requests.get(base_url+id, timeout=timeout).text
            re_pages = re.findall("(/anime-.*?-.*?.html)'>", content)
            for page in re_pages:
                url = base_url+page
                content = requests.get(url, timeout=timeout).text
                items = re.findall('<div class="element">(.*?)<div class="meta_r">', content, re.DOTALL)
                for item in items:
                    url = re.findall('href=".(.*?)"', item)[0]
                    title = re.findall('title="(.*?)"', item)[0]
                    # first number in the title is the episode number
                    episode = re.findall(r'\d+', title)[0]
                    episodes.append({'name': title, 'url': base_url+url, 'episode': episode})
            if episodes:
                json_handle.save_json(site, cache_url+'episodes', episodes)
        except:
            pass
        return episodes
def get_video_url(id):
    """Return the 'High' quality mp4 stream for video *id*, authorizing with
    a bearer token; None when nothing matches or the request fails."""
    video_url = None
    try:
        token = get_token()
        stream_url = 'https://apiv2.vevo.com/video/%s/streams/mp4' % str(id)
        headers['Authorization'] = 'Bearer ' + token
        streams = requests.get(stream_url, headers=headers).json()
        for stream in streams:
            if stream['quality'] == 'High':
                video_url = stream['url']
                break
    except:
        pass
    return video_url
def get_new_series():
    """Scrape the 'latest anime uploads' box on the start page.

    Returns anime dicts; insert(0, ...) reverses the on-page order.
    """
    new_list = []
    try:
        content = requests.get(base_url, timeout=timeout).text
        new = re.findall('Die letzten Anime Uploads(.*?)</ul>', content, re.DOTALL)[0]
        items = re.findall('<li style(.*?)</li>', new, re.DOTALL)
        for item in items:
            cover = re.findall('<div style="background-image: url\(.(.+?)\)', item, re.DOTALL)[0]
            # the numeric id is the last query component of the cover url
            id = cover.split('=')[-1]
            url = '/anime-%s.html' % id
            name = re.findall('<div style="text-decoration.*?>(.+?)<', item, re.DOTALL)[0]
            new_list.insert(0, {'site': site, 'original': name, 'id': url, 'cover': base_url+cover})
    except:
        pass
    return new_list
def get_video_url(id):
    """Pick the highest progressive vimeo stream for *id*, capped at 720p."""
    video_url = None
    best_height = 0
    try:
        config_url = 'https://player.vimeo.com/video/%s/config' % str(id)
        config = requests.get(config_url, headers=headers).json()
        for stream in config['request']['files']['progressive']:
            if best_height < stream['height']:
                best_height = stream['height']
                video_url = stream['url']
            # 720p is good enough — stop scanning
            if best_height == 720:
                break
    except:
        pass
    return video_url
def get_artist_id(artist, token):
    """Find the urlSafeName for *artist* via the search API.

    Returns None when no exact (case-insensitive) name match is found and
    False when the request itself fails.
    """
    artist_id = None
    try:
        search_url = 'https://apiv2.vevo.com/search'
        headers['Authorization'] = 'Bearer ' + token
        query = {'q': artist, 'includecategories': 'music video'}
        data = requests.get(search_url, headers=headers, params=query).json()
        for candidate in data['artists']:
            if candidate['name'].encode('utf-8').lower() == artist.lower():
                artist_id = candidate['urlSafeName']
                break
    except:
        return False
    return artist_id
def get_episodes(id):
    """Read the RSS-like video feed for anime *id*.

    The site selects the series through an 'active' cookie taken from the
    page id. Returns name/url/episode dicts; [] on failure.
    """
    episodes = []
    try:
        active = id.split('page=')[-1]
        headers = {'Cookie': 'active='+active}
        content = requests.get(video_url, headers=headers, timeout=timeout).text
        items = re.findall('<item>(.*?)</item>', content, re.DOTALL)
        for item in items:
            title = re.findall('<title>(.*?)</title>', item)[0]
            # first number in the title, leading zeros stripped
            episode = re.findall(r'\d+', title)[0]
            while episode.startswith('0'):
                episode = episode[1:]
            link = re.findall('file="(.*?)"', item)[0]
            episodes.append({'name': title, 'url': link, 'episode': str(episode)})
    except:
        pass
    return episodes
def get_artist_id(artist):
    """Resolve the IMVDb numeric entity id for *artist*.

    The API result has no plain name field, so de-hyphenated slugs are
    compared instead. Returns None for no match, False on request failure.
    """
    artist_id = None
    try:
        search_url = 'https://imvdb.com/api/v1/search/entities'
        query = {'q': urllib.quote_plus(artist)}
        data = requests.get(search_url, headers=headers, params=query).json()
        wanted = artist.replace('-', ' ').lower()
        for candidate in data['results']:
            slug = candidate['slug'].replace('-', ' ').encode('utf-8').lower()
            if slug == wanted:
                artist_id = candidate['id']
                break
    except:
        return False
    return artist_id
def get_artists(query):
    """last.fm artist search for *query*: deduplicated, most listeners first,
    artists without an image dropped."""
    matches = []
    try:
        query_params = {'method': 'artist.search', 'artist': query,
                        'api_key': api_key, 'format': 'json', 'limit': '25'}
        response = requests.get(api_url, params=query_params).json()
        for match in response['results']['artistmatches']['artist']:
            picture = match['image'][-1]['#text']
            if picture:
                matches.append({'artist': match['name'],
                                'image': picture,
                                'listeners': match['listeners']})
    except:
        pass
    matches = remove_duplicates(matches)
    matches.sort(key=lambda m: int(m['listeners']), reverse=True)
    return matches
def get_video_url(_id):
    """Resolve an IMVDb video *_id* to a playable URL.

    Dispatches to the vimeo or youtube resolver depending on the first
    matching source entry; None when nothing resolves.
    """
    video_url = None
    try:
        info_url = 'http://imvdb.com/api/v1/video/%s' % _id
        data = requests.get(info_url, headers=headers,
                            params={'include': 'sources'}).json()
        for source in data['sources']:
            if source['source'] == 'vimeo':
                import vimeo
                video_url = vimeo.get_video_url(source['source_data'])
                break
            if source['source'] == 'youtube':
                import youtube
                video_url = youtube.get_video_url(source['source_data'])
                break
    except:
        pass
    return video_url
def get_complete_anime_list():
    """Scrape the full anime table from list_url; 1-day cache."""
    cache_url = common.cleanfilename(list_url)
    anime_list = json_handle.load_json(site, cache_url, cache_time=1)
    if anime_list:
        return anime_list
    else:
        anime_list = []
        try:
            content = requests.get(list_url, timeout=timeout).text
            liste = re.findall('<tr id=(.*?)</tr>', content, re.DOTALL)
            for anime in liste:
                url = re.findall('href="(.*?)"', anime, re.DOTALL)[0]
                name = re.findall('class="cmc">(.+?)</a>', anime, re.DOTALL)[0]
                anime_list.append({'site': site, 'original': name, 'id': url, 'beschreibung': ''})
            json_handle.save_json(site, cache_url, anime_list)
        except:
            pass
        return anime_list
def get_search_aids(search_entered=False):
    """Search the site and collect up to 20 anime ids from cover image URLs.

    When *search_entered* is falsy a Kodi keyboard prompts the user for the
    search term.
    """
    if not search_entered:
        kb = xbmc.Keyboard('', 'Suche', False)
        kb.doModal()
        search_entered = kb.getText().replace(' ','+')
    final_url = search_url+search_entered
    aids = []
    try:
        content = requests.get(final_url, headers=headers, timeout=timeout).text
        re_aid = re.findall('upload/cover/.*?/(\d+).png', content)
        for aid in re_aid:
            # id 0 appears to be a placeholder cover — skip it
            if not aid == '0':
                aids.append({ 'aid': aid })
                if len(aids) > 19:
                    break
    except:
        pass
    return aids
def get_artist_genre(artist):
    """Return up to five last.fm top tags for *artist*, skipping tags that
    are not musical genres (cached under the 'genre' key).

    Fixes: the skip list shadowed the builtin `list` and was recreated on
    every loop iteration; hoisted and renamed. `== None` -> `is None`.
    """
    genre_list = cache.get_value(artist,'genre',lastfm='tag')
    if genre_list is None:
        genre_list = []
        # non-genre tags to ignore
        skip_tags = ['german','deutsch','seen live','made in germany',
                     'japanese','anime','vocalists','songwriter']
        try:
            params = {'method':'artist.gettoptags',
                      'artist':artist,
                      'api_key':api_key,
                      'format':'json'}
            json_data = requests.get(api_url, params=params).json()
            for tag in json_data['toptags']['tag']:
                genre = tag['name']
                if not any(x in genre.lower() for x in skip_tags):
                    genre_list.append(genre)
                    if len(genre_list) == 5:
                        break
            cache.save_value(artist,'genre',genre_list,lastfm='tag')
        except:
            pass
    return genre_list
def get_videos(artist):
    """List music videos for *artist* via the assetsByArtistId API.

    Returns a list of video dicts, or False when the artist lookup or the
    request fails.
    """
    videos = []
    artist_id = get_artist_id(artist)
    if artist_id:
        try:
            params = {
                'method': 'Artist.assetsByArtistId',
                'artistId': artist_id
            }
            json_data = requests.get(base_url, headers=headers, params=params).json()
        except:
            return False
        try:
            for item in json_data:
                try:
                    v = item['asset']
                    id = v['token']
                    artist = v['display_artist_title']
                    title = v['title']
                    image_id = str(v['video_file_id'])
                    image = ''
                    duration = ''
                    image = create_image_url(image_id)
                    try:
                        # presumably duration is frames at 25 fps — TODO confirm
                        duration = str(int(v['duration']) / 25)
                    except:
                        pass
                    if v['type'] == 'MusicVideo':
                        videos.append({
                            'site': site,
                            'artist': [artist],
                            'title': title,
                            'duration': duration,
                            'id': id,
                            'image': image
                        })
                except:
                    pass
        except:
            pass
    elif artist_id == False:
        return False
    return videos
def get_videos(artist):
    """Search clipfish for music videos whose title starts with *artist*.

    Duplicate of the compact variant above; returns a list of video dicts
    (newest inserted first), or False when the request fails.
    """
    videos = []
    # the search endpoint only tolerates alphanumeric query strings
    q = re.sub('[^\s0-9a-zA-Z]+', '', artist)
    url = 'http://www.clipfish.de/devmobileapp/searchvideos/%s/mostrecent/1/100' % q
    try:
        json_data = requests.get(url).json()
    except:
        return False
    try:
        result = json_data['videos']
        for r in result:
            try:
                t = r['title']
                # titles come as "<artist> - <song>[ - <suffix>]"
                match = t.split(' - ')
                name = match[0].strip()
                if name.encode('utf-8').lower() == artist.lower():
                    title = match[1]
                    if len(match) > 2:
                        try:
                            title = title + match[2]
                        except:
                            pass
                    id = r['video_url_wifi_quality']
                    image = r['media_thumbnail']
                    duration = 0
                    try:
                        duration = r['media_length']
                    except:
                        pass
                    # skip clips shorter than 150s (presumably teasers — confirm)
                    if int(duration) > 150:
                        videos.insert(
                            0, {
                                'site': site,
                                'artist': [name],
                                'title': title,
                                'duration': duration,
                                'id': id,
                                'image': image
                            })
            except:
                pass
    except:
        pass
    return videos
def get_complete_anime_list():
    """Scrape the complete anime table from list_url; 1-day cache."""
    cache_url = common.cleanfilename(list_url)
    anime_list = json_handle.load_json(site, cache_url, cache_time=1)
    if anime_list:
        return anime_list
    else:
        anime_list = []
        try:
            content = requests.get(list_url, timeout=timeout).text
            animetable = re.findall('<ul id="animetable">(.*?)</table>', content, re.DOTALL)[0]
            liste = re.findall('<a href="(.*?)"><li>(.+?)</li></a>', animetable, re.DOTALL)
            for url, name in liste:
                # the numeric page id doubles as the cover image filename
                c = url.split('page=')[-1]
                cover = 'http://tavernakoma.net/images/player/%s.jpg' % c
                anime_list.append({'site': site, 'original': name, 'id': url, 'cover': cover, 'beschreibung': ''})
            json_handle.save_json(site, cache_url, anime_list)
        except:
            pass
        return anime_list
def get_complete_anime_list():
    """Build the full anime list from the genre API; cached for 1 day."""
    cache_url = common.cleanfilename(genre_url)
    cached = json_handle.load_json(site, cache_url, cache_time=1)
    if cached:
        return cached
    anime_list = []
    genre = 'Anime'
    try:
        payload = requests.get(genre_url, timeout=timeout).json()
        for serie in payload[genre]['series']:
            sid = serie['id']
            anime_list.append({
                'site': site,
                'original': serie['name'].encode('UTF-8'),
                'id': sid,
                'cover': 'https://s.burning-seri.es/img/cover/%s.jpg' % sid,
                'beschreibung': '',
                'year': '',
                'genre': '',
            })
        json_handle.save_json(site, cache_url, anime_list)
    except:
        pass
    return anime_list
def get_artist_genre(artist):
    """Return up to five last.fm top tag names for *artist* (cached).

    Unlike the filtered variants in this file, this one keeps every tag.
    """
    genre_list = cache.get_value(artist, 'genre', lastfm='tag')
    if genre_list is None:
        genre_list = []
        try:
            query = {
                'method': 'artist.gettoptags',
                'artist': artist,
                'api_key': api_key,
                'format': 'json'
            }
            data = requests.get(api_url, params=query).json()
            for tag in data['toptags']['tag']:
                genre_list.append(tag['name'])
                if len(genre_list) == 5:
                    break
            cache.save_value(artist, 'genre', genre_list, lastfm='tag')
        except:
            pass
    return genre_list
def get_similar_artists(artist):
    """Similar artists from last.fm, capped at common.limit_artists() (cached)."""
    n = '100'
    cached = cache.get_value(artist, n, lastfm=True)
    if cached is None:
        cached = []
        try:
            query = {'method': 'artist.getsimilar',
                     'artist': artist,
                     'autocorrect': '1',
                     'limit': n,
                     'api_key': api_key,
                     'format': 'json'}
            data = requests.get(api_url, params=query).json()
            for entry in data['similarartists']['artist']:
                cached.append(entry['name'])
            cache.save_value(artist, n, cached, lastfm=True)
        except:
            pass
    result = []
    for name in cached:
        result.append(name)
        if len(result) == int(common.limit_artists()):
            break
    return result
def get_anime_info(id):
    """Scrape title, plot, genres, year and cover for anime *id*; 7-day cache."""
    cache_url = common.cleanfilename(id)
    anime = json_handle.load_json(site, cache_url, cache_time=7)
    if anime:
        return anime
    else:
        anime = []
        try:
            content = requests.get(base_url+id, timeout=timeout).text
            name = re.findall('<li><b>Titel</b>: (.+?)</li>', content)[0]
            plot = re.findall('<li><b>Beschreibung</b>: (.*?)</li>', content, re.DOTALL)[0]
            genres = re.findall('<li><b>Genre</b>: (.*?)</li>', content)[0]
            try:
                genre = genres.split(',')
            except:
                genre = [genres]
            year = re.findall('<li><b>Jahr</b>: (.*?)</li>', content)[0]
            cover = re.findall('<div class="thumbnail".*?<img src="(.*?)"', content, re.DOTALL)[0]
            anime = ({'site': site, 'original': name, 'id': id, 'cover': cover, 'beschreibung': plot, 'genre': genre, 'year': year})
            json_handle.save_json(site, cache_url, anime)
        except:
            pass
        return anime
def get_videos(artist):
    """List music videos for *artist* via the artist videos API.

    Returns a list of video dicts (only category 'Music Video'), or False
    when the artist lookup or the request fails.
    """
    videos = []
    token = get_token()
    artist_id = get_artist_id(artist, token)
    if artist_id:
        try:
            url = 'https://apiv2.vevo.com/artist/%s/videos' % str(artist_id)
            headers['Authorization'] = 'Bearer ' + token
            params = {'size': '200', 'page': '1'}
            json_data = requests.get(url, headers=headers, params=params).json()
        except:
            return False
        try:
            for v in json_data['videos']:
                try:
                    id = v['isrc']
                    title = v['title']
                    image = v['thumbnailUrl']
                    duration = ''
                    try:
                        duration = v['duration']
                    except:
                        pass
                    if v['categories'][0] == 'Music Video':
                        videos.append({
                            'site': site,
                            'artist': [artist],
                            'title': title,
                            'duration': duration,
                            'id': id,
                            'image': image
                        })
                except:
                    pass
        except:
            pass
    elif artist_id == False:
        return False
    return videos
def get_video_url(id):
    """Resolve clip token *id* to a stream URL, best quality first.

    Returns the first resolvable quality ('high', then 'medium', 'low'),
    None when none resolve, or False when the API request fails.
    """
    video_url = None
    try:
        query = {
            'method': 'Asset.getClipForToken',
            'token': id,
            'streamingMethod': 'http'
        }
        data = requests.get(base_url, headers=headers, params=query).json()
        files = data[0]['clip']['tokens']
        for quality in ['high', 'medium', 'low']:
            try:
                # keep the raw token if check_file_type raises (as before)
                video_url = files[quality]
                video_url = check_file_type(video_url)
            except:
                pass
            if video_url:
                break
    except:
        return False
    return video_url
def _load_json_script(self, java_script_url):
    """Download the JavaScript at *java_script_url* and hand it to the parser.

    A scheme-less URL is prefixed with http://; download errors result in
    an empty script being parsed.
    """
    request_headers = {
        "Connection": "keep-alive",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "DNT": "1",
        "Accept-Encoding": "gzip, deflate",
        "Accept-Language": "en-US,en;q=0.8,de;q=0.6",
    }
    target = java_script_url
    if not target.startswith("http"):
        target = "http://" + target
    source = ""
    try:
        response = requests.get(target, headers=request_headers,
                                verify=False, allow_redirects=True)
        source = response.text
    except:
        pass
    return self._load_java_script(source)
def get_artists_by_tag(tag):
    """Top last.fm artists for *tag*, capped at common.limit_tag() (cached)."""
    n = '500'
    stored = cache.get_value(tag, n, lastfm='tag')
    if stored is None:
        stored = []
        try:
            query = {'method': 'tag.gettopartists',
                     'tag': tag,
                     'api_key': api_key,
                     'format': 'json',
                     'limit': n}
            data = requests.get(api_url, params=query).json()
            for entry in data['topartists']['artist']:
                try:
                    stored.append({'artist': entry['name']})
                except:
                    pass
            cache.save_value(tag, n, stored, lastfm='tag')
        except:
            pass
    result = []
    for entry in stored:
        result.append(entry)
        if len(result) == int(common.limit_tag()):
            break
    return result
def get_videos(artist):
    """List IMVDb videos for *artist*.

    Returns video dicts, or False when the artist lookup or the request
    itself fails.
    """
    videos = []
    artist_id = get_artist_id(artist)
    if artist_id:
        try:
            entity_url = 'https://imvdb.com/api/v1/entity/%s' % str(artist_id)
            query = {
                'per_page': '50',
                'page': '1',
                'include': 'artist_videos',
            }
            data = requests.get(entity_url, headers=headers, params=query).json()
        except:
            return False
        try:
            for entry in data['artist_videos']['videos']:
                try:
                    videos.append({
                        'site': site,
                        'artist': [artist],
                        'title': str(entry['song_title']),
                        'duration': '',
                        'id': entry['id'],
                        'image': entry['image']['o']
                    })
                except:
                    pass
        except:
            pass
    elif artist_id == False:
        return False
    return videos
def get_videos(artist):
    """Search IMVDb videos by artist name.

    Only results whose first credited artist matches *artist* exactly
    (case-insensitive) are kept; False when the request fails.
    """
    videos = []
    try:
        search_url = 'https://imvdb.com/api/v1/search/videos'
        query = {
            'q': urllib.quote_plus(artist),
            'per_page': '100',
            'page': '1'
        }
        data = requests.get(search_url, headers=headers, params=query).json()
    except:
        return False
    try:
        for entry in data['results']:
            try:
                name = entry['artists'][0]['name']
                if name.encode('utf-8').lower() != artist.lower():
                    continue
                # re-quote the image URL, keeping URL metacharacters intact
                image = urllib.quote(entry['image']['o'].encode('utf-8'),
                                     safe='%/:=&?~#+!$,;\'@()*[]')
                videos.append({
                    'site': site,
                    'artist': [artist],
                    'title': str(entry['song_title']),
                    'duration': '',
                    'id': str(entry['id']),
                    'image': image
                })
            except:
                pass
    except:
        pass
    return videos
def get_content(url):
    """Fetch *url* and normalize its markup for the regex scrapers.

    NOTE(review): requests' .text is already decoded text; the extra
    decode('windows-1251') only behaves on Python 2 for ASCII-safe pages —
    confirm intended. The replace("&","&") below is a no-op, presumably a
    garbled '&amp;' entity replacement — confirm against the original.
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 5.1; rv:27.0) Gecko/20100101 Firefox/27.0'}
    content = requests.get(url, headers=headers, timeout=timeout).text
    content = content.decode('windows-1251').encode('utf-8', 'ignore')
    # flatten whitespace and fix markup quirks before regex parsing
    content = content.replace("\n","").replace("\t","").replace("&","&").replace("–","-").replace("<BR>","").replace("</B>","</b>").replace(' title="Socks5 proxy 50"','')
    return content
def get_video_url(id):
    """Resolve a playable stream URL for YouTube video *id*, optionally via
    the configured proxy.

    Returns the stream URL (with a deciphered signature appended when
    needed), a '<src>|Cookie=...' proxied URL, or None.

    Fixes: `str.find` returns -1 when '</script>' is absent; the original
    `if pos:` treated -1 as truthy and sliced off the final character.
    Also guards `cipher` (None when no js URL was found) before calling
    get_signature.
    """
    video_url = None
    headers = {'Host': 'www.youtube.com',
               'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.36 Safari/537.36',
               'Referer': 'https://www.youtube.com',}
    params = {'v': id}
    url = 'https://youtube.com/watch'
    html = ''
    cookie = ''
    if yt_proxy:
        try:
            token = common.get_yt_pxy_token()
            r = pxy_request(url, params)
            html = r.text
            # proxy session cookie ('s=...') + token authorize the stream
            c = r.headers.get('set-cookie')
            s = re.search('s=(.+?)(?:$|;)', c)
            if s and token:
                cookie = 's=%s; token=%s' % (s.group(1), token)
        except:
            pass
    else:
        try:
            html = requests.get(url, params=params, headers=headers).text
        except:
            pass
    # narrow html down to the "<script>var ytplayer ...</script>" blob
    pos = html.find('<script>var ytplayer')
    if pos >= 0:
        html2 = html[pos:]
        pos = html2.find('</script>')
        if pos >= 0:  # -1 means "not found"; keep the whole blob then
            html = html2[:pos]
    # proxy html: no ytplayer config, stream is in a <source> tag
    else:
        p = re.search('<source src="(.+?)"', html, re.DOTALL)
        if p and cookie:
            src = '%s|Cookie=%s' % (p.group(1), urllib.quote_plus(cookie))
            return src
        else:
            return video_url
    # standard html: locate the cipher JS and the stream map
    re_match_js = re.search(r'\"js\"[^:]*:[^"]*\"(?P<js>.+?)\"', html)
    js = ''
    cipher = None
    if re_match_js:
        js = re_match_js.group('js').replace('\\', '').strip('//')
        cipher = Cipher(java_script_url=js)
    re_match = re.search(r'\"url_encoded_fmt_stream_map\"\s*:\s*\"(?P<url_encoded_fmt_stream_map>[^"]*)\"', html)
    if re_match:
        url_encoded_fmt_stream_map = re_match.group('url_encoded_fmt_stream_map')
        url_encoded_fmt_stream_map = url_encoded_fmt_stream_map.split(',')
        for value in url_encoded_fmt_stream_map:
            value = value.replace('\\u0026', '&')
            attr = dict(urlparse.parse_qsl(value))
            url = attr.get('url', None)
            if url:
                url = urllib.unquote(attr['url'])
                # already signed -> use as-is
                if 'signature' in url:
                    video_url = url
                    break
                signature = ''
                if attr.get('s', '') and cipher is not None:
                    signature = cipher.get_signature(attr['s'])
                elif attr.get('sig', ''):
                    signature = attr.get('sig', '')
                if signature:
                    url += '&signature=%s' % signature
                    video_url = url
                    break
    return video_url