# bilibili bangumi: download the weekly timeline once, then serve cached
# results for the selected weekday tag
def explore(tag, country, page):
    if bangumi_list is None:
        url = 'http://bangumi.bilibili.com/jsonp/timeline_v2?appkey=' + appkey
        moonplayer.download_page(url, explore_cb, tag)
    else:
        result = bangumi_list[tags.index(tag)]
        moonplayer.res_show(result)
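# Sketch of the module-level state the bilibili explore()/explore_cb() pair
# assumes; the actual tag labels and appkey live in the real plugin and are
# not reproduced here.
appkey = 'YOUR_BILIBILI_APPKEY'          # placeholder, not the real key
tags = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']   # assumed weekday labels
bangumi_list = None                      # filled by explore_cb on the first fetch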
# YouTube search: build the result list, cache per-video details, and remember
# the nextPageToken so the following page can be requested later
def search_cb(content, page):
    global ytb_details
    data = json.loads(content)
    result = []
    ytb_details = {}
    for item in data['items']:
        name = item['snippet']['title']
        url = 'https://www.youtube.com/watch?v=' + item['id']['videoId']
        t = {'name': name,
             'url': url,
             'pic_url': item['snippet']['thumbnails']['default']['url']}
        result.append(t)
        detail = {'name': name,
                  'image': item['snippet']['thumbnails']['high']['url'],
                  'dates': item['snippet']['publishedAt'].split('T'),
                  'summary': item['snippet']['description'],
                  'source': [name, url]}
        ytb_details[url] = detail
    # store the token of the next page only once per page
    if page + 1 == len(pageTokens) and 'nextPageToken' in data:
        pageTokens.append(data['nextPageToken'])
    moonplayer.res_show(result)
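# A minimal sketch of the search() entry point this YouTube callback is
# assumed to pair with: one pageToken per result page, so requesting page N
# reuses pageTokens[N]. The name api_key and the exact query parameters are
# assumptions, not taken from the original plugin.
try:
    from urllib import quote          # Python 2
except ImportError:
    from urllib.parse import quote    # Python 3

pageTokens = ['']   # the token of the first page is empty

def search(keyword, page):
    url = ('https://www.googleapis.com/youtube/v3/search'
           '?part=snippet&type=video&maxResults=20'
           '&q=' + quote(keyword) +
           '&key=' + api_key +          # api_key: the plugin's API key (assumed name)
           '&pageToken=' + pageTokens[page])
    moonplayer.download_page(url, search_cb, page)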
def search_cb(content, data):
    page = json.loads(content)
    result = []
    for item in page[u'subjects']:
        title = item['title'].encode('utf-8')
        pic_url = item['images']['medium'].encode('utf-8')
        mv_id = item['id'].encode('utf-8')
        result.append({'name': title, 'pic_url': pic_url, 'flag': mv_id})
    moonplayer.res_show(result)
def search_cb(content, data):
    items = json.loads(content)['data']['items']
    # covers may be protocol-relative; prepend "http:" when needed
    try:
        if items[0]['cover'].startswith('//'):
            pr = 'http:'
        else:
            pr = ''
    except (IndexError, KeyError):
        pr = ''
    result = [{'name': i['title'], 'url': i['uri'], 'pic_url': pr + i['cover']}
              for i in items]
    moonplayer.res_show(result)
def search_cb(content, data):
    page = json.loads(content)
    result = []
    for item in page[u'subjects']:
        title = item['title'].encode('utf-8')
        pic_url = item['images']['medium'].encode('utf-8')
        mv_id = item['id'].encode('utf-8')
        result.append({'name': title, 'pic_url': pic_url, 'flag': mv_id})
    moonplayer.res_show(result)
def search_by_key_cb(content, data):
    content = content.replace('\n', '')
    result = []
    pics = []
    items = list_links(content, '/detail/show')
    match = pic_re.search(content)
    while match:
        pics.append(match.group(1))
        match = pic_re.search(content, match.end(0))
    for i in xrange(0, len(items), 2):
        result.append({'name': items[i], 'url': items[i+1], 'pic_url': pics[i/2]})
    moonplayer.res_show(result)
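# Several of the scraping callbacks (this one and the Sohu/Youku ones below)
# rely on a list_links(html, prefix) helper defined elsewhere in the plugin
# package. This is only a sketch of the assumed behaviour: return a flat
# [name1, url1, name2, url2, ...] list for every <a> whose href starts with
# the given prefix; the real implementation may differ.
import re

_link_re = re.compile(r'<a[^>]+href="([^"]+)"[^>]*>(.*?)</a>', re.S)

def list_links(html, prefix):
    result = []
    for url, text in _link_re.findall(html):
        if url.startswith(prefix):
            name = re.sub(r'<[^>]+>', '', text).strip()   # drop nested tags
            result.append(name)
            result.append(url)
    return result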
def explore_cb(page, data):
    data = json.loads(page)['data']
    result = []
    for item in data:
        name = item['title']
        url = item['videoLink']
        pic_url = item['img']
        if not url.startswith('http'):
            url = 'http:' + url
        if not pic_url.startswith('http'):
            pic_url = 'http:' + pic_url
        result.append({'name': name, 'url': url, 'pic_url': pic_url})
    moonplayer.res_show(result)
# Sohu search: keep only album entries and cache them by aid for later lookups
def search_cb(content, data):
    global sohu_cached_info
    items = json.loads(content)['data']['items']
    albums = []
    for i in items:
        if 'aid' in i and 'is_album' in i and i['is_album'] == 1:
            albums.append(i)
    sohu_cached_info = {str(i['aid']): i for i in albums}
    result = [{'name': i['album_name'],
               'url': str(i['aid']),
               'pic_url': i['ver_big_pic']} for i in albums]
    moonplayer.res_show(result)
# bilibili bangumi timeline: bucket the entries by weekday, then show the
# bucket that matches the requested tag
def explore_cb(content, tag):
    global bangumi_list
    bangumi_list = [[], [], [], [], [], [], []]
    items = json.loads(content)['list']
    # covers may be protocol-relative; prepend "http:" when needed
    try:
        if items[0]['cover'].startswith('//'):
            pr = 'http:'
        else:
            pr = ''
    except (IndexError, KeyError):
        pr = ''
    for item in items:
        day = item['weekday']
        bangumi_list[day].append({'name': item['title'],
                                  'url': item['url'],
                                  'pic_url': pr + item['cover']})
    result = bangumi_list[tags.index(tag)]
    moonplayer.res_show(result)
def explore_cb(page, data):
    # page = page.replace('\n', '')
    dict_url_img = {}
    match = img_re.search(page)
    while match:
        url, img = match.group(1, 2)
        dict_url_img[url] = img
        match = img_re.search(page, match.end(0))
    result = list_links(page, "http://tv.sohu.com/item/")
    items = []
    for i in xrange(0, len(result), 2):
        name = result[i]
        url = result[i + 1]
        pic_url = dict_url_img[url]
        item = {"name": name, "url": url, "pic_url": pic_url}
        items.append(item)
    moonplayer.res_show(items)
def explore_cb(page, data):
    global sohu_cached_info
    global sohu_more_list
    global sohu_offset
    data = json.loads(page)['data']
    try:
        # first page
        data_list = data['columns'][0]['data_list']
        sohu_offset = len(data_list)
        sohu_more_list = data['columns'][0]['more_list']
    except KeyError:
        # not first page
        data_list = data['videos']
    sohu_cached_info = {str(i['aid']): i for i in data_list}
    result = [{'name': i['album_name'],
               'url': str(i['aid']),
               'pic_url': i['ver_big_pic']} for i in data_list]
    moonplayer.res_show(result)
def search_cb(page, data):
    name2pic = {}
    result = []
    # Read all pic urls
    match = pic_re.search(page)
    while match:
        (url, name) = match.group(1, 2)
        name2pic[name] = url
        match = pic_re.search(page, match.end(0))
    # Read links, bind them with relative pic urls
    links = list_links(page, "http://www.youku.com/show_page/")
    for i in xrange(0, len(links), 2):
        name = links[i]
        url = links[i + 1]
        try:
            result.append({"name": name, "flag": url, "pic_url": name2pic[name]})
        except KeyError:
            pass
    moonplayer.res_show(result)
def search_by_key_cb(content, data):
    result = []
    items = content.split('<div class="detail">')
    for item in items:
        item = item.split('</div><!--detail end-->')[0].replace('\n', '')
        name_url = list_links(item, '/detail/show/')
        if len(name_url):
            name = name_url[0]
            url = name_url[1]
            match = pic_re.search(item)
            if match:
                pic = match.group(1)
                result.append({'name': name, 'url': url, 'pic_url': pic})
    rest = items[-1].split('</div><!--item end-->', 1)[1].replace('\n', '')
    match = rest_re.search(rest)
    while match:
        (url, name, pic) = match.group(1, 2, 3)
        result.append({'name': name, 'url': url, 'pic_url': pic})
        match = rest_re.search(rest, match.end(0))
    moonplayer.res_show(result)
def search_cb(page, data):
    name2pic = {}
    result = []
    # Read all pic urls
    match = pic2_re.search(page)
    while match:
        (url, name) = match.group(1, 2)
        name2pic[name] = url
        match = pic2_re.search(page, match.end(0))
    # Read links, bind them with relative pic urls
    links = list_links(page, '/detail/show/')
    for i in xrange(0, len(links), 2):
        name = links[i]
        url = links[i+1]
        try:
            result.append({'name': name, 'url': url, 'pic_url': name2pic[name]})
        except KeyError:
            pass
    moonplayer.res_show(result)
def search_cb(page, data):
    name2pic = {}
    result = []
    # Read all pic urls
    match = pic_re.search(page)
    while match:
        (url, name) = match.group(1, 2)
        name2pic[name] = url
        match = pic_re.search(page, match.end(0))
    # Read links, bind them with relative pic urls
    links = list_links(page, 'http://www.youku.com/show_page/')
    for i in xrange(0, len(links), 2):
        name = links[i]
        url = links[i+1]
        try:
            result.append({'name': name, 'flag': url, 'pic_url': name2pic[name]})
        except KeyError:
            pass
    moonplayer.res_show(result)
def explore_cb(page, data):
    # keep only the part before the "大家都在看" ("everyone is watching") section
    page = page.split('大家都在看')[0]
    name2pic = {}
    result = []
    # Read all pic urls
    match = pic_re.search(page)
    while match:
        url, name = match.group(1, 2)
        name2pic[name] = url
        match = pic_re.search(page, match.end(0))
    # Read links, bind them with relative pic urls
    links = list_links(page, '//v.youku.com/v_show')
    for i in xrange(0, len(links), 2):
        name = links[i]
        url = 'http:' + links[i+1]
        try:
            result.append({'name': name, 'url': url, 'pic_url': name2pic[name]})
        except KeyError:
            pass
    moonplayer.res_show(result)
def search_cb(content, data):
    parser = SearchResultParser()
    parser.feed(content.decode('UTF-8'))
    moonplayer.res_show(parser.result)
def search_cb(content, data):
    parser = SearchResultParser()
    # decode only when the content arrives as bytes (Python 2/3 compatibility)
    if hasattr(content, 'decode'):
        content = content.decode('UTF-8')
    parser.feed(content)
    moonplayer.res_show(parser.result)
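# The two callbacks above assume a SearchResultParser defined elsewhere in the
# plugin. This is only a structural sketch: an HTMLParser subclass collecting
# result dicts into self.result. The tag/attribute rule below is hypothetical;
# the real parser matches the site's actual markup.
try:
    from HTMLParser import HTMLParser      # Python 2
except ImportError:
    from html.parser import HTMLParser     # Python 3

class SearchResultParser(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        self.result = []

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        # Hypothetical rule: treat an <a> carrying href and data-poster
        # attributes as one search hit
        if tag == 'a' and 'href' in attrs and 'data-poster' in attrs:
            self.result.append({'name': attrs.get('title', ''),
                                'url': attrs['href'],
                                'pic_url': attrs['data-poster']})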