def get_urls(url):
    """Resolve a video page URL into playable stream URLs.

    Looks up the video id for *url*, then resolves that id to a list of
    URL dicts.  Best-effort: any failure yields an empty list.

    Args:
        url: the video page URL.

    Returns:
        parser2dic(...) applied to the resolved list ([] on failure).
    """
    try:
        vid = get_vid(url)
        urls = get_urls_by_vid(vid)
    except Exception as e:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; log the cause instead of hiding it.
        print(e)
        urls = []
    return parser2dic(urls)
def get_urls(html):
    """Extract the CNTV pid from *html* and resolve its stream URLs.

    Best-effort: on any failure ``None`` is handed to parser2dic, matching
    the original fallback.

    Args:
        html: page HTML containing the CNTV pid.

    Returns:
        parser2dic(...) over the resolved URLs (or None on failure).
    """
    try:
        pid = get_cntv_pid(html)
        urls = get_cntv_urls_by_id(pid)
    except Exception as e:
        # Narrowed from a bare `except:`; log instead of silently dropping.
        print(e)
        urls = None
    return parser2dic(urls)
def get_urls(html):
    """Extract the CNTV pid from *html* and resolve its stream URLs.

    Best-effort: on any failure ``None`` is handed to parser2dic, matching
    the original fallback.

    Args:
        html: page HTML containing the CNTV pid.

    Returns:
        parser2dic(...) over the resolved URLs (or None on failure).
    """
    try:
        pid = get_cntv_pid(html)
        urls = get_cntv_urls_by_id(pid)
    except Exception as e:
        # Narrowed from a bare `except:`; log instead of silently dropping.
        print(e)
        urls = None
    return parser2dic(urls)
def get_urls(html):
    """Fetch QQ stream URLs concurrently, one worker thread per format.

    A ``getSegUrls`` worker is started for each (type, fmt) pair from the
    module-level ``types``/``fmts`` sequences; after joining, every worker
    whose ``.template`` dict is non-empty contributes one entry.

    Args:
        html: page HTML containing the QQ vid.

    Returns:
        parser2dic(...) over the collected template dicts.
    """
    vid = get_qq_vid(html)
    workers = []
    # Start all workers first so the fetches overlap.
    for typ, fmt in zip(types, fmts):
        worker = getSegUrls(typ, [vid, fmt])
        workers.append(worker)
        worker.start()
    allurls = []
    for worker in workers:
        worker.join()
        if worker.template != {}:
            allurls.append(worker.template)
    return parser2dic(allurls)
def get_urls(html):
    """Fetch QQ stream URLs concurrently, one worker thread per format.

    A ``getSegUrls`` worker is started for each (type, fmt) pair from the
    module-level ``types``/``fmts`` sequences; after joining, every worker
    whose ``.template`` dict is non-empty contributes one entry.

    Args:
        html: page HTML containing the QQ vid.

    Returns:
        parser2dic(...) over the collected template dicts.
    """
    vid = get_qq_vid(html)
    workers = []
    # Start all workers first so the fetches overlap.
    for typ, fmt in zip(types, fmts):
        worker = getSegUrls(typ, [vid, fmt])
        workers.append(worker)
        worker.start()
    allurls = []
    for worker in workers:
        worker.join()
        if worker.template != {}:
            allurls.append(worker.template)
    return parser2dic(allurls)
def get_urls(html):
    """Resolve a Wasu video page into a single standard-definition URL.

    Builds the getVideoUrl API request from the id embedded in *html*,
    parses the XML response, and takes the <video> element text.  On any
    failure the literal string 'Error' is used as the URL — a deliberate
    best-effort fallback kept from the original.

    Args:
        html: the Wasu page HTML.

    Returns:
        parser2dic(...) over a one-entry rate/furls list.
    """
    try:
        vid = get_wasu_id(html)
        print(vid)
        suffix = get_suffix_by_html(html)
        url = 'http://www.wasu.cn/Api/getVideoUrl/id/' + vid + suffix
        info = get_content(url)
        root = ET.fromstring(info)
        url = root.find('video').text
        print(url)
    except Exception as e:
        # Narrowed from a bare `except:`; log the cause for debugging.
        print(e)
        url = 'Error'
    return parser2dic([{'rate': '标清', 'furls': [url]}])
def get_urls(html):
    """Resolve a Wasu video page into a single standard-definition URL.

    Builds the getVideoUrl API request from the id embedded in *html*,
    parses the XML response, and takes the <video> element text.  On any
    failure the literal string "Error" is used as the URL — a deliberate
    best-effort fallback kept from the original.

    Args:
        html: the Wasu page HTML.

    Returns:
        parser2dic(...) over a one-entry rate/furls list.
    """
    try:
        vid = get_wasu_id(html)
        print(vid)
        suffix = get_suffix_by_html(html)
        url = "http://www.wasu.cn/Api/getVideoUrl/id/" + vid + suffix
        info = get_content(url)
        root = ET.fromstring(info)
        url = root.find("video").text
        print(url)
    except Exception as e:
        # Narrowed from a bare `except:`; log the cause for debugging.
        print(e)
        url = "Error"
    return parser2dic([{"rate": "标清", "furls": [url]}])
def get_urls(url):
    """Collect stream URLs from three sources for one video page.

    Tries, in order: a news vid derived from the URL itself, an ipad vid
    from the page HTML, and per-segment vids (each tagged with its clarity
    label from the module-level ``rate`` list).  Sources that yield no id
    or no URL are skipped silently.

    Args:
        url: the video page URL.

    Returns:
        parser2dic(...) over the accumulated rate/furls dicts.
    """
    urls = []
    newsvid = get_newsvid(url)
    if newsvid is not None:
        newsurl = get_news_url_by_vid(newsvid)
        if newsurl is not None:
            urls.append({"rate": "标清", "furls": [newsurl]})
    html = get_content(url)
    ipadvid = get_ipadvid(html)
    if ipadvid is not None:
        ipadurl = get_urls_by_vid(ipadvid)
        if ipadurl is not None:
            urls.append({"rate": "标清", "furls": ipadurl})
    segvids = get_segvids(html)
    if segvids is not None:
        # Renamed the loop variable: the original reassigned `url`,
        # shadowing the function parameter.
        for i, segvid in enumerate(segvids):
            segurl = get_urls_by_vid(segvid)
            if segurl is not None:
                urls.append({"rate": rate[i], "furls": segurl})
    return parser2dic(urls)
def get_urls(url):
    """Collect stream URLs from three sources for one video page.

    Tries, in order: a news vid derived from the URL itself, an ipad vid
    from the page HTML, and per-segment vids (each tagged with its clarity
    label from the module-level ``rate`` list).  Sources that yield no id
    or no URL are skipped silently.

    Args:
        url: the video page URL.

    Returns:
        parser2dic(...) over the accumulated rate/furls dicts.
    """
    urls = []
    newsvid = get_newsvid(url)
    if newsvid is not None:
        newsurl = get_news_url_by_vid(newsvid)
        if newsurl is not None:
            urls.append({'rate': '标清', 'furls': [newsurl]})
    html = get_content(url)
    ipadvid = get_ipadvid(html)
    if ipadvid is not None:
        ipadurl = get_urls_by_vid(ipadvid)
        if ipadurl is not None:
            urls.append({'rate': '标清', 'furls': ipadurl})
    segvids = get_segvids(html)
    if segvids is not None:
        # Renamed the loop variable: the original reassigned `url`,
        # shadowing the function parameter.
        for i, segvid in enumerate(segvids):
            segurl = get_urls_by_vid(segvid)
            if segurl is not None:
                urls.append({'rate': rate[i], 'furls': segurl})
    return parser2dic(urls)
def get_urls(url):
    """Resolve stream URLs through the flvcd.com parse service.

    Queries flvcd once per entry of the module-level ``form`` list (one
    per clarity level in ``rate``), preferring the multi-segment match
    pattern and falling back to the single-URL pattern.

    Args:
        url: the video page URL to hand to flvcd.

    Returns:
        parser2dic(...) over the accumulated rate/furls dicts.
    """
    urls = []
    for i in range(3):
        flvcdurl = "http://www.flvcd.com/parse.php?format={}&kw={}".format(
            form[i], quote(url))
        content = get_content(flvcdurl)
        furls = r2('<BR><a href=\"(.*?)\" target=', content)
        # Kept the explicit `!= []` test: r2's behavior on no match is not
        # visible here, so a plain truthiness check could change behavior.
        if furls != []:
            print('flvcd multi')
            urls.append({'rate': rate[i], 'furls': furls})
        else:
            print('flvcd single')
            sfurls = r1('<br>.*?<a href=\"(.*?)\" target', content)
            print('sfurls', sfurls)
            if sfurls is not None:
                urls.append({'rate': rate[i], 'furls': [sfurls]})
    return parser2dic(urls)
url = url.replace('splatid=101', 'splatid=1401') print rate[1] print url template['furls'] = [url] urls.append(template) if 'tss=ios' in url: ano = url.replace('tss=ios', 'tss=no') else: ano = url.replace('tss=no', 'tss=ios') print ano urls.append({'rate': rate[1], 'furls': [ano]}) return urls def get_urls(html): try: vid = get_letv_vid(html) print vid con = get_urls_by_vid(vid) except Exception, e: print e con = [] return parser2dic(con) #http://www.letv.com/ptv/vplay/2100756.html #http://www.letv.com/ptv/vplay/2184096.html if __name__ == "__main__": urls = get_urls('http://www.letv.com/ptv/vplay/21822487.html') print urls
furls = 'http://pl.youku.com/playlist/m3u8?' + get_m3u8url_by_param( vid, ep, ip, stream_id) template['furls'] = [furls] urls.append(template) except Exception, e: print e urls = [] return urls def get_urls(url): urls = [] try: vid = getVideoId(url) urls = get_urls_by_vid(vid) except Exception, e: print e urls = [] return parser2dic(urls) if __name__ == '__main__': url = "http://v.youku.com/v_show/id_XODgzOTYwMjI4.html" # print get_urls(url) vid = getVideoId(url) urls = get_urls_by_vid(vid) for i in urls: print i['furls'][0] print 'Video' print get_video(vid, None)
m3u8url = get_m1905_m3u8(vid) if m3u8url != None: urls.append({"rate": "标清", "furls": [m3u8url]}) fir = r1(r"(\d).*", vid) sec = r1(r"\d(\d).*", vid) info = get_content("http://static.m1905.cn/profile/vod/{}/{}/{}_1.xml".format(fir, sec, vid)) root = ET.fromstring(info) links = root.find("playlist/item").attrib for i in links: if i in ["url", "sdurl", "bkurl", "hdurl"]: template = {} template["rate"] = get_clarity(i) template["furls"] = [links[i]] urls.append(template) return urls def get_urls(html): try: vid = get_m1905_vid(html) con = get_m1905_urls(vid) except Exception, e: print e con = None return parser2dic(con) # http://www.1905.com/vod/play/597853.shtml if __name__ == "__main__": print get_urls("http://www.1905.com/vod/play/597853.shtml")
try: gcid = get_gcid(html.replace('vod','m' )) param1 = get_kankan_mparam(gcid,'param1') param2 = get_kankan_mparam(gcid,'param2') url = get_kankan_mparam(gcid,'url') param1_md5 = hashlib.new('md5', param1).hexdigest() rurl = url+'?key='+param1_md5+'&key1='+param2 except Exception,e: print e return rurl def get_urls(html): urls = [] try: furl = get_murl(html) if furl!=None: urls.append({'rate':'低清','furls':[furl]}) furls = get_url(html) if len(furls)!=0: for i in range(0,len(furls)): urls.append({'rate':rate[i],'furls':[furls[i]]}) for i in urls: print i except Exception,e: print e urls = None return parser2dic(urls) #http://vod.kankan.com/v/12/12801.shtml if __name__ == "__main__": print get_urls('http://vod.kankan.com/v/68/68873.shtml?id=731057')
def get_urls(url):
    """Resolve *url* to stream URLs via its video id.

    Args:
        url: the video page URL.

    Returns:
        parser2dic(...) over the resolved URLs; an empty list is used when
        no video id can be extracted.
    """
    urls = []
    vid = get_vid(url)
    # `is not None`, not `!= None`: identity test is the Python idiom.
    if vid is not None:
        urls = get_urls_by_vid(vid)
    return parser2dic(urls)
vid = url.split('_')[1].split('.')[0] except Exception,e: print e vid = None return vid def get_urls(url): furls = [] vid = get_vid(url) if vid!=None: try: jsonUrl = "http://vxml.56.com/json/%s/?src=site" % vid jsonContent = json.loads(urllib2.urlopen(jsonUrl).read(),'utf-8') rfiles = jsonContent['info']['rfiles'] for rfile in rfiles: template ={} template['rate'] = get_clarity(rfile['type']) urls = [] url = rfile['url'] urls.append(url) template['furls'] = urls furls.append(template) except Exception,e: print e furls = [] return parser2dic(furls) if __name__ == '__main__': url = 'http://www.56.com/u56/v_ODQxNjcxMTc.htmll' print get_urls(url)
def get_urls(url):
    """Resolve *url* to stream URLs via its video id.

    Args:
        url: the video page URL.

    Returns:
        parser2dic(...) over the resolved URLs; an empty list is used when
        no video id can be extracted.
    """
    urls = []
    vid = get_vid(url)
    # `is not None`, not `!= None`: identity test is the Python idiom.
    if vid is not None:
        urls = get_urls_by_vid(vid)
    return parser2dic(urls)