Example #1
0
def getVtv(url)	:
	"""Resolve a VTV live page to a playable m3u8 URL.

	Finds the embedded play.sohatv.vn player, pulls the URL-encoded m3u8
	out of its "live=" parameter, then strips (or swaps) a token path
	segment so the stream plays directly.  Python 2 only
	(urllib.unquote_plus).
	"""
	response = urlfetch.get(url)
	# Locate the embedded player iframe whose src contains "play".
	matches = re.search(r"src=\"(.+play.+?)\"", response.body)
	play_url = matches.group(1)
	headers = {'Host': 'play.sohatv.vn', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0', 'Referer': url}
	response = urlfetch.get(play_url, headers=headers)
	# The player markup exposes the real source in a status-code=200 tag.
	matches = re.search(r"status-code=200 src=\"(.+?)\"", response.body)
	url_play = matches.group(1)
	# The m3u8 target is URL-encoded inside the "live=" query parameter.
	matches = re.search(r"live=(.+?m3u8)", url_play)
	m3u8 = matches.group(1)
	m3u8 = 'http:'+urllib.unquote_plus(m3u8)
	# Path segment 8 carries a token that must be rewritten for playback.
	split_list = m3u8.split('/', 9)
	remove = split_list[8]
	vtvvn_option = 'true'  # hard-coded: the 'false' branches below are currently dead
	if 'vtv5-tay-nam-bo' not in url:
		# The token sits between "==" and the next dot in the segment.
		matches = re.search(r"==(.+?)\.", remove)
		remove = matches.group(1)
		if vtvvn_option == 'false':
			m3u8 = m3u8.replace(remove, '_m')
		if vtvvn_option == 'true':
			m3u8 = m3u8.replace(remove, '')
	else:
		# VTV5 Tay Nam Bo uses a fixed replacement filename instead.
		print('Kenh vtv5 nam bo')
		if vtvvn_option == 'false':
			m3u8 = m3u8.replace(remove, 'dnR2NWtt_m.m3u8')
		if vtvvn_option == 'true':
			m3u8 = m3u8.replace(remove, 'dnR2NWtt.m3u8')
	return m3u8
Example #2
0
    def test_links(self):
        """Each /links/<n> endpoint exposes the expected number of links."""
        for path, expected in (('/links/0', 1), ('/links/1', 2),
                               ('/links/2', 4), ('/links/3', 2),
                               ('/links/none', 1)):
            r = urlfetch.get(testlib.url(path))
            self.assertTrue(r.links)
            self.assertTrue(isinstance(r.links, list))
            self.assertTrue(len(r.links) == expected)
Example #3
0
    def test_links(self):
        """The parsed Link headers must be non-empty lists of the right size."""
        paths = ['/links/0', '/links/1', '/links/2', '/links/3', '/links/none']
        counts = [1, 2, 4, 2, 1]
        for path, count in zip(paths, counts):
            response = urlfetch.get(testlib.url(path))
            links = response.links
            self.assertTrue(links)
            self.assertTrue(isinstance(links, list))
            self.assertTrue(len(links) == count)
Example #4
0
def getHaivn(url):
	"""Resolve a haivn page URL to a playable video URL.

	Handles two hosting back-ends: an embedded YouTube player (returns a
	plugin.video.youtube URL) and the mecloud player (returns a direct
	http URL).  Returns '' when the page cannot be fetched or parsed.

	Bug fixes vs. original: the failed-fetch branch now returns instead of
	falling through to crash on response.body, and the unreachable
	xbmc.log() after the return was removed.
	"""
	headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0',
			'Referer'  : url
		}
	response = urlfetch.get(url, headers=headers)
	if not response:
		notify(u'Trang nguồn có lỗi. Thông báo cho dev.'.encode("utf-8"))
		return ''

	if 'youtube-player' in response.body:
		# Embedded YouTube: pull the iframe src and extract the video id.
		matches = re.search(r"iframe allowfullscreen=\"true\" src=\"(.+?)\?", response.body)
		video_url = matches.group(1)
		matches = re.search(r"embed\/(.+)", video_url)
		youtube_id = matches.group(1)
		video_url = "plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=" + youtube_id
	else:
		# Mecloud player: fetch the player script and read its JSON "video" list.
		regex = r'script type=\"text\/javascript\" src=\"(.+?mecloud-player)\"'
		matches = re.search(regex, response.body)
		if not matches:
			return ''
		url_player = matches.group(1)
		response = urlfetch.get(url_player, headers=headers)
		regex = r"\"video\":(\[.+?\])"
		matches = re.search(regex, response.body)
		video_url = matches.group(1)
		# The last entry with a 'url' key is taken as the best variant.
		t = video_url.count('url')
		data = json.loads(video_url)
		video_url = data[t-1]['url']
		video_url = 'http:' + video_url
	return video_url
Example #5
0
    def test_compressed_streaming(self):
        """Streaming must decompress correctly for every Accept-Encoding value."""
        sina = urlfetch.b('sina')
        for encoding in (None, 'deflate', 'gzip', '*'):
            extra = {} if encoding is None else {'headers': {'Accept-Encoding': encoding}}
            with tempfile.TemporaryFile() as f:
                with urlfetch.get('http://news.sina.com.cn/', **extra) as r:
                    for chunk in r:
                        f.write(chunk)
                f.seek(0)
                html = f.read()
                self.assertTrue(sina in html)
Example #6
0
    def test_compressed_streaming(self):
        """Chunked download of the page must contain the marker bytes for each encoding."""
        sina = urlfetch.b('sina')

        def fetch_streamed(**kwargs):
            # Stream the response into a temp file, then read it back whole.
            with tempfile.TemporaryFile() as buf:
                with urlfetch.get('http://news.sina.com.cn/', **kwargs) as resp:
                    for piece in resp:
                        buf.write(piece)
                buf.seek(0)
                return buf.read()

        self.assertTrue(sina in fetch_streamed())
        self.assertTrue(sina in fetch_streamed(headers={'Accept-Encoding': 'deflate'}))
        self.assertTrue(sina in fetch_streamed(headers={'Accept-Encoding': 'gzip'}))
        self.assertTrue(sina in fetch_streamed(headers={'Accept-Encoding': '*'}))
Example #7
0
def getPhimMoi(url):
	"""Resolve a phimmoi.net page URL to a direct video URL.

	The page hides its stream behind a packed ";eval(...)" JS blob; the blob
	is POSTed to a VMF decoding endpoint (AES-obfuscated in T2), which
	returns a URL whose body embeds the media JSON.  The last (highest
	quality) media entry's URL is returned.

	Bug fixes vs. original: removed a stray unterminated ''' after the
	return statement, and the unused backup_order variable.
	"""
	# NOTE(review): 'User_Agent' (underscore) is not a standard HTTP header
	# name; kept byte-identical since the server has been accepting it.
	headers = { 'User_Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0',
				'Host'				: 'www.phimmoi.net',
				'Referer'			: url
				}
	T2="U2FsdGVkX1+J5yRXU1goqexulsqcAaICSdXjSrml+FFQiYusRAwciVrwAIW86pvrU2RGQmSb9YL/8xMaOnWGbA"
	response = urlfetch.get(url, headers=headers)
	# Grab the packed javascript payload between ";eval" and </script>.
	regex = r"(;eval.+)<\/script>"
	matches = re.search(regex, response.body)
	payload = "data=" + urllib.quote(matches.group(1))
	headers = {
		'content-type': "application/x-www-form-urlencoded",
		'cache-control': "no-cache"
		}
	response = urlfetch.post(vmf.gibberishAES(T2, 'vmf'), data=payload, headers=headers)
	response = urlfetch.get(response.body)
	regex = r"var _responseJson='(.+)';"
	matches = re.search(regex, response.body)
	json_data = json.loads(matches.group(1))
	# Last media entry is the highest quality variant.
	return json_data['medias'][-1]['url']
Example #8
0
def resolve_url(url):
	"""Resolve an FShare link to a direct download URL and hand it to Kodi.

	Login depends on the module-level ``freeAccount`` flag: either a shared
	cookie fetched from feed.hdrepo.com, or the user's own login via
	doLogin().  Mutates the module-level ``headers`` dict and finishes by
	calling xbmcplugin.setResolvedUrl.
	"""
	if freeAccount == 'true':
		# Shared free account: the feed endpoint returns a ready cookie string.
		response = urlfetch.fetch("http://feed.hdrepo.com/fshare.php")
		if response.status == 200:
			headers['Cookie'] = response.content
		else:
			# The feed is rate limited to one request per minute.
			xbmc.executebuiltin((u'XBMC.Notification("%s", "%s", %s)' % ('Login', 'Server only accepts 1 request/minute', '5000')).encode("utf-8"))	 
			return
	else:
		headers['Cookie'] = doLogin()

	response = urlfetch.get(url,headers=headers, follow_redirects=False)
	# A 302 that is not to the logout page carries the file URL in Location.
	if response.status==302 and response.headers['location'].find('logout.php')<0:
		url=response.headers['location']
		# logout
		if freeAccount == 'true':
			# Release the shared session so other users can reuse the account.
			cookie = Cookie.SimpleCookie()
			cookie.load(response.headers.get('set-cookie', ''))
			headers['Cookie'] = _makeCookieHeader(cookie)
			urlfetch.get("https://www.fshare.vn/logout.php",headers=headers, follow_redirects=False)
	else:
		if response.status==200:
			# No redirect: scrape the download form's action URL off the page.
			soup = BeautifulSoup(str(response.content), convertEntities=BeautifulSoup.HTML_ENTITIES)		
			item = soup.find('form', {'name' : 'frm_download'})
			if item:
				url = item['action']
		else:
			xbmc.executebuiltin((u'XBMC.Notification("%s", "%s", %s)' % ('Login', 'Login failed. You must input correct FShare username/pass in Add-on settings', '5000')).encode("utf-8"))	 
			return
	
	item = xbmcgui.ListItem(path=url)
	xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
Example #9
0
    def test_get_via_proxy(self):
        """GET through the test proxy, with and without the scheme prefix."""
        full_proxy = testlib.test_server_host[:-1]
        bare_proxy = full_proxy.split("://", 1)[1]
        for proxy in (full_proxy, bare_proxy):
            resp = urlfetch.get("http://www.example.com", proxies={"http": proxy})
            self.assertEqual(resp.status, 200)
Example #10
0
def download_sub(subtitle):
	"""Download and extract a subtitle archive; return the extracted file path.

	Supports subscene.com and phudeviet.org source pages.  The archive is
	saved under special://temp/phudeVMF, extracted there by Kodi, and the
	first file with a known subtitle extension is renamed to "sub.file"
	and its path returned.  Returns None when nothing usable is found.
	"""
	xbmc_temp = xbmc.translatePath('special://temp')
	tempdir = os.path.join(xbmc_temp, 'phudeVMF')
	if 'subscene.com' in subtitle:
		# The subtitle page links to a relative /subtitle/download URL.
		response = urlfetch.get(subtitle)
		sub = re.search(r'href=\"(/subtitle/download?.*?)\"', response.body)
		sub = sub.group(1)
		subpath = "https://subscene.com" + sub
	if 'phudeviet.org' in subtitle:
		# phudeviet redirects the download URL; read the Location header.
		f = urlfetch.get(subtitle)
		match = re.search(r"(http://phudeviet.org/download/.+?html)", f.body)
		subpath = match.group(1)
		f = urlfetch.get(subpath)
		subpath = f.getheader('location')
		
	vDialog.create('Vietmediaf','Bắt đầu tải phụ đề xin vui lòng đợi trong giây lát.','Downloading...')
	# Ensure an empty working directory: create it, or wipe any leftovers.
	if not os.path.exists(tempdir):
		try:
			xbmcvfs.mkdirs(tempdir)
			time.sleep(20)
		except:pass
	else:
		for root, dirs, files in os.walk(tempdir, topdown=False):
			for name in files:
				try:os.remove(os.path.join(root, name))
				except:pass
			for name in dirs:
				try:os.rmdir(os.path.join(root, name))
				except:pass
	
	useragent = ("User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:54.0) Gecko/20100101 Firefox/54.0")
	headers = {'User-Agent': useragent, 'Referer': subtitle}
	tmp_file = os.path.join(tempdir, "phude.zip")
	
	try:
		if os.path.exists(tmp_file):
			os.remove(tmp_file)
		# Download the archive and let Kodi's built-in extractor unpack it.
		request = urllib2.Request(subpath, '', headers)
		response = urllib2.urlopen(request)
		file_handle = xbmcvfs.File(tmp_file, "wb")
		file_handle.write(response.read())
		xbmc.sleep(500)
		file_handle.close()
		xbmc.executebuiltin('XBMC.Extract("%s","%s")' % (tmp_file, tempdir))
		
	except:
		# Best effort: notify, then continue so the dialog is always closed.
		notify('Không tải được phụ đề')
		pass
	vDialog.close()
	exts = [".srt", ".sub", ".txt", ".smi", ".ssa", ".ass"]
	sub_temp = os.path.join(tempdir, "sub.file")
	# Return the first extracted file that looks like a subtitle.
	for file in xbmcvfs.listdir(tempdir)[1]:
		if os.path.splitext(file)[1] in exts:
			sub_file = os.path.join(tempdir, file)
			xbmcvfs.rename(sub_file, sub_temp)
			return sub_temp
Example #11
0
def get_htvplus(url):
	"""Resolve an hplus.com.vn page to a playable stream URL.

	With a VIP code configured the session cookie comes from the VMF getlink
	service; otherwise the cookies set by the site's own first page load are
	reused.  Returns the stream URL suffixed with URL-encoded User-Agent and
	Referer headers for Kodi playback, or None when resolution fails.

	Bug fix vs. original: the no-VIP branch built its first headers dict
	with 'Cookie': t before t was assigned, raising NameError; the first
	request is now made without a Cookie header and the site's cookie
	string is attached afterwards.
	"""
	if len(USER_VIP_CODE) > 0:
		try:
			f='U2FsdGVkX1+RQXkDAFegicGii3RLBVGrsbMVRV+kHpUpTExURcDQLDLLDkxsGOTf'
			notify(u'VMF Getlink system'.encode("utf-8"))
			# The VMF service returns the session cookie base64-encoded
			# in the 'username' field (Python 2 str.decode).
			response = fetch_data(VIETMEDIA_HOST + vmf.gibberishAES(f, 'vmf'))
			json_data = json.loads(response.body)
			t = json_data['username'].decode("base64")
			headers = {
					'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
					'Referer'			: url,
					'Cookie'		: t
					}
			response = urlfetch.get(url, headers=headers)
			# The page embeds the iOS stream URL; exchange it for the final link.
			regex = r"iosUrl = \"(.+?)\""
			matches = re.search(regex, response.body)
			video_url = matches.group(1)
			get_url = 'http://hplus.com.vn/content/getlinkvideo/'
			data = {'url': video_url, 'type': '1', 'is_mobile': '0'}
			response = urlfetch.post(get_url, headers=headers, data=data)
			video_url = response.body.encode("utf-8")
			refer = "|User-Agent=Mozilla%2F5.0%20%28Windows%20NT%2010.0%3B%20Win64%3B%20x64%29%20AppleWebKit%2F537.36%20%28KHTML%2C%20like%20Gecko%29%20Chrome%2F58.0.3029.110%20Safari%2F537.36&Referer=http%3A%2F%2Fhplus.com.vn%2F"
			return (video_url + refer)
		except Exception as e:
			notify('Khong lay duoc link')
	else:
		# First request without a cookie; then reuse what the site set.
		headers = {
				'User-Agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',
				'Referer'			: url
				}
		response = urlfetch.get(url, headers=headers)
		headers['Cookie'] = response.cookiestring
		regex = r"iosUrl = \"(.+?)\""
		matches = re.search(regex, response.body)
		video_url = matches.group(1)
		get_url = 'http://hplus.com.vn/content/getlinkvideo/'
		data = {'url': video_url, 'type': '1', 'is_mobile': '0'}
		response = urlfetch.post(get_url, headers=headers, data=data)
		video_url = response.body.encode("utf-8")
		refer = "|User-Agent=Mozilla%2F5.0%20%28Windows%20NT%2010.0%3B%20Win64%3B%20x64%29%20AppleWebKit%2F537.36%20%28KHTML%2C%20like%20Gecko%29%20Chrome%2F58.0.3029.110%20Safari%2F537.36&Referer=http%3A%2F%2Fhplus.com.vn%2F"
		return (video_url + refer)
Example #12
0
def search_movie(item):
	"""Search subscene.com and phudeviet.org for subtitles matching *item*.

	item: dict with 'title', 'year', 'filename', 'mansearchstr' keys.
	Builds a list of (name, url, label, icon, rating) candidates — rating
	counts how many filename tokens appear in the release name — and adds
	them as directory items via xbmcplugin.
	"""
	title=item['title'];year=item['year'];filename=item['filename'];mansearchstr=item['mansearchstr']
	if mansearchstr:title=mansearchstr;mess('Manual search for string')
	else:title=re.sub('&#.* ','',title.replace("&","and")).strip()
	subspage_url=find_movie(title, year);subtitles=[];subs=[]
	# Subscene result rows: (href, language, release-name).
	pattern='<a href="(/subtitles/.+?)">\s+<span class=".+?">\s*(.+?)\s+</span>\s+<span>\s+(.+?)\s+</span>'
	if subspage_url:
		url=subscene+subspage_url
		# Cookie restricts results to languages 13 and 45.
		subs=re.findall(pattern,urlfetch.get(url=url,headers={'Cookie':'LanguageFilter=13,45'}).body)
	if mansearchstr:
		url=subscene+'/subtitles/release?q='+urllib.quote_plus(title)+'&r=true'
		subs+=re.findall(pattern,urlfetch.get(url=url,headers={'Cookie':'LanguageFilter=13,45'}).body)
	phudeviet_url = find_phudeviet(title, year)
	if not phudeviet_url:
		phudeviet_url = google_find_phudeviet(title,year)
	if phudeviet_url:
		pattern_pdv='<td class="td4".+"(.+png)">.+\s+<td class="td1".+href="(.+?)">(.+?)<.+td>'
		for lang,href,fn in re.findall(pattern_pdv,urlfetch.get(phudeviet_url).body):
			# The flag icon filename distinguishes the language on phudeviet.
			if 'Anh.png' in lang:lang="English"
			else:lang="Phudeviet"
			subs.append((href,lang,fn))
	notification=''
	if len(subs) == 0:
		# Fallback: fuzzy release search using a dotted "title.year" query.
		url='http://subscene.com/subtitles/release?q=%s'%title.replace(' ','.')+'.'+year
		pattern='<a href="(/subtitles/.+?)">\s+<span class=".+?">\s*(.+?)\s+</span>\s+<span>\s+(.+?)\s+</span>'
		subs=re.findall(pattern,urlfetch.get(url=url,headers={'Cookie':'LanguageFilter=13,45'}).body)
		if subs:notification=u'tìm gần đúng!'

	if len(subs) == 0:
		mess(u'Không tìm thấy phụ đề của Video: %s'%title)
	
	# Tokenize the local filename; each token found in a release name bumps its rating.
	fn = os.path.splitext(filename)[0].split('.x264')[0].replace(' ','.').replace('-','.').replace('*','.')
	ratlist=fn.split('.')
	for link,lang,name in subs:
		name=name.strip().replace(' ','.');rat=1;label='vie'
		if 'Vietnam' in lang:img='vi';url=subscene+link
		elif 'Phude' in lang:img='vi';url=link;name='[COLOR lime]phudeviet.org[/COLOR]: '+name
		else:
			img='en';url=subscene+link
			if addon.getSetting('trans_sub')=='false':label='eng'
		for i in ratlist:
			try:
				if re.search(i,name):rat+=1
			except:pass
		subtitles.append((name,url,label,img,str(rat)))
	items=list()
	# NOTE(review): cmp(x[0], y[3]) compares one entry's name against the
	# other's icon id — looks like it was meant to compare ratings
	# (x[4] vs y[4]); confirm intent before changing.
	for fn,link,label,img,rating in sorted(subtitles,cmp=lambda x,y:cmp(x[0],y[3]),reverse=True):
		item = xbmcgui.ListItem(label=label,label2=fn,iconImage=rating,thumbnailImage=img)
		url="plugin://%s/?action=download&link=%s&filename=%s&img=%s"%(service,link,fn,img)
		items.append((url, item, False))
	if items:
		xbmcplugin.addDirectoryItems(int(sys.argv[1]), items)
		if not filename:filename=title
		mess(u'Movie: %s'%filename,20000,'Xshare %s: Movie year - %s '%(notification,year))
Example #13
0
    def test_content_encoding(self):
        """Invalid compressed bodies/headers must raise ContentDecodingError."""
        for path in ('/content-encoding/invalid-body',
                     '/content-encoding/invalid-header',
                     '/content-encoding/invalid-body/deflate'):
            target = testlib.url(path)
            self.assertRaises(urlfetch.ContentDecodingError,
                              lambda t=target: urlfetch.get(t).body)
Example #14
0
    def test_content_encoding(self):
        """Reading .body on a response with broken content encoding raises."""
        def read_body(path):
            return urlfetch.get(testlib.url(path)).body

        self.assertRaises(urlfetch.ContentDecodingError, read_body,
                          '/content-encoding/invalid-body')
        self.assertRaises(urlfetch.ContentDecodingError, read_body,
                          '/content-encoding/invalid-header')
        self.assertRaises(urlfetch.ContentDecodingError, read_body,
                          '/content-encoding/invalid-body/deflate')
Example #15
0
def getTvnet(url):
	"""Resolve a vn.tvnet.gov.vn channel page to a stream URL.

	Reads the player's data-file attribute, fetches that JSON playlist and
	returns the first entry's URL with a Kodi header suffix appended.

	Fix vs. original: removed the unused channel-name extraction, which
	served no purpose and crashed on URLs not matching its regex.
	"""
	headers = {'User_Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0','Origin': 'http://vn.tvnet.gov.vn', 'Referer': url}
	r = urlfetch.get(url, headers=headers)
	# The playlist endpoint lives in the player's data-file attribute.
	matches = re.search(r"data-file=\"(.+?)\"", r.body)
	url_get = matches.group(1).replace('amp;', '')  # undo HTML entity escaping
	r = urlfetch.get(url_get, headers=headers)
	json_data = json.loads(r.body)
	video_url = json_data[0]["url"]
	# Kodi-style suffix so playback sends the right User-Agent/Referer.
	return video_url+'|User-Agent=Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0&Referer=http://vn.tvnet.gov.vn'
Example #16
0
    def test_get_via_proxy(self):
        """Proxy GETs must succeed and expose JSON dict / unicode text."""
        def check(proxy):
            resp = urlfetch.get('http://www.example.com', proxies={'http': proxy})
            self.assertEqual(resp.status, 200)
            self.assertTrue(isinstance(resp.json, dict))
            self.assertTrue(isinstance(resp.text, urlfetch.unicode))

        proxy = testlib.test_server_host[:-1]
        check(proxy)
        check(proxy.split('://', 1)[1])
Example #17
0
def getKphim(url):
	"""Resolve a kphim.tv "?vid=..?sid=.." URL to the final video URL.

	Builds the embed-page token as md5("<vid>func<sid>") with the first hex
	digit dropped, scrapes the file URL off the embed page, then follows its
	redirect.

	Fixes vs. original: uses hashlib directly instead of reaching it through
	urllib2's internal import, and reads the Location header by name instead
	of indexing a fixed header slot (rh[5][1]).
	"""
	import hashlib  # local import: original relied on urllib2.hashlib
	matches = re.search(r"\?vid=(\d+)\?sid=(\d+)", url)
	vid = matches.group(1)
	sid = matches.group(2)
	token = hashlib.md5(vid+'func'+sid).hexdigest()[1:]
	getlink = 'http://kphim.tv/embed/'+vid+'/'+sid+'/'+token
	response = urlfetch.get(getlink)
	matches = re.search(r"file:\s'(.+?)'", response.body)
	video_url = matches.group(1)
	response = urlfetch.get(video_url)
	# The real stream URL arrives as a redirect.
	video_url = response.getheader('location')
	return video_url
Example #18
0
def getmp3zing(url):
	"""Resolve an mp3.zing.vn song page to a playable stream URL."""
	page = urlfetch.get(url)
	xml_match = re.search(r"data-xml=\"(.+?)\"", page.body)
	data_url = xml_match.group(1)
	if 'http' not in data_url:
		data_url = 'http://mp3.zing.vn' + data_url
	# Fetch the song metadata; prefer the second (higher quality) source,
	# falling back to the first when it is empty.
	meta = json.loads(urlfetch.get(data_url).body)
	source_list = meta["data"][0]["source_list"]
	stream = source_list[1]
	if len(stream) == 0:
		stream = source_list[0]
	return stream
Example #19
0
    def test_streaming(self):
        """Chunks iterated off the response must reassemble to the fixture bytes."""
        cases = [
            ('utf8.txt', 'test.file'),
            ('/gbk.txt', 'test.file.gbk'),
        ]
        for path, fixture in cases:
            with tempfile.TemporaryFile() as f:
                with urlfetch.get(testlib.url(path)) as r:
                    for piece in r:
                        f.write(piece)
                f.seek(0)
                expected = open(os.path.join(os.path.dirname(__file__), fixture), 'rb').read()
                self.assertEqual(f.read(), expected)
Example #20
0
    def test_streaming(self):
        """Streamed downloads must be byte-identical to the local fixtures."""
        here = os.path.dirname(__file__)

        def stream_to_bytes(path):
            # Accumulate the iterated chunks via a temp file, as users would.
            with tempfile.TemporaryFile() as buf:
                with urlfetch.get(testlib.url(path)) as resp:
                    for piece in resp:
                        buf.write(piece)
                buf.seek(0)
                return buf.read()

        self.assertEqual(stream_to_bytes('utf8.txt'),
                         open(os.path.join(here, 'test.file'), 'rb').read())
        self.assertEqual(stream_to_bytes('/gbk.txt'),
                         open(os.path.join(here, 'test.file.gbk'), 'rb').read())
Example #21
0
def get_rss_entries(url):
    """Fetch and parse an RSS feed; return a list of entry dicts ([] on error)."""
    try:
        log('fetching %s', url)
        resp = urlfetch.get(url, timeout=5, randua=True, max_redirects=3)
        log('%d bytes fetched', len(resp.body))

        log('parsing feed content')
        feed = feedparser.parse(resp.body)
        log('parsing OK')
    except Exception as exc:
        log('[error] get_rss_entries: %s', str(exc))
        return []

    # Normalize each feed entry into a plain dict; pubdate is stamped
    # with the current UTC time rather than the feed's own date.
    return [
        {
            'title': mb_code(entry.title),
            'url': mb_code(entry.links[0]['href']),
            'comments': mb_code(entry['comments']),
            'pubdate': datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S GMT"),
        }
        for entry in feed.entries
    ]
def find_phudeviet(title, film_year):
    """Locate the phudeviet.org film page for *title*/*film_year*; None if absent."""
    search_url = 'http://phudeviet.org/findfilm/?select=1&film_name=%s' % urllib.quote_plus(title)
    title_pattern = ".*".join(title.split()).lower()
    found = None
    rows = re.findall(
        '<td class="td6"><.+?><a href="(.+?)">(.+?)</a></td>',
        urlfetch.get(search_url).body)
    for link, name in rows:
        cleaned = re.sub("&# ", "", no_accent(name).replace("&", "and")).lower()
        if film_year:
            if re.search(title_pattern, cleaned) and film_year in cleaned:
                found = link
                break
        else:
            # No year supplied: match against the current year.
            this_year = str(date.today().year)
            if re.search(title_pattern, cleaned) and this_year in cleaned:
                found = link
                break
    # TV seasons: "S03"-style names are listed as "season 3" on the site.
    if not found and re.search('S\d\d', title):
        season = int(re.search('S(\d\d)', title).group(1))
        found = find_phudeviet(re.sub('S\d\d.*', 'season %d' % season, title), film_year)
    # Still nothing and no year given: retry assuming last year's release.
    if not film_year and not found:
        found = find_phudeviet(title, str(date.today().year - 1))
    return found
def google_find_phudeviet(title, film_year):
    """Fallback search via the legacy Google AJAX web-search API for a
    phudeviet.org film page matching *title*/*film_year*.

    Returns the film page URL or None.  Python 2 only (print statements).
    """
    pattern = ".*".join(title.split()).lower()
    url = None
    hd = {
        'User-Agent': 'Mozilla/5.0',
        'Accept-Language': 'en-US,en;q=0.8,vi;q=0.6'
    }
    # Exact-phrase query restricted to the phudeviet.org site.
    string_search = urllib.quote_plus('"' + title + '"')
    href = 'http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&start=0&'
    href += 'q=site:phudeviet.org+%s' % string_search
    json = urlfetch.get(href, headers=hd).json
    if json['responseStatus'] == 403:
        print 'phudeviet - Google: response status 403'
    else:
        data = json['responseData']
        if not data or not data['results']:
            print 'phudeviet - Google not data["results"]'
        else:
            results = data['results']
            for result in results:
                # Only film pages count; require the title pattern and year.
                if 'http://phudeviet.org/film' in result['url']:
                    title = result['titleNoFormatting']
                    link = result['url']
                    title = title.replace("&", "and").replace("&#39;",
                                                              "").lower()
                    if re.search(pattern, title) and film_year in title:
                        url = link
                        break
    if url: print 'google_find_phudeviet: %s' % url
    return url
Example #24
0
def unsubscribe(service, messages, label_id):
    """Visit each message's unsubscribe URL, then unlabel and delete it.

    Tracks already-handled senders in the module-level ``seen`` set so the
    same sender's unsubscribe link is only hit once.

    Fixes vs. original: `sender not in seen` idiom, and dropped the unused
    `response`/`error` bindings.
    """
    for item in messages:
        msg_id = item['id']
        msg = get_message(service, msg_id)
        raw_sender = get_sender(msg)
        # Normalize the sender; fall back to a placeholder when missing.
        sender = raw_sender.strip().replace(
            "\"", '') if raw_sender else 'Invalid Sender'
        url = get_unsubscribe_url(msg)

        if url:
            if sender not in seen:
                try:
                    urlfetch.get(url, timeout=10)
                    seen.add(sender)
                    print("{}Unsubscribed from{}: {}".format(
                        mcolors.OKGREEN, mcolors.ENDC, sender))
                except urlfetch.UrlfetchException:
                    print("{}Unsubscribe timeout{}: {}".format(
                        mcolors.FAIL, mcolors.ENDC, sender))
            else:
                print("{}Already Unsubscribed from{}: {}".format(
                    mcolors.OKGREEN, mcolors.ENDC, sender))
        else:
            print("{}Could not unsubscribe{}: {}".format(
                mcolors.WARNING, mcolors.ENDC, sender))

        # Always clean the message up, whether or not the unsubscribe worked.
        unlabel_message(service, msg_id, label_id)
        delete_message(service, msg_id)
        print("    {}Finished Cleanup{}\n".format(mcolors.OKBLUE,
                                                  mcolors.ENDC))
Example #25
0
def get_content_url(url_api):
    """Fetch *url_api* and return its body, or None on a non-200 status."""
    result = urlfetch.get(url_api)
    if result.status_code == 200:
        return result.content
    # Log the failure for debugging; the caller handles the None.
    xbmc.log("Can't get link " + url_api + " Error Code:" + str(result.status_code) + " - " + str(result.reason))
    return None
Example #26
0
def get_servertv24(url):
	"""Resolve a tv24.vn channel page to a stream URL.

	Logs in with the sctv_user/sctv_pass addon settings, posts the channel
	id and per-session token to the link endpoint, then runs the returned
	PLAY_URL through vmf.sctv().  Returns the final URL, or None on the
	failure paths.
	"""
	user = ADDON.getSetting('sctv_user')
	password = ADDON.getSetting('sctv_pass')
	channelid = re.search(re.compile(r"\/(\d+)\/"), url).group(1)
	response = urlfetch.get(url)
	if not response:
		notify('Kiểm tra nguồn phát tại [COLOR red]tv24h.vn[/COLOR] và báo cho người phát triển.')
		return
	cookie=response.cookiestring;
	# The page embeds a per-session channel token required by the link API.
	matches = re.search(r'\"channel_token\" value=\"(.+?)\"', response.body)
	channeltoken = matches.group(1)
	signin_url = 'http://tv24.vn/client/login/process'
	headers = {'Host': 'tv24.vn', 'Accept-Encoding': 'gzip, deflate, compress, identity, *', 'Accept': '*/*', 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0', 'Cookie': cookie, 'Referer': 'http://web.tv24.vn/dang-nhap'}
	data = {'mobile': user, 'password': password}
	urlfetch.post(signin_url, headers=headers, data=data)
	data = {'channel_id': channelid, 'channel_token': channeltoken}
	response = urlfetch.post('http://tv24.vn/client/channel/link', headers=headers, data=data)
	if 'null' in response.body:
		# A 'null' body means not authorized: missing credentials or bad link.
		if len(user) == 0  or len(password) == 0:
			sleep(1)
			alert(u'Bạn hãy đăng ký tài khoản trên web [COLOR red]http://tv24.vn[/COLOR] và nhập trong Setting của Addon VMF'.encode("utf-8"))
		else:
			notify('Link bị lỗi')
	else:
		json_data = json.loads(response.body)
		video_url = json_data['data']['PLAY_URL']
		notify("Đang getlink")
		# The raw PLAY_URL must be signed via vmf.sctv before playback.
		video_url = vmf.sctv(channeltoken, video_url)
		sleep(5)
		if len(video_url) == 0:
			alert(u'Lỗi không lấy được link. Xin vui lòng thử lại.'.encode("utf-8"))
		return (video_url)
Example #27
0
	def check_user(session_id):
		"""Return the account_type reported by the VMF service for *session_id*."""
		f = 'U2FsdGVkX1+fntz3Jv92YvlUvQk6pEhgPiGKJcEBVtVH9lpd8YS6idK8G9Lr7etACq/sLnO12tI2klwOz9QQWQ'
		# The endpoint URL is AES-obfuscated; authenticate via the session cookie.
		cookie_headers = {'cookie': "session_id=" + session_id}
		reply = urlfetch.get(vmf.gibberishAES(f, 'Faidemteiv'), headers=cookie_headers)
		account_type = json.loads(reply.body)['account_type']
		return account_type
Example #28
0
def get_images_info(arm=False):
    """Gets all the publicly available system images from the Android Image Repos.

    :param arm: when True, also include the (slow) ARM images.

    Returns a list of AndroidSystemImages that were found and (hopefully) can boot."""
    xml = []
    for url in SYSIMG_REPOS:
        response = urlfetch.get(url)
        if response.status == 200:
            xml.append(response.content)

    # Index the license elements by name for lookup while building images.
    # (Original used dict([...]) around a no-op inner list comprehension.)
    licenses = {lic.name: lic
                for lic in (License(p)
                            for doc in xml
                            for p in ET.fromstring(doc).findall("license"))}

    packages = [ET.fromstring(doc).findall("remotePackage") for doc in xml]
    # Flatten the list of lists into system image objects.
    infos = [SysImgInfo(item, licenses) for sublist in packages for item in sublist]
    # Filter only for intel images that we know that work
    x86_64_imgs = [info for info in infos if info.abi == "x86_64" and info.letter >= MIN_REL_X64]
    x86_imgs = [info for info in infos if info.abi == "x86" and info.letter >= MIN_REL_I386]
    slow = [info for info in infos if info.abi.startswith("arm")] if arm else []
    all_imgs = sorted(x86_64_imgs + x86_imgs + slow, key=lambda x: x.api + x.tag)
    # Filter out windows/darwin images.
    return [i for i in all_imgs if "windows" not in i.url and "darwin" not in i.url]
Example #29
0
def getAnime47(url):
	"""Resolve an anime47.com episode page to a direct stream URL.

	The page hides an AES-encrypted link blob: the passphrase comes from a
	JS array on the page, the salt/ciphertext from the player endpoint.
	Returns the decrypted file URL, or the sentinel string 'thongbao2' on
	failure.  Python 2 only (str.decode('base64'/'hex'/'string_escape')).
	"""
	headers = {
			'User_Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:50.0) Gecko/20100101 Firefox/50.0',
			'Referer': 'http://anime47.com',
			'Cookie'			: 'location.href=1; path=/'}
	url_data = urlfetch.get(url, headers=headers)
	# The AES passphrase is the 6th element of an escaped JS array literal.
	matches = re.search(r"var _\w+\W+(\[.+?\])", url_data.body)
	keys = matches.group(1)
	keys = keys.decode('string_escape')
	keys = json.loads(keys)[5].encode('utf-8')
	matches = re.search(r'link:\s\"(.*?)\"', url_data.body)
	google_link = matches.group(1)
	player_url = 'http://anime47.com/player/player.php'
	data = {'link': google_link}
	response = urlfetch.post(player_url, headers=headers, data=data)
	# The player responds with base64 JSON: {'s': hex salt, 'ct': ciphertext}.
	j = response.body.decode('base64')
	jsonStr = json.loads(j)
	s = jsonStr['s']
	salt  = s.decode("hex")
	ct = jsonStr['ct']
	l = vmf.decode(ct, keys, salt)
	links = json.loads(l)
	matches = re.search(r"\"file\":\"(.*?)\"", links)
	if matches:
		links = matches.group(1)
	else:
		# Sentinel understood by the caller as "could not resolve".
		return 'thongbao2'
	return (links)
Example #30
0
def get_currency_rate(main_currency, requested_currency):
    """
    Gets the current rate from one currency to the other.
    Documentation: https://www.currencyconverterapi.com/docs
    :param main_currency: The currency of the system. Usually the system is in GBP
    :param requested_currency:
    :return: the current rate, or None when all retries fail

    Bug fix vs. original: the HTTP status is now checked BEFORE parsing the
    body — a 500 error body may not be valid JSON, and the original's
    json.loads would raise outside the caught exception type.
    """

    conversion_key = main_currency + '_' + requested_currency
    url = f'https://free.currencyconverterapi.com/api/v6/convert?' \
          f'q={conversion_key}&compact=ultra'

    rate = None
    for i in range(1, 4):  # retries, with a slightly longer deadline each time
        try:
            result = urlfetch.get(url=url, deadline=(5 + i))
            # retry in case of 'Internal Server Error' status code
            if result.status_code == 500:
                time.sleep(1)
                continue
            rate = json.loads(result.content).get(conversion_key)
            break
        except urlfetch.UrlfetchException as e:
            print(e)
            time.sleep(1)
            continue

    return rate
Example #31
0
def check_fshare():
	"""Log in to fshare.vn with the add-on's stored credentials and display
	the account status (type, join date, account id, expiry date, bonus
	points, e-mail) in a Kodi text box.

	Side effects: shows an alert and calls sys.exit() when the login is
	rejected by the site.
	"""
	username = ADDON.getSetting('fshare_username')
	password = ADDON.getSetting('fshare_password')
	login_url = 'https://www.fshare.vn/login'
	# Initial GET only to obtain the CSRF token and the session cookies.
	response = urlfetch.fetch(login_url)
	#alert(username)
	#alert(password)
	csrf_pattern = '\svalue="(.+?)".*name="fs_csrf"'
	csrf=re.search(csrf_pattern, response.body)
	fs_csrf = csrf.group(1)
	#alert(fs_csrf)
	# Replay the session cookies from the GET when posting the login form.
	headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36 VietMedia/1.0', 'Cookie': response.cookiestring}
	data = {
				"LoginForm[email]"		: username,
				"LoginForm[password]"	: password,
				"fs_csrf"				: fs_csrf
			}
	response = urlfetch.post(login_url, headers=headers, data=data)
	# The page echoes this Vietnamese message ("wrong username or password")
	# when the credentials are rejected.
	if 'Sai tên đăng nhập hoặc mật khẩu.' in response.body:
		alert('Sai tên đăng nhập hoặc mật khẩu. Xin vui lòng kiểm tra lại user và password', '[COLOR yellow]Fshare thông báo[/COLOR]')
		sys.exit("Error message")
	headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36 VietMedia/1.0', 'Cookie': response.cookiestring}
	# Scrape the account-info page for the membership details.
	check_acc = urlfetch.get('https://www.fshare.vn/account/infoaccount', headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36 VietMedia/1.0', 'Cookie': response.cookiestring})
	regex = r"data-target=\"#member\">(.+?)</a>"
	# Account type ("Loại tài khoản").
	ma_tk=re.search(regex, check_acc.body)
	ma_tk=ma_tk.group(1)
	ma_tk='Loại tài khoản: [COLOR red]'+ma_tk+'[/COLOR]'
	# Join date ("Ngày tham gia").
	date_create=re.search(r"<dt>Ngày tham gia</dt>.*\n.+?<dd>(.+?)</dd>", check_acc.body)
	date_create=date_create.group(1)
	date_create=date_create.rstrip()
	date_create='Ngày tham gia: [COLOR red]'+date_create+'[/COLOR]'
	# Account id ("Mã Tài Khoản").
	acc_id=re.search(r"<dt>Mã Tài Khoản</dt>.*\n.+?<dd>(.+?)</dd>",check_acc.body)
	acc_id=acc_id.group(1)
	acc_id='Mã tài khoản: [COLOR red]'+acc_id+'[/COLOR]'
	# Expiry date ("Hạn dùng").
	expire_date=re.search(r"<dt>Hạn dùng</dt>.*\n.+?<dd>(.+?)</dd>",check_acc.body)
	expire_date=expire_date.group(1)
	#expire='Hạn dùng: [COLOR red]'+expire+'[/COLOR]'
	# Bonus points ("Điểm thưởng").
	bonus=re.search(r"<dt>Điểm thưởng</dt>.*\n.+?<dd>(.+?)</dd>",check_acc.body)
	bonus=bonus.group(1)
	bonus='Điểm thưởng: [COLOR red]'+bonus+'[/COLOR]'
	# A second page is needed for the registered e-mail address.
	check_acc = urlfetch.get('https://www.fshare.vn/account/profile', headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36 VietMedia/1.0', 'Cookie': response.cookiestring})
	regex = r'Địa chỉ email</label>.+<div class=\"col-sm-8.+text-primary\">(.+?)<\/div>'
	email = re.search(regex, check_acc.body)
	email = email.group(1)
	email = 'Địa chỉ e-mail: [COLOR red]'+email+'[/COLOR]'
	info=acc_id+'\n'+ma_tk+'\n'+date_create+'\n'+'Hạn dùng: [COLOR red]'+expire_date+'[/COLOR]\n'+bonus+'\n'+email
	TextBoxes('Trạng thái tài khoản fshare', info)
Example #32
0
def get_vtvgo(url):
	"""Extract the direct stream URL from a vtvgo.vn page.

	Returns the stream URL with Referer/User-Agent pipe options appended,
	or the marker string 'thongbao2' when no player URL is found.
	"""
	page = urlfetch.get(url)
	found = re.search(r"addPlayer\('(.+?)'", page.body)
	if not found:
		return 'thongbao2'
	stream = found.group(1)
	options = '|Referer=http%3a%2f%2fvtvgo.vn&User-Agent=Mozilla%2f5.0+(Windows+NT+10.0%3b+WOW64%3b+rv%3a48.0)+Gecko%2f20100101+Firefox%2f48.0'
	return stream + options
Example #33
0
    def test_redirect(self):
        """Redirect responses expose status, Location and history correctly."""
        resp = urlfetch.get(testlib.url('/redirect/3/0'))
        self.assertTrue(resp.status in (301, 302, 303, 307))
        self.assertEqual(resp.links, [])
        self.assertTrue('location' in resp.headers)

        # Too small a redirect budget must raise.
        for limit in (1, 2):
            self.assertRaises(
                urlfetch.UrlfetchException,
                lambda limit=limit: urlfetch.get(testlib.url('/redirect/3/0'), max_redirects=limit))

        resp = urlfetch.get(testlib.url('/redirect/3/0'), max_redirects=3)
        payload = resp.json
        self.assertEqual(resp.status, 200)
        self.assertEqual(len(resp.history), 3)
        self.assertEqual(payload['method'], 'GET')
        self.assertTrue('location' not in resp.headers)
        self.assertTrue(isinstance(resp.json, dict))
        self.assertTrue(isinstance(resp.text, urlfetch.unicode))
Example #34
0
    def test_redirect(self):
        """A chain of 3 redirects is followed only with max_redirects >= 3."""
        first = urlfetch.get(testlib.url('/redirect/3/0'))
        self.assertTrue(first.status in (301, 302, 303, 307))
        self.assertEqual(first.links, [])
        self.assertTrue('location' in first.headers)

        self.assertRaises(urlfetch.UrlfetchException, lambda: urlfetch.get(testlib.url('/redirect/3/0'), max_redirects=1))
        self.assertRaises(urlfetch.UrlfetchException, lambda: urlfetch.get(testlib.url('/redirect/3/0'), max_redirects=2))

        final = urlfetch.get(testlib.url('/redirect/3/0'), max_redirects=3)
        body = final.json
        self.assertEqual(final.status, 200)
        self.assertEqual(len(final.history), 3)
        self.assertEqual(body['method'], 'GET')
        self.assertTrue('location' not in final.headers)
        self.assertTrue(isinstance(final.json, dict))
        self.assertTrue(isinstance(final.text, urlfetch.unicode))
Example #35
0
    def test_fragment(self):
        """A URL fragment must not be sent and must not break the request."""
        resp = urlfetch.get(testlib.test_server_host + '#urlfetch')
        payload = json.loads(resp.text)

        self.assertEqual(resp.status, 200)
        self.assertTrue(isinstance(resp.json, dict))
        self.assertTrue(isinstance(resp.text, urlfetch.unicode))
        self.assertEqual(payload['method'], 'GET')
Example #36
0
    def test_basic_auth(self):
        """HTTP Basic auth credentials passed via `auth=` are accepted."""
        resp = urlfetch.get(testlib.test_server_host + 'basic_auth', auth=('urlfetch', 'fetchurl'))
        payload = json.loads(resp.text)

        self.assertEqual(resp.status, 200)
        self.assertTrue(isinstance(resp.json, dict))
        self.assertTrue(isinstance(resp.text, urlfetch.unicode))
        self.assertEqual(payload['method'], 'GET')
Example #37
0
def get_userinfo(workspace, user_id):
    """Look up a Slack user's profile via the users.info API.

    :param workspace: workspace whose bot token is used for the call
    :param user_id: Slack user id to look up
    :return: the raw urlfetch response, a parsed fake payload when the
        FAKE_SLACK config flag is set, or None when no token is configured
    """
    token = settings.get_bot_token(workspace)
    if not token:
        current_app.logger.warning(
            f"Requested token for workspace {workspace} but found none")
        return None

    if current_app.config.get('FAKE_SLACK'):
        return json.loads('''{
                            "ok": true,
                            "user": {
                                "id": "W012A3CDE",
                                "team_id": "T012AB3C4",
                                "name": "spengler",
                                "deleted": false,
                                "color": "9f69e7",
                                "real_name": "Egon Spengler",
                                "tz": "America/Los_Angeles",
                                "tz_label": "Pacific Daylight Time",
                                "tz_offset": -25200,
                                "profile": {
                                    "avatar_hash": "ge3b51ca72de",
                                    "status_text": "Print is dead",
                                    "status_emoji": ":books:",
                                    "real_name": "Egon Spengler",
                                    "display_name": "spengler",
                                    "real_name_normalized": "Egon Spengler",
                                    "display_name_normalized": "spengler",
                                    "email": "*****@*****.**",
                                    "image_24": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                                    "image_32": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                                    "image_48": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                                    "image_72": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                                    "image_192": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                                    "image_512": "https://.../avatar/e3b51ca72dee4ef87916ae2b9240df50.jpg",
                                    "team": "T012AB3C4"
                                },
                                "is_admin": true,
                                "is_owner": false,
                                "is_primary_owner": false,
                                "is_restricted": false,
                                "is_ultra_restricted": false,
                                "is_bot": false,
                                "updated": 1502138686,
                                "is_app_user": false,
                                "has_2fa": false
                            }
                        }''')

    from urllib.parse import urlencode  # stdlib; local to avoid touching module imports

    headers = {
        'User-Agent': f'karmabot/{karmabot.__version__}',
        'Content-Type': 'application/json; charset=utf-8'
    }
    # urlencode percent-escapes user_id and token; the old raw %s
    # interpolation would have produced a malformed URL for any value
    # containing reserved characters.
    query = urlencode({'user': user_id, 'token': token})
    result = urlfetch.get(
        url="https://slack.com/api/users.info?" + query,
        headers=headers)
    return result
def search_movie(title, year, filename):
	"""Search subscene.com and phudeviet.org for subtitles matching a movie,
	rank them against the video filename and list them in the Kodi directory.

	:param title: movie title (HTML entities get normalized below)
	:param year: release year as a string
	:param filename: the playing video's filename, used for fuzzy ranking
	"""
	title=title.replace("&","and").replace("&#39;","").strip();subtitles=[];subs=[]
	subspage_url = find_movie(title, year)
	if subspage_url:
		url = subscene+subspage_url
		print 'aaaa',url
		# Capture (href, language, release name) from the subscene listing.
		pattern='<a href="(/subtitles/.+?)">\s+<span class=".+?">\s*(.+?)\s+</span>\s+<span>\s+(.+?)\s+</span>'
		# LanguageFilter=13,45 restricts results to English + Vietnamese.
		subs=re.findall(pattern,urlfetch.get(url=url,headers={'Cookie':'LanguageFilter=13,45'}).body)
		
	phudeviet_url = find_phudeviet(title, year)
	if not phudeviet_url:
		phudeviet_url = google_find_phudeviet(title,year)
	if phudeviet_url:
		# phudeviet rows: flag image (language), link, release name.
		pattern_pdv='<td class="td4".+"(.+png)">.+\s+<td class="td1".+href="(.+?)">(.+?)<.+td>'
		for lang,href,filename in re.findall(pattern_pdv,urlfetch.get(phudeviet_url).body):
			if 'Anh.png' in lang:lang="English"
			else:lang="Phudeviet"
			subs.append((href,lang,filename))
	if len(subs) == 0:
		# Fallback: subscene release search with "Title.Year" as the query.
		url='http://subscene.com/subtitles/release?q=%s'%title.replace(' ','.')+'.'+year
		pattern='<a href="(/subtitles/.+?)">\s+<span class=".+?">\s*(.+?)\s+</span>\s+<span>\s+(.+?)\s+</span>'
		# NOTE(review): 'LanguageFilter' is sent as a raw header here, while
		# the first request sends it inside a Cookie header — presumably the
		# Cookie form is the intended one; confirm before changing.
		subs=re.findall(pattern,urlfetch.get(url=url,headers={'LanguageFilter':'13,45'}).body)
		if subs:mess(u'List subs này có chứa sub có tên phim gần đúng!')

	if len(subs) == 0:
		mess(u'Không tìm thấy phụ đề của Video: %s'%title)
	
	# Tokenize the video filename; each token found in a sub name adds +1
	# to that sub's rating.
	filename = os.path.splitext(filename)[0].split('.x264')[0].replace(' ','.').replace('-','.').replace('*','.')
	ratlist=filename.split('.')
	for link,lang,name in subs:
		name=name.strip().replace(' ','.');rat=1;label='vie'
		if 'Vietnam' in lang:img='vi';url=subscene+link
		elif 'Phude' in lang:img='vi';url=link;name='[COLOR lime]phudeviet.org[/COLOR]: '+name
		else:img='en';url=subscene+link
		for i in ratlist:
			if re.search(i,name):rat+=1
		#filename:name,link:url,label:label,img:img,rating:str(rat)
		subtitles.append((name,url,label,img,str(rat)))
	items=list()
	# NOTE(review): the comparator compares x[0] (name) against y[3] (img) —
	# this looks like a typo for y[0]; confirm intent before changing.
	for filename,link,label,img,rating in sorted(subtitles,cmp=lambda x,y:cmp(x[0],y[3]),reverse=True):
		item = xbmcgui.ListItem(label=label,label2=filename,iconImage=rating,thumbnailImage=img)
		url="plugin://%s/?action=download&link=%s&filename=%s&img=%s"%(service,link,filename,img)
		items.append((url, item, False))
	xbmcplugin.addDirectoryItems(int(sys.argv[1]), items)
	xbmcplugin.endOfDirectory(int(sys.argv[1]))
Example #39
0
 def random_user(channel):
     """Pick a random viewer of a Twitch channel via the 2g.be helper API.

     :param channel: Twitch channel name.
     :return: a random viewer's username, stripped of surrounding whitespace.
     """
     response = urlfetch.get(
         "https://2g.be/twitch/randomviewer.php?channel=" + channel)
     # Decode the response bytes directly. The previous implementation used
     # str(response.content), which yields the "b'...'" repr, then sliced
     # characters back out — dropping the username's final character.
     return response.content.decode('utf-8').strip()
Example #40
0
    def test_fragment(self):
        """Requests with a fragment-only suffix behave like plain GETs."""
        resp = urlfetch.get(testlib.url('#urlfetch'))
        body = json.loads(resp.text)

        self.assertEqual(resp.status, 200)
        self.assertTrue(isinstance(resp.json, dict))
        self.assertTrue(isinstance(resp.text, urlfetch.unicode))
        self.assertEqual(resp.links, [])
        self.assertEqual(body['method'], 'GET')
Example #41
0
    def test_basic_auth(self):
        """A GET with `auth=` against the basic_auth endpoint succeeds."""
        resp = urlfetch.get(testlib.url('basic_auth'), auth=('urlfetch', 'fetchurl'))
        body = json.loads(resp.text)

        self.assertEqual(resp.status, 200)
        self.assertTrue(isinstance(resp.json, dict))
        self.assertTrue(isinstance(resp.text, urlfetch.unicode))
        self.assertEqual(resp.links, [])
        self.assertEqual(body['method'], 'GET')
Example #42
0
def api_fetch(api, channel=environ['TWITCH_CHANNEL'], user=None):
    """Fetch certain Twitch API functions"""
    # decapi path layout: /twitch/<api>/<channel>/[<user>]
    parts = [f"https://beta.decapi.me/twitch/{api}/{channel}/"]
    if user:
        parts.append(user)
    response = urlfetch.get(''.join(parts))
    return response.content.decode('utf-8')
Example #43
0
def google_find_phudeviet(title,film_year):
	pattern=".*".join(title.split()).lower();url = None
	hd={'User-Agent':'Mozilla/5.0','Accept-Language':'en-US,en;q=0.8,vi;q=0.6'}
	string_search = urllib.quote_plus('"'+title+'"')
	href='http://ajax.googleapis.com/ajax/services/search/web?v=1.0&rsz=large&start=0&'
	href+='q=site:phudeviet.org+%s'%string_search
	json=urlfetch.get(href,headers=hd).json
	if json['responseStatus']==403:print 'phudeviet - Google: response status 403'
	else:
Example #44
0
def GoogleDrive(url):
	"""Resolve a Google Drive link into a playable URL with its cookie.

	A 302 response carries the real media URL in Location; anything else
	is treated as quota-exhausted and a notification marker is returned.
	"""
	resp = urlfetch.get(url)
	if resp.status != 302:
		return 'thongbao4-Video het luot xem. Xin vui long quay lai sau.'
	cookie = resp.getheader('set-cookie')
	location = resp.getheader('location')
	return location + '|' + 'Cookie=' + cookie
Example #45
0
 def get_chatters(channel):
     """Return every user currently in a Twitch channel's chat.

     Flattens all chatter groups (moderators, viewers, ...) into one list,
     preserving the group order from the TMI endpoint.
     """
     response = urlfetch.get('https://tmi.twitch.tv/group/user/' + channel +
                             '/chatters')
     data = json.load(response)
     return [name
             for group in data['chatters'].values()
             for name in group]
Example #46
0
    def test_fragment_basic_auth(self):
        """Basic auth works even when the URL carries a fragment."""
        resp = urlfetch.get(testlib.url('basic_auth#urlfetch'), auth=('urlfetch', 'fetchurl'))
        body = json.loads(resp.text)

        self.assertEqual(resp.status, 200)
        self.assertTrue(isinstance(resp.json, dict))
        self.assertTrue(isinstance(resp.text, urlfetch.unicode))
        self.assertEqual(resp.links, [])
        self.assertEqual(body['method'], 'GET')
Example #47
0
    def test_get(self):
        """A plain GET returns 200 with a JSON body describing the request."""
        resp = urlfetch.get(testlib.url())
        body = json.loads(resp.text)

        self.assertEqual(resp.status, 200)
        self.assertTrue(isinstance(resp.json, dict))
        self.assertTrue(isinstance(resp.text, urlfetch.unicode))
        self.assertEqual(resp.links, [])
        self.assertEqual(body['method'], 'GET')
Example #48
0
    def test_history(self):
        """Each response's history is one step longer than its predecessor's."""
        resp = urlfetch.get(testlib.url('/redirect/5/0'), max_redirects=10)
        self.assertTrue(not not resp.history)

        # Newest first: final response, then each earlier hop.
        chain = resp.history + [resp]
        chain.reverse()

        for newer, older in zip(chain, chain[1:]):
            self.assertEqual(newer.history[:-1], older.history)
Example #49
0
def make_request(url,headers=hd,resp='b',maxr=0):
	"""Fetch *url* and return one view of the response, selected by *resp*.

	*resp* doubles as a mode flag and the result variable:
	  'o' -> the response object itself (left open)
	  'j' -> parsed JSON        's' -> status code
	  'u' -> unicode text       'c' -> cookie string
	  anything else ('b') -> raw body
	On any failure a mode-appropriate empty value is returned ({} / 500 / '')
	and a connection-error message is shown (suppressed for vaphim.com).

	:param headers: request headers (module-level default `hd`)
	:param maxr: max redirects to follow; 0 means library default
	"""
	try:
		if maxr==0:response=get(url,headers=headers,timeout=10)
		else:response=get(url,headers=headers,max_redirects=maxr,timeout=10)
		if resp=='o':resp=response
		else:
			if resp=='j':resp=response.json
			elif resp=='s':resp=response.status
			elif resp=='u':resp=response.text
			elif resp=='c':resp=response.cookiestring
			else:resp=response.body
			# The response is only closed when a plain value was extracted;
			# mode 'o' hands the open response to the caller.
			response.close()
	except:
		if resp=='j':resp=dict()
		elif resp=='s':resp=500
		else:resp=''
		if 'vaphim.com' not in url:
			# "Connection error to: <host>!" — host extracted from the URL.
			mess(u'Lỗi kết nối tới: %s!'%xsearch('//(.{5,20}\.\w{2,3})',s2u(url),1),'make_request')
		print 'Lỗi kết nối tới: %s!'%u2s(url);
	return resp#unicode:body=response.text
Example #50
0
def get_rss_entries(url):
    """Fetch *url* (random UA, up to 3 redirects) and parse it as a feed.

    Returns [] on any fetch/parse error.
    """
    try:
        log('fetching %s', url)
        r = urlfetch.get(url, timeout=5, randua=True, max_redirects=3)
        log('%d bytes fetched', len(r.body))

        log('parsing feed content')
        d = feedparser.parse(r.body)
        log('parsing OK')
    except Exception, e:
        log('[error] get_rss_entries: %s', str(e))
        return []
    # NOTE(review): the success path has no return statement, so a
    # successful parse yields None — presumably this should return
    # d.entries (or the snippet is truncated); confirm before changing.
Example #51
0
def get_reverse_geo(lat, lon):
    """Reverse-geocode a coordinate pair via OpenStreetMap Nominatim.

    :param lat: latitude
    :param lon: longitude
    :return: the Nominatim 'display_name' string, or None on failure
    """
    url = "https://nominatim.openstreetmap.org/reverse?format=jsonv2&lat={}&lon={}".format(
        lat, lon)
    response = urlfetch.get(url)

    try:
        r_json = json.loads(response.content)
        return r_json['display_name']
    except (ValueError, KeyError, TypeError):
        # Narrowed from a bare `except:`: only malformed JSON or an
        # unexpected payload shape are handled; real bugs now surface.
        print("Error in reverse geo code")
def get_emus_info():
    """Gets all the publicly available emulator packages from the Android
    image repositories.

    Returns a list of EmuInfo objects for every "emulator" remotePackage
    found in repos that responded with HTTP 200.
    """
    documents = []
    for repo_url in EMU_REPOS:
        reply = urlfetch.get(repo_url)
        if reply.status == 200:
            documents.append(reply.content)
    package_groups = [
        [pkg for pkg in ET.fromstring(doc).findall("remotePackage")
         if "emulator" == pkg.attrib["path"]]
        for doc in documents
    ]
    # Flatten the groups into a single list of info objects.
    return [EmuInfo(pkg) for group in package_groups for pkg in group]
Example #53
0
def get_latlong(obname, oaname, dbname, daname):
    """Geocode origin and destination stops in Bangalore via Nominatim.

    :param obname: origin bus-stop name
    :param oaname: origin area name
    :param dbname: destination bus-stop name
    :param daname: destination area name
    :return: [[lat_o, lon_o], [lat_d, lon_d]] as floats, or None on failure
    """
    def _clean(name):
        # Same normalization as before: spaces become %20, periods dropped.
        return name.replace(' ', '%20').replace('.', '')

    base = ("https://nominatim.openstreetmap.org/search/"
            "{}%20{}%20Bangalore?format=json&addressdetails=1&limit=1")
    responseo = urlfetch.get(base.format(_clean(obname), _clean(oaname)))
    responsed = urlfetch.get(base.format(_clean(dbname), _clean(daname)))
    print(responseo.content)
    try:
        o_json = json.loads(responseo.content)
        d_json = json.loads(responsed.content)
        return [[float(o_json[0]['lat']), float(o_json[0]['lon'])],
                [float(d_json[0]['lat']), float(d_json[0]['lon'])]]
    except (ValueError, IndexError, KeyError):
        # Narrowed from a bare `except:` — empty result lists, malformed
        # JSON or missing keys are expected failure modes here.
        print("Bt in lat long function")
Example #54
0
    def test_get_params(self):
        """Params given as a dict show up encoded in the final request URL."""
        pair_a = (randstr(), randstr())
        pair_b = (randstr(), randstr())
        resp = urlfetch.get(testlib.test_server_host, params=dict((pair_a, pair_b)))
        body = json.loads(resp.text)

        self.assertEqual(resp.status, 200)
        self.assertTrue(isinstance(resp.json, dict))
        self.assertTrue(isinstance(resp.text, urlfetch.unicode))
        self.assertEqual(resp.links, [])
        self.assertEqual(body['method'], 'GET')
        for pair in (pair_a, pair_b):
            self.assertTrue(('%s=%s' % pair) in resp.url)
Example #55
0
    def test_fragment_query_string(self):
        """A query string followed by a fragment is sent intact, sans fragment."""
        expected = testlib.randdict(5)
        encoded = urlfetch.urlencode(expected)

        resp = urlfetch.get(testlib.url('?' + encoded + '#urlfetch'))
        body = json.loads(resp.text)

        self.assertEqual(resp.status, 200)
        self.assertTrue(isinstance(resp.json, dict))
        self.assertTrue(isinstance(resp.text, urlfetch.unicode))
        self.assertEqual(resp.links, [])
        self.assertEqual(body['method'], 'GET')
        self.assertEqual(body['query_string'], encoded)
        self.assertEqual(body['get'], expected)
def _download(url, dest):
    """Stream *url* into *dest* with a progress bar; no-op if it exists."""
    if os.path.exists(dest):
        print("  Skipping already downloaded file: {}".format(dest))
        return dest
    with urlfetch.get(url) as resp:
        size = int(resp.headers['content-length'])
        with tqdm(resp, total=size, unit='B', unit_scale=True) as bar:
            with open(dest, 'wb') as out:
                for chunk in resp:
                    out.write(chunk)
                    bar.update(len(chunk))
    return dest
Example #57
0
def resolve_url(url):
    """Resolve a protected download *url* to its redirect target and hand it
    to Kodi as the resolved playback URL.

    Redirects are not followed: a 302 whose Location is not the logout page
    is taken as the real media URL. Otherwise login() is retried and the
    original *url* is played as-is.
    """
    #if not login():
    #   return
    #headers['Cookie'] = cache.get('cookie')
    #print headers['Cookie']
    # NOTE(review): this mutates the module-level `headers` dict — confirm
    # no concurrent caller depends on its previous Cookie value.
    headers['Cookie'] = doLogin()
    response = urlfetch.get(url, headers=headers, follow_redirects=False)
    if response.status == 302 and response.headers['location'].find(
            'logout.php') < 0:
        url = response.headers['location']
    else:
        # A redirect to logout.php (or no redirect) means the session died.
        login()

    item = xbmcgui.ListItem(path=url)
    xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, item)
Example #58
0
def _download(url, dest):
    """Downloads the given url to the given destination with a progress bar.

    This function will immediately return if the file already exists.
    """
    if os.path.exists(dest):
        print("  Skipping already downloaded file: {}".format(dest))
        return dest
    with urlfetch.get(url) as resp:
        total = int(resp.headers["content-length"])
        with tqdm(resp, total=total, unit="B", unit_scale=True) as progress:
            with open(dest, "wb") as sink:
                for chunk in resp:
                    sink.write(chunk)
                    progress.update(len(chunk))
    return dest
Example #59
0
def get_images_info():
    """Gets all the publicly available system images from the Android Image Repos.
         Returns a list of AndroidSystemImages that were found.
      """
    xml = []
    for url in SYSIMG_REPOS:
        response = urlfetch.get(url)
        if response.status == 200:
            xml.append(response.content)
    xml = [ET.fromstring(x).findall('remotePackage') for x in xml]
    # Flatten the list of lists into a system image objects.
    infos = [SysImgInfo(item) for sublist in xml for item in sublist]
    # Filter only for x86_64 images (TODO: allow other types).
    # Return a real list: on Python 3, `filter` yields a lazy one-shot
    # iterator, which breaks the docstring's list contract and any caller
    # that iterates the result more than once.
    return [info for info in infos if info.abi == "x86_64"]