def test_adfly_2(self):
    self.assertEqual(unshortenit.unshorten('http://adf.ly/WzXu2'),
                     ('http://www39.zippyshare.com/v/69303767/file.html', 200))
    self.assertEqual(unshortenit.unshorten('http://adf.ly/1icWR'),
                     ('http://adf.ly/1icWR', 'No ysmm variable found'))
    self.assertEqual(unshortenit.unshorten('http://links.devitrianto.com/yy', type='adfly'),
                     ('https://www.sendspace.com/file/a2z6ji', 200))  # sendspace HEAD responses force HTTPS
def test_generic(self):
    self.assertEqual(unshortenit.unshorten('http://ul.to'),
                     ('http://uploaded.net/', 200))
    self.assertEqual(unshortenit.unshorten('http://t.co/fsbtLWPUIJ'),
                     ('http://www.igen.fr/app-store/drift-mania-street-outlaws-du-drift-son-paroxysme-108452', 200))  # Link is now 404
    self.assertEqual(unshortenit.unshorten('http://p.ost.im/d7DPHP'),
                     ('http://p.ost.im/d7DPHP', 'list index out of range'))
def test_adfly(self):
    self.assertEqual(unshortenit.unshorten('http://adf.ly/WzXu2'),
                     ('http://www39.zippyshare.com/v/69303767/file.html', 200))
    self.assertEqual(unshortenit.unshorten('http://adf.ly/1icWR'),
                     ('http://adf.ly/1icWR', 'No ysmm variable found'))
    self.assertEqual(unshortenit.unshorten('http://links.devitrianto.com/yy', type='adfly'),
                     ('http://www.sendspace.com/file/a2z6ji', 200))
    self.assertEqual(unshortenit.unshorten('http://adf.ly/bJ8mm'),
                     ('http://www.mediafire.com/download/cixal2y0auya19m/com.ratrodstudio.skateparty2.zip', 200))
def processPage(self, content):
    soup = common.util.webFunctions.as_soup(self.content)
    releases = []
    for tweet in soup.find_all('li', attrs={"data-item-type": "tweet"}):
        if "promoted" in str(tweet['class']):
            continue
        content = tweet.find("p", class_='tweet-text')
        if content and content.a:
            itemtxt = content.get_text()
            itemurl = content.a['data-expanded-url']
            itemurl, status = unshortenit.unshorten(itemurl)
            if status != 200:
                continue
            urlnl = urllib.parse.urlsplit(itemurl).netloc.lower()
            if urlnl == 'www.baka-tsuki.org':
                msg = self.dispatchBT(itemurl, itemtxt)
                if msg:
                    releases.append(msg)
            if urlnl in NANO_DESU_MAP:
                msg = self.dispatchNanoDesu(urlnl, itemurl, itemtxt)
                if msg:
                    releases.append(msg)
    self.log.info("Found %s releases from Twitter Feed", len(releases))
    if releases:
        self.sendReleases(releases)
def _downloadBrowser(self, destination):
    """
    Handles browser-type downloads. Resolves adf.ly links as well as other
    shorteners (it does hit them and generate revenue - no bypass), and
    handles Dropbox links as well.

    Args:
        destination: string of the destination file.
    """
    response = requests.get(self.item.get('url'))
    if response.status_code != 200:
        print('Failed to download %s with response code %i'
              % (self.item.get('url'), response.status_code))
    content = BeautifulSoup(response.content)
    pattern = quote(self.item.get('file'))
    link = content.find(href=re.compile(pattern))
    if not link:
        url, status = unshortenit.unshorten(self.item.get('url'))
    else:
        url = link.get('href')
    if not url:
        print('Failed to download %s, could not find a link!' % self.item.get('url'))
        return
    if 'dropbox.com' in url:
        url += '?dl=1'
    self._runDownload(url, destination)
def _unshorten(self):
    # Unshorten any shortened urls
    if self._url.startswith("http://adf.ly"):
        new_url, status = unshorten(self._url)
        if status != 200:
            raise DownloadException("Unable to unshorten url: '%s'" % (self._url,))
        self._url = new_url
def Get_links(name, url):
    username = control.setting('username')
    password = control.setting('password')
    lcookie = cache.get(_Login, 8, BASEURL, username, password)
    name = re.sub(r'\)\s*\[.+?]', ')', name)
    r = cache.get(client.request, 2, url, True, True, False, None, None, None, False, None, None, lcookie)
    calidad = client.parseDOM(r, 'span', attrs={'class': 'calidad2'})[0]
    calidad = client.replaceHTMLCodes(calidad)
    calidad = calidad.encode('utf-8')
    if 'Προσε' in calidad:
        trailer = Trailer(url)
        addDir('[B][COLOR white]%s | [B][COLOR lime]Trailer[/COLOR][/B]' % name,
               trailer, 100, iconimage, FANART, '')
    else:
        try:
            back = client.parseDOM(r, 'img', ret='src', attrs={'class': 'cover'})[0]
        except BaseException:
            back = client.parseDOM(r, 'img', ret='src', attrs={'itemprop': 'image'})[0]
        try:
            data = client.parseDOM(r, 'div', attrs={'class': 'tabcontent'})
            links = zip(client.parseDOM(data, 'a', ret='href'),
                        client.parseDOM(data, 'a'))
            description = Sinopsis(url)
            trailer = Trailer(url)
            addDir('[B][COLOR white]%s | [B][COLOR lime]Trailer[/COLOR][/B]' % name,
                   trailer, 100, iconimage, back, '')
            for url, host in links:
                host = clear_Title(host).encode('utf-8')
                url = re.sub(r'http://adf.ly/\d+/', '', url)
                if 'buck' in url:
                    continue
                elif 'adf.ly' in url:
                    url = unshortenit.unshorten(str(url))
                    if url[1] != 200:
                        continue
                    else:
                        url = url[0]
                if 'easybytez' in url:
                    continue
                if 'zippy' in url:
                    continue
                title = '%s [B][COLOR white]| %s[/COLOR][/B]' % (name, host.capitalize())
                addDir(title, url, 100, iconimage, back, str(description))
        except BaseException:
            pass
    views.selectView('movies', 'movie-view')
def test_anonymz(self):
    test_links = [
        ('https://anonymz.com/?http://example.com/', ('http://example.com/', 200)),
        ('https://anonymz.com/?http://stackoverflow.com/', ('http://stackoverflow.com/', 200)),
    ]
    for link, correct_result in test_links:
        obtained_result = unshortenit.unshorten(link)
        self.assertEqual(obtained_result, correct_result)
def test_invalid(self):
    test_links = [
        'meloinvento',
        'htp:/kk',
        'wwww.kk.es',
        'httpp://www.kk.es'
    ]
    for link in test_links:
        uri, res = unshortenit.unshorten(link)
        self.assertEqual(uri, link)
        self.assertNotEqual(res, 200)
def restore(url):
    # attempt to unshorten
    unshortened_uri, status = unshortenit.unshorten(url)
    # verify if URL worked, then return unshortened
    if status == 200:
        return unshortened_uri
    else:
        return None
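# A minimal sketch of the contract that restore() and the tests above rely on:
# unshortenit.unshorten() returns a (url, status) tuple, where status is 200 on
# success and an error code or message string otherwise (on failure, the input
# URL comes back unchanged). The URLs below are placeholders, not endpoints the
# library is known to resolve.
import unshortenit

for candidate in ['http://adf.ly/XXXXX', 'not-a-url']:
    uri, status = unshortenit.unshorten(candidate)
    if status == 200:
        print('resolved to %s' % uri)
    else:
        print('could not unshorten %r: %s' % (candidate, status))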
def findvideos(item):
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"<!--.*?-->", "", data)
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    bloque_tab = scrapertools.find_single_match(data, '<div id="verpelicula">(.*?)<div class="tab_container">')
    patron = r'<li><a href="#([^<]+)"><span class="re">\d<\/span><span class="([^<]+)"><\/span><span class=.*?>([^<]+)<\/span>'
    check = re.compile(patron, re.DOTALL).findall(bloque_tab)
    servers_data_list = []
    patron = r'<div id="(tab\d+)" class="tab_content"><script type="text/rocketscript">(\w+)\("([^"]+)"\)</script></div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) == 0:
        patron = r'<div id="(tab\d+)" class="tab_content"><script>(\w+)\("([^"]+)"\)</script></div>'
        matches = re.compile(patron, re.DOTALL).findall(data)
    for check_tab, server, id in matches:
        if check_tab in str(check):
            idioma, calidad = scrapertools.find_single_match(str(check), "" + check_tab + "', '(.*?)', '(.*?)'")
            servers_data_list.append([server, id, idioma, calidad])
    url = host + "/Js/videod.js"
    data = httptools.downloadpage(url).data
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    data = data.replace('<iframe width="100%" height="400" scrolling="no" frameborder="0"', '')
    patron = r'function (\w+)\(id\).*?'
    patron += 'data-src="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for server, url in matches:
        for enlace, id, idioma, calidad in servers_data_list:
            if server == enlace:
                video_url = re.sub(r"embed\-|\-.*?x.*?\.html|u\'|\'\(", "", str(url))
                video_url = re.sub(r"'\+codigo\+'", "", video_url)
                video_url = video_url.replace('embed//', 'embed/')
                video_url = video_url + id
                if "goo.gl" in video_url:
                    try:
                        from unshortenit import unshorten
                        url = unshorten(video_url)
                        video_url = scrapertools.get_match(str(url), "u'([^']+)'")
                    except:
                        continue
                title = "Ver en: %s [" + idioma + "][" + calidad + "]"
                itemlist.append(item.clone(title=title, url=video_url, action="play",
                                           thumbnail=item.category, language=idioma,
                                           quality=calidad))
    tmdb.set_infoLabels(itemlist)
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    if __comprueba_enlaces__:
        itemlist = servertools.check_list_links(itemlist, __comprueba_enlaces_num__)
    if item.library and config.get_videolibrary_support() and len(itemlist) > 0:
        infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'title': item.fulltitle}
        itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca",
                             action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
                             text_color="0xFFff6666", thumbnail='http://imgur.com/0gyYvuC.png'))
    return itemlist
def test_invalid(self):
    test_links = [
        'meloinvento',
        'htp:/kk',
        'wwww.kk.es',
        'httpp://www.kk.es'
    ]
    for link in test_links:
        obtained_result = unshortenit.unshorten(link)
        self.assertEqual(obtained_result, (link, unshortenit.INVALID_URL_ERROR_CODE))
def test_hrefli(self):
    test_links = [
        ('https://href.li/?http://example.com/', ('http://example.com/', 200)),
        ('https://href.li/?http://stackoverflow.com/', ('http://stackoverflow.com/', 200)),
    ]
    for link, correct_result in test_links:
        obtained_result = unshortenit.unshorten(link)
        self.assertEqual(obtained_result, correct_result)
def expand_atlauncher_url(original_url, download_type):
    if download_type == 'direct':
        return original_url
    elif download_type == 'server':
        # Note, pathname2url for applying percent encoding -
        # "Pams HarvestCraft 1.7.10c.jar" is an example of something that
        # needs percent-encoding.
        return URL_BASE + original_url.replace(' ', '%20')
    elif download_type == 'browser':
        if 'http://adf.ly' in original_url:
            status = unshortenit.unshorten(original_url)
            if status[1] == 200:  # 200 = HTTP OK
                print('Unshortened {url1} to {url2}.'.format(url1=original_url, url2=status[0]))
                return status[0]
            else:
                return 'INVALID ADFLY LINK OR OTHER HTTP ERROR'
        return 'Download file manually'
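# Hypothetical usage of expand_atlauncher_url() above; the adf.ly URL is a
# placeholder. Since the function signals errors in-band rather than raising,
# callers must treat the sentinel strings ('Download file manually',
# 'INVALID ADFLY LINK OR OTHER HTTP ERROR') as failures.
resolved = expand_atlauncher_url('http://adf.ly/XXXXX', 'browser')
if resolved.startswith('http'):
    print('ready to fetch: %s' % resolved)
else:
    print('manual intervention needed: %s' % resolved)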
def Get_epis_links(name, url):
    lcookie = cache.get(_Login, 4, BASEURL)
    OPEN = cache.get(client.request, 4, url, True, True, False, None, None, None, False, None, None, lcookie)
    # Regex2 = re.compile('<a href="(http[s]?://adf.ly.+?|http[s]?://vidlox.+?|http[s]?://openload.+?|http[s]?://vidto.+?|http[s]?://streamin.+?|http[s]?://flashx.+?)".+?target="_blank".*?>(.*?)</a>', re.DOTALL).findall(OPEN)
    data = client.parseDOM(OPEN, 'td', attrs={'class': 'easySpoilerRow'})
    links = []
    for i in data:
        links += zip(client.parseDOM(i, 'a', ret='href', attrs={'target': '_blank'}),
                     client.parseDOM(i, 'a'))
    description = Sinopsis(url)
    trailer = Trailer(url)
    addDir('[B][COLOR white]%s | [B][COLOR lime]Trailer[/COLOR][/B]' % name,
           trailer, 100, iconimage, FANART, '')
    for url, title in links:
        title = re.sub(r'\d{4}', '', title)
        title = clear_Title(title)
        title = Lang(32018).encode('utf-8') if title == "" else title.encode('utf-8')
        url = re.sub(r'http://adf.ly/\d+/', '', url)
        if 'buck' in url:
            continue
        elif 'adf.ly' in url:
            url = unshortenit.unshorten(url)
            if url[1] != 200:
                continue
            else:
                url = url[0]
        if 'easybytez' in url:
            continue
        if 'zippy' in url:
            continue
        addDir('[B][COLOR white]%s[/COLOR][/B]' % title, url, 100, iconimage, FANART, str(description))
    views.selectView('movies', 'movie-view')
def test_adfocus(self):
    result = unshortenit.unshorten('http://adfoc.us/340347863622')
    self.assertEqual(result, ('http://www7.zippyshare.com/v/24727439/file.html', 200))
def test_generic(self):
    self.assertEqual(unshortenit.unshorten('http://ul.to'),
                     ('http://uploaded.net/', 200))
    self.assertEqual(unshortenit.unshorten('http://t.co/fsbtLWPUIJ'),
                     ('http://www.igen.fr/app-store/drift-mania-street-outlaws-du-drift-son-paroxysme-108452', 200))
    self.assertEqual(unshortenit.unshorten('http://p.ost.im/d7DPHP'),
                     ('http://crazymikesapps.com/drift-mania-street-outlaws-video-review/', 200))
def test_google_unwrap(self):
    self.assertEqual(
        unshortenit.unshorten('https://www.google.com/url?url=https://en.wikipedia.org/wiki/Google&rct=j&q=&esrc=s&sa=U&ved=0ahUKEwijvuKNxdXKAhVT0GMKHQSDAp8QFgg3MAs&sig2=nsR8hgyoNqY87WcWVtt9Hw&usg=AFQjCNFUmLH6w9LpY157wHV4SowfxvZ4Ig'),
        ('https://en.wikipedia.org/wiki/Google', 200))
def url_expander(url):
    unshortened_uri, status = unshortenit.unshorten(url)
    return unshortened_uri
def test_shst(self):
    # If you HEAD 'https://adf.ly/b2H0Y', it returns 'http://ay.gy/b2H0Y' for... some reason
    # Also, a shortener url to another shortener? Really?
    self.assertEqual(unshortenit.unshorten('http://sh.st/INTI'),
                     ('http://ay.gy/b2H0Y', 200))
def findvideos(item): logger.info("pelisalacarta.peliculasdk findvideos") itemlist = [] data = scrapertools.cache_page(item.url) data = re.sub(r"<!--.*?-->","",data) data = re.sub(r"\n|\r|\t|\s{2}| ","",data) bloque_tab= scrapertools.find_single_match(data,'<div id="verpelicula">(.*?)<div class="tab_container">') patron ='<li><a href="#([^<]+)"><span class="re">\d<\/span><span class="([^<]+)"><\/span><span class=.*?>([^<]+)<\/span>' check= re.compile(patron,re.DOTALL).findall(bloque_tab) servers_data_list = [] patron = '<div id="(tab\d+)" class="tab_content"><script type="text/rocketscript">(\w+)\("([^"]+)"\)</script></div>' matches = re.compile(patron,re.DOTALL).findall(data) if len(matches)==0: patron = '<div id="(tab\d+)" class="tab_content"><script>(\w+)\("([^"]+)"\)</script></div>' matches = re.compile(patron,re.DOTALL).findall(data) for check_tab ,server, id in matches: scrapedplot = scrapertools.get_match(data,'<span class="clms">(.*?)</div></div>') plotformat = re.compile('(.*?:) </span>',re.DOTALL).findall(scrapedplot) scrapedplot = scrapedplot.replace(scrapedplot,bbcode_kodi2html("[COLOR white]"+scrapedplot+"[/COLOR]")) for plot in plotformat: scrapedplot = scrapedplot.replace(plot,bbcode_kodi2html("[COLOR red][B]"+plot+"[/B][/COLOR]")) scrapedplot = scrapedplot.replace("</span>","[CR]") scrapedplot = scrapedplot.replace(":","") if check_tab in str(check): idioma, calidad = scrapertools.find_single_match(str(check),""+check_tab+"', '(.*?)', '(.*?)'") servers_data_list.append ([server,id, idioma, calidad]) url = "http://www.peliculasdk.com/Js/videod.js" data = scrapertools.cachePage(url) data = re.sub(r"\n|\r|\t|\s{2}| ","",data) data = data.replace ('<iframe width="100%" height="400" scrolling="no" frameborder="0"','') patron = 'function (\w+)\(id\).*?' patron+= 'data-src="([^"]+)"' matches = re.compile(patron,re.DOTALL).findall(data) for server, url in matches: for enlace , id, idioma, calidad in servers_data_list: if server ==enlace: video_url = re.sub(r"embed\-|\-.*?x.*?\.html|u\'|\'\(","",str(url)) video_url = re.sub(r"'\+codigo\+'","",video_url) video_url= video_url.replace('embed//','embed/') video_url= video_url + id if "goo.gl" in video_url: try: from unshortenit import unshorten url =unshorten(video_url) video_url = scrapertools.get_match(str(url),"u'([^']+)'") except: continue servertitle = scrapertools.get_match(video_url,'http.*?://(.*?)/') servertitle = servertitle.replace(servertitle,bbcode_kodi2html("[COLOR red]"+servertitle+"[/COLOR]")) servertitle = servertitle.replace("embed.","") servertitle = servertitle.replace("player.","") servertitle = servertitle.replace("api.video.","") servertitle = servertitle.replace("hqq.tv","netu.tv") servertitle = servertitle.replace("anonymouse.org","netu.tv") title = bbcode_kodi2html("[COLOR orange]Ver en --[/COLOR]") + servertitle +" "+ idioma +" "+ calidad itemlist.append( Item(channel=item.channel, title =title , url=video_url, action="play", thumbnail=item.category, plot=scrapedplot, fanart=item.show ) ) if item.library and config.get_library_support() and len(itemlist) > 0 : infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'title': item.fulltitle} itemlist.append(Item(channel=item.channel, title="Añadir esta película a la biblioteca", action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels, text_color="0xFFff6666", thumbnail='http://imgur.com/0gyYvuC.png')) return itemlist
def clean_url(url):
    unshortened_uri, status = unshortenit.unshorten(url)
    return unshortened_uri
def findvideos(item):
    logger.info()
    itemlist = []
    data = scrapertools.cache_page(item.url)
    data = re.sub(r"<!--.*?-->", "", data)
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    bloque_tab = scrapertools.find_single_match(data, '<div id="verpelicula">(.*?)<div class="tab_container">')
    patron = r'<li><a href="#([^<]+)"><span class="re">\d<\/span><span class="([^<]+)"><\/span><span class=.*?>([^<]+)<\/span>'
    check = re.compile(patron, re.DOTALL).findall(bloque_tab)
    servers_data_list = []
    patron = r'<div id="(tab\d+)" class="tab_content"><script type="text/rocketscript">(\w+)\("([^"]+)"\)</script></div>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) == 0:
        patron = r'<div id="(tab\d+)" class="tab_content"><script>(\w+)\("([^"]+)"\)</script></div>'
        matches = re.compile(patron, re.DOTALL).findall(data)
    for check_tab, server, id in matches:
        scrapedplot = scrapertools.get_match(data, '<span class="clms">(.*?)</div></div>')
        plotformat = re.compile('(.*?:) </span>', re.DOTALL).findall(scrapedplot)
        scrapedplot = scrapedplot.replace(scrapedplot, bbcode_kodi2html("[COLOR white]" + scrapedplot + "[/COLOR]"))
        for plot in plotformat:
            scrapedplot = scrapedplot.replace(plot, bbcode_kodi2html("[COLOR red][B]" + plot + "[/B][/COLOR]"))
        scrapedplot = scrapedplot.replace("</span>", "[CR]")
        scrapedplot = scrapedplot.replace(":", "")
        if check_tab in str(check):
            idioma, calidad = scrapertools.find_single_match(str(check), "" + check_tab + "', '(.*?)', '(.*?)'")
            servers_data_list.append([server, id, idioma, calidad])
    url = "http://www.peliculasdk.com/Js/videod.js"
    data = scrapertools.cachePage(url)
    data = re.sub(r"\n|\r|\t|\s{2}| ", "", data)
    data = data.replace('<iframe width="100%" height="400" scrolling="no" frameborder="0"', '')
    patron = r'function (\w+)\(id\).*?'
    patron += 'data-src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for server, url in matches:
        for enlace, id, idioma, calidad in servers_data_list:
            if server == enlace:
                video_url = re.sub(r"embed\-|\-.*?x.*?\.html|u\'|\'\(", "", str(url))
                video_url = re.sub(r"'\+codigo\+'", "", video_url)
                video_url = video_url.replace('embed//', 'embed/')
                video_url = video_url + id
                if "goo.gl" in video_url:
                    try:
                        from unshortenit import unshorten
                        url = unshorten(video_url)
                        video_url = scrapertools.get_match(str(url), "u'([^']+)'")
                    except:
                        continue
                servertitle = scrapertools.get_match(video_url, 'http.*?://(.*?)/')
                servertitle = servertitle.replace("embed.", "")
                servertitle = servertitle.replace("player.", "")
                servertitle = servertitle.replace("api.video.", "")
                servertitle = re.sub(r"hqq.tv|hqq.watch", "netutv", servertitle)
                servertitle = servertitle.replace("anonymouse.org", "netu")
                title = servertitle
                logger.debug('servertitle: %s' % servertitle)
                server = servertools.get_server_name(servertitle)
                logger.debug('server: %s' % server)
                itemlist.append(Item(channel=item.channel, title=title, url=video_url, action="play",
                                     thumbnail=item.category, plot=scrapedplot, fanart=item.show,
                                     server=server, language=idioma, quality=calidad))
    if item.library and config.get_videolibrary_support() and len(itemlist) > 0:
        infoLabels = {'tmdb_id': item.infoLabels['tmdb_id'], 'title': item.fulltitle}
        itemlist.append(Item(channel=item.channel, title="Añadir esta película a la videoteca",
                             action="add_pelicula_to_library", url=item.url, infoLabels=infoLabels,
                             text_color="0xFFff6666", thumbnail='http://imgur.com/0gyYvuC.png'))
    return itemlist
def test_shst(self):
    self.assertEqual(unshortenit.unshorten('http://sh.st/INTI'),
                     ('https://adf.ly/b2H0Y', 200))
def test_generic(self):
    self.assertEqual(unshortenit.unshorten('http://ul.to'),
                     ('http://uploaded.net/', 200))
    self.assertEqual(unshortenit.unshorten('http://t.co/fsbtLWPUIJ'),
                     ('http://www.igen.fr/app-store/drift-mania-street-outlaws-du-drift-son-paroxysme-108452', 200))
def unshortenit(url):
    # This function shares its name with the unshortenit module, so the import
    # happens inside the function body to sidestep the name collision.
    import unshortenit
    unshortened_uri, status = unshortenit.unshorten(url)
    return unshortened_uri