def test_tinyurl_shortener(self):
    """TinyURL engine shortens and expands fixed, known URLs."""
    shortener = Shortener('TinyurlShortener')
    self.assertEqual(shortener.short('http://www.google.com'),
                     'http://tinyurl.com/1c2')
    self.assertEqual(shortener.expand('http://tinyurl.com/ycus76'),
                     'https://www.facebook.com')
def test_googl_shortener(self):
    """Goo.gl engine shortens and expands fixed, known URLs."""
    shortener = Shortener('GoogleShortener')
    self.assertEqual(shortener.short('http://www.google.com'),
                     'http://goo.gl/fbsS')
    self.assertEqual(shortener.expand('http://goo.gl/fbsS'),
                     'http://www.google.com/')
def test_google_bad_params():
    """Goo.gl engine raises TypeError when called without credentials."""
    shortener = Shortener('GoogleShortener')
    for method in (shortener.short, shortener.expand):
        with pytest.raises(TypeError):
            method(expanded)
def test_sentala_shortener(self):
    """Sentala engine round-trips a URL and builds the matching QR code."""
    shortener = Shortener('SentalaShortener')
    original = 'http://www.pilgrims.com'
    shortened = shortener.short(original)
    self.assertEqual(shortener.expand(shortened), original)
    self.assertEqual(
        shortener.qrcode(),
        'http://chart.apis.google.com/'
        'chart?cht=qr&chl={}&chs=120x120'.format(shortened))
def test_generic_expander(self):
    """GenericExpander can expand a link produced by another engine."""
    tiny = Shortener("TinyurlShortener")
    shortened = tiny.short(self.test_url)
    expander = Shortener("GenericExpander")
    # A valid url result is enough for an answer.
    self.assertEqual(expander.expand(shortened), self.test_url)
def url_shortener(url):
    """Shorten ``url`` through the goo.gl API.

    The API key is read from the ``SHORTENER_API_KEY`` environment
    variable (it is NOT a function parameter, as the old docstring
    implied).

    :param url: url to be shortened, already urlencoded
    :returns: the shortened url as a string
    :raises KeyError: if ``SHORTENER_API_KEY`` is not set in the
        environment
    """
    from pyshorteners.shorteners import Shortener
    shortener = Shortener('GoogleShortener',
                          api_key=os.environ['SHORTENER_API_KEY'])
    return shortener.short(url)
def test_qrcx_shortener(self):
    """Qr.cx engine round-trips a URL and exposes the matching QR code."""
    shortener = Shortener('QrCxShortener')
    original = 'https://www.facebook.com/'
    shortened = shortener.short(original)
    self.assertEqual(shortener.expand(shortened), original)
    self.assertEqual(
        shortener.qrcode(),
        'http://chart.apis.google.com/'
        'chart?cht=qr&chl={}&chs=120x120'.format(shortened))
def test_dottk_shortener(self):
    """Dot.tk engine: shortening is mocked; expand passes the URL back."""
    shortener = Shortener('DottkShortener')
    target = 'http://www.google.com/'
    shortener.short = MagicMock(return_value='http://3vzpu.tk')
    shortener.short(target)
    shortener.short.assert_called_with(target)
    # For this engine expand() appears to return its input unchanged.
    self.assertEqual(shortener.expand('http://adf.ly/test'),
                     'http://adf.ly/test')
def test_is_valid_url(self):
    """is_valid_url accepts schemed URLs, rejects bare domains; short() validates."""
    self.assertTrue(is_valid_url('http://www.google.com'))
    self.assertFalse(is_valid_url('www.google.com'))
    shortener = Shortener('TinyurlShortener')
    # An invalid URL must be rejected before any network call.
    with self.assertRaises(ValueError):
        shortener.short('http://12')
def minify_callback(word, word_eol, userdata):
    """HexChat hook: shorten every URL in the outgoing message via is.gd.

    word_eol[0] holds the full text of the message; each whitespace
    separated token that validates as a URL is replaced by its shortened
    form, then the rewritten message is sent to the current channel.

    :param word: HexChat word list (unused here)
    :param word_eol: HexChat end-of-line word list; index 0 is the message
    :param userdata: opaque HexChat callback data (unused)
    :returns: hexchat.EAT_ALL so the original message is suppressed
    """
    shorty = Shortener('Isgd')
    tokens = word_eol[0].split()  # word_eol[0] is the full text message
    out_tokens = []
    # Loop variable renamed: the original reused `word`, shadowing the
    # callback parameter of the same name.
    for token in tokens:
        if validators.url(token):
            token = shorty.short(token)
        out_tokens.append(token)
    # join() replaces quadratic `+=` concatenation and drops the stray
    # trailing space the old loop appended to the message.
    finalwords = ' '.join(out_tokens)
    currentchannel = hexchat.get_info('channel')
    hexchat.command("MSG " + currentchannel + " " + finalwords)
    return hexchat.EAT_ALL
def test_adfly_shortener(self):
    """Adf.ly engine: shortening mocked; expand passes the URL through."""
    shortener = Shortener('AdflyShortener', key='abcd', uid='123')
    target = 'http://www.google.com/'
    shortener.short = MagicMock(return_value='http://adf.ly/test')
    shortener.short(target)
    shortener.short.assert_called_with(target)
    self.assertEqual(shortener.expand('http://adf.ly/test'),
                     'http://adf.ly/test')
def test_tinyurl_shortener(self):
    """TinyURL engine: round trip, cached attributes, and QR code URL."""
    shortener = Shortener('TinyurlShortener')
    expected_short = 'http://tinyurl.com/nc9m936'
    shortened = shortener.short(self.test_url)
    self.assertEqual(shortened, expected_short)
    # expand() with no argument reuses the last shortened URL.
    self.assertEqual(shortener.expand(), self.test_url)
    self.assertEqual(shortener.expand(expected_short), self.test_url)
    self.assertEqual(shortener.expanded, self.test_url)
    self.assertEqual(shortener.shorten, expected_short)
    self.assertEqual(
        shortener.qrcode(),
        'http://chart.apis.google.com/'
        'chart?cht=qr&chl={}&chs=120x120'.format(shortened))
def main():
    """Build a Graphite graph URL for the requested tests, open it in a
    browser, and print a TinyURL-shortened version for sharing.

    (Python 2 module: uses the `print` statement.)
    """
    args = parse_args()
    targetlist = ""
    colorpairs = 0
    for target in args.tests:
        # Cycle through COLORS so each test gets a distinct colour pair.
        targets = get_targets(target, COLORS[colorpairs % len(COLORS)],
                              avg=args.smoothing)
        colorpairs += 1
        subtarglist = "&".join(targets)
        # NOTE(review): joining onto the initially-empty targetlist leaves a
        # leading "&" in the query string; Graphite appears to tolerate it —
        # confirm before refactoring.
        targetlist = "&".join([targetlist, subtarglist])
    url = "&".join((
        graphite_base_url(args.duration, args.smoothing),
        targetlist))
    webbrowser.open(url)
    shortener = Shortener('TinyurlShortener')
    print "URL for sharing: %s" % shortener.short(url)
def test_shorteners_type(self):
    """Constructing each known engine succeeds and yields an instance."""
    engines = ['GoogleShortener', 'BitlyShortener', 'TinyurlShortener',
               'AdflyShortener', 'IsgdShortener', 'SentalaShortener',
               'GenericExpander', 'OwlyShortener']
    for engine in engines:
        instance = Shortener(engine)
        # NOTE(review): type(x) == x.__class__ holds for any instance, so
        # this effectively only checks that construction does not raise.
        self.assertEqual(type(instance), instance.__class__)
def shortener(self):
    """Return the configured URL shortener, creating it on first access.

    Engine name and keyword options come from the SHORTENER_SETTINGS
    setting; the built instance is cached on the object.
    """
    if not self.__shortener:
        # Copy first so that pop('name') does not mutate the stored settings.
        config = get_setting_value('SHORTENER_SETTINGS', {}).copy()
        engine_name = config.pop('name')
        self.__shortener = Shortener(engine_name, **config)
    return self.__shortener
def send_text(title, link): url = str(link) shortener = Shortener('TinyurlShortener') print "My short url is {}".format(shortener.short(url)) text_link = format(shortener.short(url)) text_body = "Price has lowered for " + title + " " + text_link client.messages.create( to="3153825951", from_="+14159643618", body=text_body, ) print "Sent text"
class RSSPlugin(BotPlugin):
    """Bot plugin that polls RSS feeds and announces new entries.

    Seen entries are remembered (hashed) in persistent storage so restarts
    do not re-announce old stories; long links are shortened via is.gd.
    """

    # Message layout: feed title > entry title > link.
    RSS_MSG_FORMAT = '{0} > {1} > {2}'

    def setup(self, *args, **kwargs):
        """Read plugin configuration and schedule the periodic feed check."""
        self.storage = self.open_storage('rss')
        self.feeds = self.config['feeds']
        self.max_stories = int(self.config.get('max_stories', 5))
        self.update_interval = int(self.config.get('update_interval', 30))
        self.max_link_length = int(self.config.get('max_link_length', 50))
        self.entry_cache_size = int(self.config.get('entry_cache_size', 100))
        self.shortener = Shortener('IsgdShortener')
        # Ensure every configured feed has a (possibly empty) history list.
        for feed in self.feeds:
            if feed not in self.storage:
                self.storage[feed] = []
        # now=False: skip the immediate first run, poll on the interval only.
        self.loop_task(self.update_interval, self.check_feeds, now=False)

    def hash_entry(self, entry):
        """Creates a hash out of the feedparser's Entry. Uses just the
        title and the link as that is what we care about in most cases."""
        # NOTE(review): hashing a str works on Python 2 only; Python 3
        # would need .encode() before sha224.
        return hashlib.sha224("{}{}".format(entry.title,
                                            entry.link)).hexdigest()

    def check_feeds(self):
        """Periodically checks for new entries in given (configured) feeds."""
        for feed in self.feeds:
            d = feedparser.parse(feed)
            past_entries = self.storage[feed]
            i = 1
            for entry in d.entries:
                hash = self.hash_entry(entry)
                if hash in past_entries:
                    continue
                # Cap the number of stories announced per check.
                if i > self.max_stories:
                    break
                # NOTE(review): sender() is invoked here and its RETURN
                # value is handed to delay_task — confirm delay_task
                # expects a result rather than a callable.
                self.delay_task(i, self.sender(d, entry))
                i += 1
                past_entries.insert(0, hash)
            # Trim the remembered-entry history to the configured cache size.
            self.storage[feed] = past_entries[:self.entry_cache_size]
        return ''

    def sender(self, d, entry):
        """A helper function that takes care of sending the entry that we
        regard as 'new' to proper places. Moreover, it takes care of
        formatting the raw entry into textual representation and
        shortening the entry link if it is too long."""
        link = entry.link
        # Only shorten links that exceed the configured length.
        if len(link) > self.max_link_length:
            link = self.shortener.short(link)
        s = self.RSS_MSG_FORMAT.format(d.feed.title, entry.title, link)
        self.msg(s)
def test_generic_expander(self):
    """GenericExpander expands foreign short links but cannot shorten."""
    tiny = Shortener("TinyurlShortener")
    shortened = tiny.short(self.test_url)
    expander = Shortener("GenericExpander")
    # Shortening is unsupported for the generic engine.
    with self.assertRaises(NotImplementedError):
        expander.short('http://www.test.com')
    # A valid url result is enough for an answer.
    self.assertEqual(expander.expand(shortened), self.test_url)
def test_adfly_shortener(self):
    """Adf.ly engine: mocked shortening, passthrough expand, key required."""
    engine = 'AdflyShortener'
    shortener = Shortener(engine, key='abcd', uid='123')
    target = 'http://www.google.com/'
    shortener.short = MagicMock(return_value='http://adf.ly/test')
    shortener.short(target)
    shortener.short.assert_called_with(target)
    self.assertEqual(shortener.expand('http://adf.ly/test'),
                     'http://adf.ly/test')
    # Without key/uid credentials, shortening must raise TypeError.
    with self.assertRaises(TypeError):
        Shortener(engine).short('http://www.google.com')
def setup(self, *args, **kwargs):
    """Load RSS plugin configuration and schedule the periodic feed check."""
    cfg = self.config
    self.storage = self.open_storage('rss')
    self.feeds = cfg['feeds']
    self.max_stories = int(cfg.get('max_stories', 5))
    self.update_interval = int(cfg.get('update_interval', 30))
    self.max_link_length = int(cfg.get('max_link_length', 50))
    self.entry_cache_size = int(cfg.get('entry_cache_size', 100))
    self.shortener = Shortener('IsgdShortener')
    # Make sure every feed has a history list in persistent storage.
    for feed in self.feeds:
        if feed not in self.storage:
            self.storage[feed] = []
    # now=False: first check happens after one full interval.
    self.loop_task(self.update_interval, self.check_feeds, now=False)
def test_bitly_shortener(self):
    """Bit.ly engine with mocked short and expand calls."""
    shortener = Shortener('BitlyShortener', bitly_api_key='abc',
                          bitly_login='******')
    long_url = 'http://www.google.com/'
    short_url = 'http://bit.ly/xxx'
    shortener.short = MagicMock(return_value='http://bit.ly/SsdA')
    shortener.short(long_url)
    shortener.short.assert_called_with(long_url)
    # expanding
    shortener.expand = MagicMock(return_value=long_url)
    shortener.expand(short_url)
    shortener.expand.assert_called_with(short_url)
def test_readability_shortener(self):
    """Readability engine: short/expand round trip plus bad-id error."""
    engine = 'ReadabilityShortener'
    shortener = Shortener(engine)
    article = 'http://blog.arc90.com/2010/11/30/silence-is-golden/'
    shortened = shortener.short(article)
    self.assertEqual(shortened, 'http://rdd.me/tg8if9uj')
    # Expansion resolves to the readability.com article page.
    self.assertEqual(shortener.expand(shortened),
                     'http://readability.com/articles/tg8if9uj')
    # Test wrong url_id
    with self.assertRaises(ExpandingErrorException):
        Shortener(engine).expand('http://www.wqe.cc')
def test_googl_shortener(self):
    """Goo.gl engine: round trip, cached attributes, QR code, error path."""
    shortener = Shortener('GoogleShortener')
    expected_short = 'http://goo.gl/rjf0oI'
    shortened = shortener.short(self.test_url)
    self.assertEqual(shortened, expected_short)
    # expand() with no argument reuses the last shortened URL.
    self.assertEqual(shortener.expand(), self.test_url)
    self.assertEqual(shortener.expanded, self.test_url)
    self.assertEqual(shortener.shorten, expected_short)
    self.assertEqual(
        shortener.qrcode(),
        'http://chart.apis.google.com/'
        'chart?cht=qr&chl={}&chs=120x120'.format(shortened))
    # test exceptions
    with self.assertRaises(ExpandingErrorException):
        shortener.expand('http://www.a.co')
def test_googl_shortener(self):
    """Goo.gl engine: shorten, implicit expand, cached attrs, QR, errors."""
    engine = 'GoogleShortener'
    short = Shortener(engine)
    goo_url = 'http://goo.gl/rjf0oI'
    result = short.short(self.test_url)
    self.assertEqual(result, goo_url)
    # Calling expand() with no argument falls back to the cached URL.
    for value in (short.expand(), short.expanded):
        self.assertEqual(value, self.test_url)
    self.assertEqual(short.shorten, goo_url)
    qr_template = ('http://chart.apis.google.com/'
                   'chart?cht=qr&chl={}&chs=120x120')
    self.assertEqual(short.qrcode(), qr_template.format(result))
    # test exceptions
    with self.assertRaises(ExpandingErrorException):
        short.expand('http://www.a.co')
class ImdbLookup(BaseModule):
    """IRC bot module: look up a movie on IMDb and reply with a summary.

    Triggered by "!imdb <title>" or "!i <title>".
    """

    matchers = {"!imdb": "find_movie", "!i\s+": "find_movie"}
    # Shared handles: URL shortener for long trailer links and an
    # anonymized IMDb API client.
    short_url_handle = Shortener('GoogleShortener')
    imdb_handle = Imdb({'anonymize': True})

    def __init__(self, args):
        """
        Initialize the class as a subclass of BaseModule and call parent
        constructor with the defined matchers. These will be turned into
        regex-matchers that redirect to the provided function name
        """
        # NOTE(review): super(self.__class__, ...) breaks under further
        # subclassing, and passing `self` as the positional argument looks
        # suspicious — confirm BaseModule.__init__'s expected signature.
        super(self.__class__,self).__init__(self)

    def find_movie(self,msg):
        """ Search for a movie title and get a summary """
        movie = self.get_movie(msg.clean_contents)
        if movie:
            title = movie.title.encode('utf-8')
            plot = movie.plot["outline"].encode('utf-8')
            rating = movie.rating
            # NOTE(review): keys()[0] indexing is Python 2 only; picks an
            # arbitrary trailer since dict order is not guaranteed.
            trailer = movie.trailers[movie.trailers.keys()[0]]
            # Shorten the trailer link only when it exceeds 75 characters.
            trailer = self.short_url_handle.short(trailer).encode('utf-8') if len(trailer)>75 else trailer
            msg.reply("\x02{title}\x02 ({rating}/10): {plot}".format(title=title, plot=plot, rating=rating))
            msg.reply("\x02Read more: \x02http://www.imdb.com/title/{movie_id}/ or \x02Watch the trailer:\x02 {trailer}".format(movie_id=movie.imdb_id, trailer=trailer))
        else:
            msg.reply("I couldn't find that movie :(")
        return msg

    def get_movie(self, title):
        # Returns the first IMDb title match, or False when nothing matches.
        results = self.imdb_handle.find_by_title(title)
        return False if not results else self.imdb_handle.find_movie_by_id(results.pop(0)["imdb_id"])
def short(url):
    """Shorten ``url`` with a randomly chosen engine from ``shorts``."""
    engine = choice(shorts)
    return Shortener(engine).short(url)
def test_bitly_shortener(self):
    """Bit.ly engine: live-call failure, mocked calls, missing-key error."""
    engine = 'BitlyShortener'
    shortener = Shortener(engine, bitly_api_key='abc', bitly_login='******')
    long_url = 'http://www.google.com/'
    short_url = 'http://bit.ly/xxx'
    # test with no mock: bogus credentials make the real call fail
    with self.assertRaises(ShorteningErrorException):
        shortener.short(long_url)
    # mocking the results
    shortener.expand = MagicMock(return_value=long_url)
    shortener.short = MagicMock(return_value='http://bit.ly/SsdA')
    shortener.short(long_url)
    shortener.short.assert_called_with(long_url)
    shortener.expand(short_url)
    shortener.expand.assert_called_with(short_url)
    # test with no key params
    with self.assertRaises(TypeError):
        Shortener(engine).short('http://www.google.com')
from gshares.models import News_Co #symbols = ["AERO","AGTK","ATTBF","AVTC","BRDT","CANL","CANV","CBDS","CBGI","CBIS","CGRW","CHUM","CNBX","EAPH","EDXC"] #symbols = ["ENDO","ENRT","ERBB","FITX","FULL","FWDG","GBLX","GRCU","GRNH","GWPH","HEMP","HMKTF","ICBU","IMLFF","LGBI"] #symbols = ["LVVV","MCIG","MDBX","MDCN","MJMJ","MJNA","MJNE","MNTR","MYEC","NDEV","NTRR","NVLX","PHOT","PLPL","PMCM"] symbols = ["QEDN","REFG","RSSFF","SING","SPRWF","SRNA","STEV","TAUG","THCZ","TRTC","TURV","UPOT","VAPO","VPOR","VAPE"] context_list = [ ] for s in symbols: try: count = 0 while count < 5: url = 'http://finance.yahoo.com/rss/headline?s=%s' % s shortener = Shortener('GoogleShortener') d = feedparser.parse(url) v = d.entries[count].link x = shortener.short(v) y = d.entries[count].title p = d.entries[count].published r = p.split() del r[0] del r[3:] l = " ".join(str(i) for i in r) d = datetime.strptime(l,'%d %b %Y') count += 1 q = News_Co(symbol=s, title=y, link=x, date=d) q.save() print "saving to db: %s" % s
def test_wrong_shortener_engine(self):
    """An unrecognized engine name raises UnknownShortenerException."""
    with self.assertRaises(UnknownShortenerException):
        Shortener('UnknownShortener')
import pygtk
pygtk.require('2.0')
import gtk
from pyshorteners.shorteners import Shortener

# get the clipboard
clipboard = gtk.clipboard_get()
# read clipboard for the text copied
url_original = clipboard.wait_for_text()
try:
    shortener = Shortener('TinyurlShortener')
    url = format(shortener.short(url_original))
    # set the clipboard data as shortened url
    clipboard.set_text(url)
except Exception:
    # Shortening failed (bad URL, network error, ...): keep the original
    # text. Narrowed from a bare `except:` so KeyboardInterrupt and
    # SystemExit are no longer swallowed.
    clipboard.set_text(url_original)
# Storing short url to clipboard
clipboard.store()
def shortIt(self):
    """Return self.url shortened through the goo.gl engine."""
    shortener = Shortener('GoogleShortener')
    shortened = shortener.short(self.url)
    return format(shortened)
# -*- coding: utf-8 -*- from flask import Flask, g, render_template, abort, request from flask import url_for, redirect, session from passlib.hash import pbkdf2_sha256 from contextlib import closing import os import psycopg2 import datetime import markdown from pyshorteners.shorteners import Shortener shortener = Shortener('GoogleShortener') # _____SQL SCRIPTS_____ DB_SCHEMA = """ DROP TABLE IF EXISTS entries; CREATE TABLE entries ( id serial PRIMARY KEY, title VARCHAR (127) NOT NULL, text TEXT NOT NULL, created TIMESTAMP NOT NULL ) """ DB_ENTRY_INSERT = """ INSERT INTO entries (title, text, created) VALUES (%s, %s, %s) """ DB_ENTRIES_LIST = """ SELECT id, title, text, created FROM entries ORDER BY created DESC """
def get_shorturl(url):
    """Return ``url`` shortened via TinyURL."""
    tiny = Shortener("TinyurlShortener")
    return tiny.short(url)
def main(): args = parse_args() url = get_graphite_url(args.tests, args.smoothing, args.duration) webbrowser.open(url) shortener = Shortener('Tinyurl') print "URL for sharing: %s" % shortener.short(url)
def get_ed_page(self, search="", where="ed"):
    """Scrape an Encyclopedia Dramatica (or Uncyclopedia) search result and
    return a dict with title, summary paragraphs, shortened URL and, when
    available, a picture URL — or False when nothing useful was found.

    :param search: pre-built query-string fragment; empty means no-op
    :param where: "ed" for Encyclopedia Dramatica, anything else for
        Uncyclopedia

    NOTE(review): this block's indentation was reconstructed from a
    collapsed source; the nesting of the picture/gallery section should be
    confirmed against the original file. Python 2 (urllib.urlopen,
    str.encode usage).
    """
    if search == "":
        return False
    if where == "ed":
        search_url = "https://encyclopediadramatica.se/index.php?{search}".format(
            search=search)
    else:
        search_url = "http://uncyclopedia.wikia.com/index.php?title=Special%3ASearch&{search}&go=Go".format(
            search=search)
    page = urllib.urlopen(search_url)
    soup = BeautifulSoup(page.read())
    print("https://encyclopediadramatica.se/index.php?{search}".format(
        search=search))
    # Extracts the article title from the page's <title> element.
    p = re.compile(
        '<title>(.+) \- (Encyclopedia Dramatica|Uncyclopedia, the content\-free encyclopedia)<\/title>'
    )
    paragraph = soup.find('div', {'id': 'bodyContent'}).findChildren('p')
    print(paragraph)
    # Placeholder/empty pages are treated as "not found".
    if 'There is currently no text' in paragraph[0].text.encode('utf-8').strip() \
            or "Create the page" in paragraph[0].text.encode('utf-8').strip() \
            or not soup:
        print("Nope")
        return False
    else:
        title = ""
        title = p.findall(str(soup.title))
        if isinstance(title, list) and len(title):
            title = title[0]
        if isinstance(title, tuple):
            title = title[0]
        url = ""
        # Accumulate paragraphs until ~380 characters or 55 words.
        scanning_paragraphs = 1
        words = 0
        paragraphs = []
        for t in paragraph:
            print(t)
            if scanning_paragraphs > 380 or words > 55:
                if words > 80:
                    # Mark the summary as truncated.
                    paragraphs[-1] += "..."
                break
            if t.text.strip() != u'':
                paragraphs.append(t.text.encode('utf-8').strip())
                words += len(t.text.encode('utf-8').strip().split())
                scanning_paragraphs += len(t.text.encode('utf-8').strip())
        short_url_handle = Shortener('GoogleShortener')
        summary = paragraphs
        print(page.url)
        try:
            link = short_url_handle.short(page.url).encode('utf-8')
        except:
            # Fall back to the full URL when shortening fails.
            link = page.url
        if where == "ed":
            base_url = "/".join(page.url.split('/')[:-1])
        else:
            base_url = "/".join(page.url.split('/')[:-2])
        return_dict = {"title": title, "summary": summary, "url": link}
        # First thumbnail in the floated image box, skipping Main_Page links.
        all_pics = soup.find('div', {
            'id': 'bodyContent'
        }).findChild('div', {'class': 'floatright'})
        if all_pics:
            all_pics = all_pics.findAll('a', {'class': 'image'})
            picture = None
            for picture in all_pics:
                if "Main_Page" not in picture['href']:
                    picture = picture["href"]
                    break
            if picture:
                picture = base_url + picture
                picture_page = urllib.urlopen(picture)
                print(picture)
                picture_soup = BeautifulSoup(picture_page.read())
                large_pic = picture_soup.find('div', {
                    'id': 'file'
                }).findChild('a')['href']
                print(large_pic)
                if not large_pic:
                    pic_link = short_url_handle.short(picture).encode(
                        'utf-8')
                else:
                    # Re-host the full-size image on imgur when possible;
                    # otherwise fall back to a shortened direct link.
                    image = self.imgur_handle.upload_image(
                        url=large_pic, title="from {url}".format(url=link))
                    pic_link = image.link if image and image.link else short_url_handle.short(
                        picture).encode('utf-8')
                return_dict["pic_url"] = pic_link
        # NOTE(review): gallery links are collected but never used or
        # returned — confirm whether this is dead code.
        gallery = soup.find('table', {'class': 'gallery'})
        if gallery:
            gallery = gallery.findAll('a', {'class': 'image'})
        return return_dict
#!/usr/bin/env python # encoding: utf-8 import json from pyshorteners.shorteners import Shortener from pyshorteners.exceptions import (ShorteningErrorException, ExpandingErrorException) import responses import pytest s = Shortener('ReadabilityShortener') shorten = 'http://rdd.me/test' expanded = 'http://www.test.com' @responses.activate def test_readability_short_method(): # mock responses body = json.dumps({'meta': {'rdd_url': shorten}}) responses.add(responses.POST, s.api_url, body=body) shorten_result = s.short(expanded) assert shorten_result == shorten assert s.shorten == shorten_result assert s.expanded == expanded @responses.activate def test_readability_short_method_bad_response():
def short_it(link):
    """Return the TinyURL-shortened form of ``link.url``."""
    return Shortener('TinyurlShortener').short(link.url)
import requests
from bs4 import BeautifulSoup
import re
import time
from pyshorteners.shorteners import Shortener
from pprint import pprint

# Gemalto job-offer listing, filtered to country 41, English locale.
GEMALTO_URL = "https://gemalto-recrute.talent-soft.com/offre-de-emploi/liste-offres.aspx?changefacet=1&facet_JobCountry=41&LCID=1033"

shortener = Shortener('TinyurlShortener')
r = requests.get(GEMALTO_URL)
soup = BeautifulSoup(r.text)
# The offer count is the first number inside the "resultat" element.
nombre_offre = re.findall(r'\d+', soup.find(class_="resultat").text)[0]
print("Gemalto ({0}) :".format(nombre_offre))
results = soup.find(id="listing-resultat").find('ul').find_all('li',
                                                               recursive=False)
for result in results:
    # Convert the posting date from MM/DD/YYYY to DD/MM/YYYY for display.
    publie_le = time.strftime('%d/%m/%Y',
                              time.strptime(result.ul('li')[1].text,
                                            '%m/%d/%Y'))
    poste = result.h2.text.strip()
    url = result.h2.a['href']
    # One summary row per offer, with a shortened link.
    print("\t{0} - {1} - {2}".format(publie_le, poste, shortener.short(url)))
def test_none_qrcode(self):
    """qrcode() yields None before any URL has been shortened."""
    self.assertIsNone(Shortener('TinyurlShortener').qrcode())
#d = feedparser.parse('http://mmjbusinessdaily.com/feed/') #d = feedparser.parse('http://feeds.sciencedaily.com/sciencedaily/mind_brain/marijuana') #d = feedparser.parse('http://www.420magazine.com/forums/external.php?type=rss&forumids=369') d = feedparser.parse('http://www.thedailychronic.net/feed/') listx = [ ] try: count = 0 while count < 10: d = feedparser.parse('http://www.thedailychronic.net/feed/') title = d['entries'][count]['title'] link_long = d['entries'][count]['link'] shortener = Shortener('GoogleShortener') link = shortener.short(link_long) published_long = d['entries'][count]['published'] r = published_long.split() del r[0] del r[3:] l = " ".join(str(i) for i in r) published = datetime.strptime(l,'%d %b %Y') summary = d['entries'][count]['summary'][0:290] + "..." count += 1 print count q = News_Other(title=title, link=link, published=published, summary=summary) q.save() except: pass
import requests
from bs4 import BeautifulSoup
import re
from pyshorteners.shorteners import Shortener
from pprint import pprint

# Coordination Sud job board, pre-filtered to offers in Colombia.
COORDINATION_SUD_URL="http://www.coordinationsud.org/espace-emploi/?pays%5B%5D=colombie&age=&mots="

shortener = Shortener('TinyurlShortener')
r = requests.get(COORDINATION_SUD_URL)
soup = BeautifulSoup(r.text)
# One <article> element per job offer.
results = soup.find(class_='elements').find_all('article')
nombre_offre = len(results)
print("Coordination Sud ({0}) :".format(nombre_offre))
for result in results:
    publie_le = result.find(class_="date").text
    entreprise = result.find(class_="author").text
    poste = result.find(class_="entry-title").text.strip()
    url = result.find(class_="entry-title").find('a')['href']
    # One summary row per offer, with a shortened link.
    print("\t{0} - {1} - {2} - {3}".format(publie_le, entreprise, poste,
                                           shortener.short(url)))