def urban(code, input):
    """urban <query> [number] -- Look up a definition on urbandictionary.com."""
    try:
        if input.group(2):
            # Clean and split the input; a trailing number picks which
            # definition to show (1-based).
            msg = input.group(2).lower().strip()
            tmp = msg.replace('-', '').split()
            if tmp[-1].isdigit():
                if int(tmp[-1]) <= 0:
                    id = 0
                else:
                    id = int(tmp[-1]) - 1
                del tmp[-1]
                msg = ' '.join(tmp)
            else:
                id = 0
            data = jsonify(get(uri % quote(msg)).read())['list']
            if not data:
                return code.reply(error)
            max = len(data)
            # Clamp the requested index to the last available definition.
            if id >= max:
                id = max - 1
            data = data[id]
            id += 1
            msg = '({purple}{id}{c} of {purple}{max}{c}) "{purple}{word}{c}": {definition} +{red}{up}{c}/-{red}{down}{c}'
            if len(data['definition']) > 235:
                data['definition'] = data['definition'][0:235] + '[...]'
            return code.say(code.format(msg).format(
                id=str(id), max=str(max),
                definition=strp(data['definition']),
                word=data['word'],
                up=str(data['thumbs_up']),
                down=str(data['thumbs_down'])))
        else:
            # No query given; pull a random definition instead.
            data = jsonify(get(random_uri).read())['list'][0]
            if not data:
                return code.reply(error)
            msg = '(Definition for "{purple}{word}{c}"): {definition} +{red}{up}{c}/-{red}{down}{c}'
            if len(data['definition']) > 235:
                data['definition'] = data['definition'][0:235] + '[...]'
            return code.say(code.format(msg).format(
                definition=strp(data['definition']),
                word=data['word'],
                up=str(data['thumbs_up']),
                down=str(data['thumbs_down'])))
    except:
        return code.reply(
            '{red}{b}Failed to pull definition from urbandictionary.com!')
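# A minimal sketch of the JSON shape urban() expects back from the Urban
# Dictionary endpoints above. Only the keys the function actually reads are
# shown; the live API returns additional fields, and these values are
# illustrative, not real data.
_example_urban_response = {
    'list': [{
        'word': 'example',
        'definition': 'something representative of its kind',
        'thumbs_up': 42,
        'thumbs_down': 7,
    }]
}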
def domain(code, input):
    """domain <domain> -- Use domain.nr's domain API to find used and unused domains."""
    err, domains = '{red}{b}Unable to find information on that domain name.', []
    url = input.group(2)
    # Strip a leading scheme if the user pasted a full URL.
    re_m = re.match(re.compile(r'http[s]?://([a-zA-Z0-9_.]{0,40}.*?)/?'), url)
    if re_m:
        url = re_m.group(1)
    try:
        data = loads(get(uri % url).read())
    except:
        return code.say(err)
    if not data['query']:
        return code.say(err)
    for domain in data['results']:
        status = domain['availability']
        if status in ['taken', 'unavailable']:
            color = 'red'
        elif status == 'tld':
            continue
        elif status == 'available':
            color = 'green'
        elif status == 'maybe':
            color = 'grey'
        else:
            print domain
            continue
        r = '{%s}%s{c}' % (color, domain['domain'])
        # Color the dots as well: some clients auto-parse URLs, and putting
        # color codes in the middle of the name should keep them from being
        # turned into links.
        domains.append(r.replace('.', '{%s}.' % color))
    code.say('Domains: %s' % ' | '.join(domains))
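# A minimal sketch of the JSON shape domain() reads from the domai.nr-style
# endpoint: the function only checks 'query' for truthiness and reads the
# 'domain' and 'availability' fields of each result. Values are illustrative.
_example_domainr_response = {
    'query': 'example',
    'results': [
        {'domain': 'example.com', 'availability': 'taken'},
        {'domain': 'example.net', 'availability': 'available'},
        {'domain': 'example.xyz', 'availability': 'maybe'},
    ],
}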
def wikiSearch(query, url, results=5):
    """Use MediaWiki's API to search for values from wiktionary and wikipedia."""
    # Grab the data and deserialize the JSON response.
    url_query = urlify(query)
    data = jsonify(get(full_search % (lang, url, url_query)).read())
    # data[1] holds the matching titles; trim it to at most `results` entries.
    if not data[1]:
        return False
    if len(data[1]) > results:
        return data[1][:results]
    else:
        # Already at or below the requested number of results.
        return data[1]
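# A minimal sketch of the opensearch-style payload wikiSearch() expects:
# index 0 echoes the query and index 1 holds the matching titles (the only
# index the function reads). Contents are illustrative.
_example_search_response = [
    'linux',
    ['Linux', 'Linux kernel', 'Linux distribution'],
]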
def dinner(code, input):
    """fd -- WHAT DO YOU WANT FOR F*****G DINNER?"""
    err = '{red}EAT LEFT OVER PIZZA FOR ALL I CARE.'
    try:
        data = get(uri).read()
        results = re_mark.findall(data)
        if not results:
            return code.say(err)
        url, food = results[0][0], htmlescape(results[0][1])
        code.say('WHY DON\'T YOU EAT SOME F*****G {b}%s{b}. HERE IS THE RECIPE: %s'
                 % (food.upper(), url))
    except:
        return code.say(err)
def dinner(code, input):
    """fd -- WHAT DO YOU WANT FOR F*****G DINNER?"""
    err = '{red}EAT LEFT OVER PIZZA FOR ALL I CARE.'
    try:
        data = get(uri).read()
        results = re_mark.findall(data)
        if not results:
            return code.say(err)
        url, food = results[0][0], htmlescape(results[0][1])
        # Shorten the recipe link if the bot is configured to do so.
        if hasattr(code.config, 'shortenurls'):
            if code.config.shortenurls:
                url = shorten(url)
        code.say('WHY DON\'T YOU EAT SOME F*****G {b}%s{b}. HERE IS THE RECIPE: %s'
                 % (food.upper(), url))
    except:
        return code.say(err)
def fucking_weather(code, input):
    """fw (ZIP|City, State) -- provide a ZIP code or a city state pair to hear about the f*****g weather"""
    if not input.group(2):
        return code.say('{red}{b}INVALID F*****G INPUT. PLEASE ENTER A F*****G ZIP CODE, OR A F*****G CITY-STATE PAIR.')
    try:
        text = quote(input.group(2))
        data = get('http://thefuckingweather.com/?where=%s' % text).read()
        temp = re.compile(r'<p class="large"><span class="temperature" tempf=".*?">.*?</p>').findall(data)[0]
        temp = re.sub(r'\<.*?\>', '', temp).strip().replace(' ', '').replace('"', '')
        remark = re.compile(r'<p class="remark">.*?</p>').findall(data)[0]
        remark = re.sub(r'\<.*?\>', '', remark).strip()
        flavor = re.compile(r'<p class="flavor">.*?</p>').findall(data)[0]
        flavor = re.sub(r'\<.*?\>', '', flavor).strip()
        return code.say(web.htmlescape(temp) + ' ' + remark + '. ' + flavor)
    except:
        return code.say('{red}{b}I CAN\'T FIND THAT SHIT.')
def wikiDefine(term, url):
    """Use MediaWiki's API to define a value from wiktionary and wikipedia."""
    # Grab the data and deserialize the JSON response.
    url_query = urlify(term)
    data = jsonify(get(full_define % (lang, url, maxlen, url_query)).read())['query']['pages']
    # A term that wasn't found comes back as page '-1'.
    for pageNumber, pageData in data.iteritems():
        if pageNumber == '-1':
            # Nothing found for this term.
            return False
        else:
            # Found a result; pull out the title and the extract text.
            if pageData['extract'].startswith('REDIRECT'):
                # The MediaWiki API marks redirect pages this way; skip them.
                return False
            title = pageData['title']
            content = pageData['extract'].encode('ascii', 'ignore').replace('\n', ' ')
            # Collapse runs of whitespace left over from the markup.
            while '  ' in content:
                content = content.replace('  ', ' ')
            return [title, content]
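# A minimal sketch of the 'query' -> 'pages' mapping wikiDefine() iterates
# over. A miss comes back keyed as '-1'; a hit carries 'title' and 'extract',
# the only keys read above. Values are illustrative.
_example_define_response = {
    'query': {
        'pages': {
            '12345': {
                'title': 'Linux',
                'extract': 'Linux is a family of open-source operating systems.',
            }
        }
    }
}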
def build_database(start_url, debug=False):
    urls = [start_url]
    seen = set(urls)
    emails = set()
    # Walk the queue of discovered pages; newly found links are appended to
    # the same list while we iterate over it.
    for url in urls:
        try:
            if debug:
                print url
            soup = BeautifulSoup(get(url).read())
            for link in soup.findAll('a', href=True):
                if link['href'].startswith('mailto:'):
                    emails.add(link['href'].replace('mailto:', ''))
                    if debug:
                        print "[!!] Email: " + link['href']
                else:
                    u = join(url, link['href'])
                    if u not in seen:
                        seen.add(u)
                        urls.append(u)
                        if debug:
                            print u
        except Exception as e:
            # print "[!!!!!] EXCEPTION: " + str(e)
            pass
    return list(emails)
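# Hypothetical usage of build_database(): seed the crawler with one page and
# print every mailto: address it finds (the URL below is illustrative only).
if __name__ == '__main__':
    for address in build_database('http://www.example.com/', debug=True):
        print address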
def get_course_info(course_id):
    url = base_url + course_id
    try:
        source = get(url).read()
    except:
        return None
    soup = make_soup(source)
    courseNumber = soup.find('p', attrs={'class': 'courseNumber'}).contents[0].strip()
    try:
        courseTitle = soup.find('p', attrs={'class': 'courseTitle'}).contents[0].strip()
    except:
        courseTitle = 'No title available.'
    try:
        courseDesc = soup.find('div', attrs={'class': 'courseDesc'}).find('p').contents[0].strip()
    except:
        courseDesc = 'No description available.'
    attrs = soup.findAll('p', attrs={'class': 'courseAttrs'})
    report = {
        'identifier': course_id,
        'courseNumber': courseNumber,
        'courseTitle': courseTitle,
        'courseDesc': courseDesc
    }
    # Each attribute line looks like "Key: value"; split on the first colon.
    for asoup in attrs:
        line = ' '.join(s.text if isinstance(s, Tag) else s.strip() for s in asoup)
        colon_index = line.index(':')
        k, v = line[:colon_index].strip(), line[colon_index + 1:].strip()
        report[k] = v
    return report
from BeautifulSoup import BeautifulSoup as make_soup
from BeautifulSoup import Tag
from urllib2 import urlopen as get
from json import dumps as to_json
import sys

dept, name, url = sys.argv[1:4]
soup = make_soup(get(url).read())
courses = soup.findAll('div', attrs={'class': 'course'})
report = {'name': name, 'identifier': dept, 'courses': {}}
for course in courses:
    courseid = course.previousSibling.previousSibling['name'].strip()
    number = course.find('p', attrs={'class': 'courseNumber'}).contents[0].strip()
    try:
        title = course.find('p', attrs={'class': 'courseTitle'}).contents[0].strip()
    except:
        title = 'No title available.'
    try:
        desc = course.find('div', attrs={'class': 'courseDesc'}).find('p').contents[0].strip()
    except:
        desc = 'No description available.'
    attrs = course.findAll('p', attrs={'class': 'courseAttrs'})
    report['courses'][courseid] = {
def fetch_image_from_url(url):
    # `i` is assumed to be an image library imported elsewhere in this module
    # (most likely PIL's Image, imported as `i`); get() fetches the URL and
    # returns a file-like object that i.open() can read from.
    return i.open(get(url))
from BeautifulSoup import BeautifulSoup as make_soup
from BeautifulSoup import Tag
from urllib2 import urlopen as get
from json import dumps as to_json
import sys

dept, url = sys.argv[1:3]
soup = make_soup(get(url).read())
courses = soup.findAll('div', attrs={'class': 'course'})
report = {}
for course in courses:
    courseid = course.previousSibling.previousSibling['name'].strip()
    number = course.find('p', attrs={'class': 'courseNumber'}).contents[0].strip()
    try:
        title = course.find('p', attrs={'class': 'courseTitle'}).contents[0].strip()
    except:
        title = 'No title available.'
    try:
        desc = course.find('div', attrs={'class': 'courseDesc'}).find('p').contents[0].strip()
    except:
        desc = 'No description available.'
    attrs = course.findAll('p', attrs={'class': 'courseAttrs'})
    report[courseid] = {
        'courseNumber': number,
        'courseTitle': title,
        'courseDescription': desc,
        'prerequisites': [],
        'corequisites': [],
        'concurrent': []
    }