def int_duration(self):
    hours, minutes = 0, 0
    match = re.search(r'(\d+)hr', self.duration)
    if match:
        hours = int(match.group(1))
    match = re.search(r'(\d+)min', self.duration)
    if match:
        minutes = int(match.group(1))
    return hours * 60 + minutes
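
# A minimal usage sketch (hypothetical Flight object; assumes only that it
# exposes a `duration` string in the "XhrYmin" form parsed above):
#
#   flight.duration = u'2hr 30min'
#   flight.int_duration()  # -> 150
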
def lookup_mac(self, event, _, mac):
    oui = mac.replace('-', '').replace(':', '').upper()[:6]
    ouis = open(cacheable_download('http://standards.ieee.org/regauth/oui/oui.txt',
                                   'sysadmin/oui.txt'))
    match = re.search(r'^\s*%s\s+\(base 16\)\s+(.+?)$' % oui, ouis.read(),
                      re.MULTILINE)
    ouis.close()
    if match:
        name = match.group(1).decode('utf8').title()
        event.addresponse(u"That belongs to %s", name)
    else:
        event.addresponse(u"I don't know who that belongs to")
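
# Sketch of the normalisation above, for illustration (values are examples,
# not taken from the OUI registry):
#
#   '00:1A:2B:3C:4D:5E' -> '001A2B3C4D5E' -> '001A2B'
#
# which is then looked up against the "(base 16)" lines of the cached
# oui.txt listing.
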
def _request(self, url, method):
    scheme, host = urlparse(url)[:2]
    scheme = scheme.lower()
    # If a proxy is configured for this scheme, connect to it instead
    proxies = getproxies_environment()
    if scheme in proxies:
        scheme, host = urlparse(proxies[scheme])[:2]
        scheme = scheme.lower()

    kwargs = {}
    if version_info[1] >= 6:
        kwargs['timeout'] = self.timeout
    else:
        socket.setdefaulttimeout(self.timeout)

    if scheme == "https":
        conn = HTTPSConnection(host, **kwargs)
    else:
        conn = HTTPConnection(host, **kwargs)

    headers = {}
    if method == 'GET':
        # Only fetch up to max_size bytes
        headers['Range'] = 'bytes=0-%s' % self.max_size

    try:
        try:
            conn.request(method.upper(), url_to_bytestring(url), headers=headers)
            response = conn.getresponse()
            data = response.read(self.max_size)
            conn.close()
        except socket.error, e:
            raise HTTPException(e.message or e.args[1])
    finally:
        if version_info[1] < 6:
            socket.setdefaulttimeout(None)

    contenttype = response.getheader('Content-Type', None)
    if contenttype:
        # The header looks like "text/html; charset=utf-8", so the charset
        # parameter cannot be anchored to the start of the value
        match = re.search(r'charset=([a-zA-Z0-9-]+)', contenttype)
        try:
            if match:
                data = data.decode(match.group(1))
            elif contenttype.startswith('text/'):
                data = data.decode('utf-8')
        except UnicodeDecodeError:
            guessed = detect(data)
            if guessed['confidence'] > 0.5:
                charset = guessed['encoding']
                # Common guessing mistake:
                if charset.startswith('ISO-8859') and '\x92' in data:
                    charset = 'windows-1252'
                data = unicode(data, charset, errors='replace')

    return response.status, response.reason, data, response.getheaders()
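
# _request above is written for Python 2 and assumes roughly the following
# imports are already in scope (url_to_bytestring is a helper from this
# codebase's own utils; detect() is chardet's detector -- treat the exact
# module paths as assumptions):
#
#   import re
#   import socket
#   from sys import version_info
#   from urlparse import urlparse
#   from urllib import getproxies_environment
#   from httplib import HTTPConnection, HTTPSConnection, HTTPException
#   from chardet import detect
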
def _flight_search(self, event, dpt, to, dep_date, ret_date):
    airport_dpt = self._airport_search(dpt)
    airport_to = self._airport_search(to)
    if len(airport_dpt) == 0:
        event.addresponse(u"Sorry, I don't know the airport you want to leave from")
        return
    if len(airport_to) == 0:
        event.addresponse(u"Sorry, I don't know the airport you want to fly to")
        return
    if len(airport_dpt) > 1:
        event.addresponse(u'The following airports match the departure: %s',
                          human_join(self.repr_airport(id) for id in airport_dpt)[:480])
        return
    if len(airport_to) > 1:
        event.addresponse(u'The following airports match the destination: %s',
                          human_join(self.repr_airport(id) for id in airport_to)[:480])
        return

    dpt = airport_dpt[0]
    to = airport_to[0]

    def to_travelocity_date(date):
        # Split a free-form date into the date and time-of-day fields that
        # Travelocity's search form expects
        date = date.lower()
        time = None
        for period in [u'anytime', u'morning', u'afternoon', u'evening']:
            if period in date:
                time = period.title()
                date = date.replace(period, u'')
                break
        try:
            date = parse(date)
        except ValueError:
            raise FlightException(u"Sorry, I can't understand the date %s" % date)
        if time is None:
            if date.hour == 0 and date.minute == 0:
                time = u'Anytime'
            else:
                time = date.strftime('%I:00')
                if time[0] == u'0':
                    time = time[1:]
                if date.hour < 12:
                    time += u'am'
                else:
                    time += u'pm'
        date = date.strftime('%m/%d/%Y')
        return (date, time)

    (dep_date, dep_time) = to_travelocity_date(dep_date)
    (ret_date, ret_time) = to_travelocity_date(ret_date)

    params = {}
    params[u'leavingFrom'] = self.airports[dpt][3]
    params[u'goingTo'] = self.airports[to][3]
    params[u'leavingDate'] = dep_date
    params[u'dateLeavingTime'] = dep_time
    params[u'returningDate'] = ret_date
    params[u'dateReturningTime'] = ret_time
    etree = get_html_parse_tree('http://travel.travelocity.com/flights/InitialSearch.do',
                                data=urlencode(params), treetype='etree')

    # Follow the interstitial "finurl" redirects until the results page loads
    while True:
        script = [script for script in etree.getiterator(u'script')][1]
        matches = script.text and re.search(r'var finurl = "(.*)"', script.text)
        if matches:
            url = u'http://travel.travelocity.com/flights/%s' % matches.group(1)
            etree = get_html_parse_tree(url, treetype=u'etree')
        else:
            break

    # Handle error
    div = [d for d in etree.getiterator(u'div') if d.get(u'class') == u'e_content']
    if len(div):
        error = div[0].find(u'h3').text
        raise FlightException(error)

    departing_flights = self._parse_travelocity(etree)

    # Extract the URL of the return-flight results from the price buttons
    return_url = None
    table = [t for t in etree.getiterator(u'table') if t.get(u'id') == u'tfGrid'][0]
    for tr in table.getiterator(u'tr'):
        for td in tr.getiterator(u'td'):
            if td.get(u'class').strip() in [u'tfPrice', u'tfPriceOrButton']:
                onclick = td.find(u'div/button').get(u'onclick')
                match = re.search(r"location.href='\.\./flights/(.+)'", onclick)
                url_page = match.group(1)
                match = re.search(r'^(.*?)[^/]*$', url)
                url_base = match.group(1)
                return_url = url_base + url_page

    etree = get_html_parse_tree(return_url, treetype=u'etree')
    returning_flights = self._parse_travelocity(etree)

    return (departing_flights, returning_flights, url)
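
# For illustration, the nested to_travelocity_date() helper turns a free-form
# date into the (date, time-of-day) pair the search form expects, e.g.
# (assuming dateutil-style parsing of the input string):
#
#   to_travelocity_date(u'2009-05-03 14:00')    # -> ('05/03/2009', '2:00pm')
#   to_travelocity_date(u'morning 2009-05-03')  # -> ('05/03/2009', 'Morning')
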