def datagramReceived(self, data, addr, outip):
    if outip not in self.interfaces:
        if self.INADDR_ANY not in self.interfaces:
            return
    req_line, data = data.split('\r\n', 1)
    method, path, version = req_line.split(None, 3)

    # check method
    if method != 'M-SEARCH' or path != '*':
        return

    # parse header
    headers = HTTPMessage(StringIO(data))
    mx = int(headers.getheader('MX'))

    # send M-SEARCH response
    for udn in self.devices:
        device = self.devices[udn]
        delay = random() * mx
        for packet in device.make_msearch_response(headers, (outip, self.port), addr):
            buff = build_packet('HTTP/1.1 200 OK', packet)
            self.reactor.callLater(delay, self._send_packet, self.ssdp, buff, addr)
            delay += self.SSDP_INTERVAL
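For reference, a minimal sketch of the header-parsing step above in isolation, assuming Python 2 (httplib.HTTPMessage, StringIO) and an illustrative M-SEARCH datagram:

from httplib import HTTPMessage
from StringIO import StringIO

datagram = ('M-SEARCH * HTTP/1.1\r\n'
            'HOST: 239.255.255.250:1900\r\n'
            'MAN: "ssdp:discover"\r\n'
            'MX: 3\r\n'
            'ST: ssdp:all\r\n'
            '\r\n')
req_line, rest = datagram.split('\r\n', 1)
headers = HTTPMessage(StringIO(rest))
print(int(headers.getheader('MX')))  # -> 3, the window for the randomized reply delay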
class VCRHTTPResponse(object):
    """
    Stub response class that gets returned instead of an HTTPResponse
    """
    def __init__(self, recorded_response):
        self.recorded_response = recorded_response
        self.reason = recorded_response['status']['message']
        self.status = recorded_response['status']['code']
        self.version = None
        self._content = StringIO(self.recorded_response['body']['string'])

        self.msg = HTTPMessage(StringIO(''))
        for k, v in self.recorded_response['headers'].iteritems():
            self.msg.addheader(k, v)

        self.length = self.msg.getheader('content-length') or None

    def read(self, chunked=False):
        # Note: I'm pretty much ignoring any chunking stuff because
        # I don't really understand what it is or how it works.
        return self._content.read()

    def isclosed(self):
        # Urllib3 seems to call this because it actually uses
        # the weird chunking support in httplib
        return True

    def getheaders(self):
        return self.recorded_response['headers'].iteritems()
def parse_headers(header_list):
    if isinstance(header_list, dict):
        return parse_headers_backwards_compat(header_list)
    headers = "".join(header_list) + "\r\n"
    msg = HTTPMessage(StringIO(headers))
    msg.fp.seek(0)
    msg.readheaders()
    return msg
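A small driver for the parser above, a sketch assuming Python 2 and a cassette-style list of raw CRLF-terminated header lines (the values are illustrative):

from httplib import HTTPMessage
from StringIO import StringIO

recorded = ['Content-Type: application/json\r\n',
            'Set-Cookie: a=1\r\n',
            'Set-Cookie: b=2\r\n']
msg = parse_headers(recorded)
print(msg.getheader('content-type'))  # -> 'application/json'
print(msg.getheaders('set-cookie'))   # -> ['a=1', 'b=2'], repeated headers survive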
def parse_headers_backwards_compat(header_dict):
    """
    In vcr 0.6.0, I changed the cassettes to store headers as a list
    instead of a dict.  This method parses the old dictionary-style
    headers for backwards-compatibility reasons.
    """
    msg = HTTPMessage(StringIO(""))
    for key, val in header_dict.iteritems():
        msg.addheader(key, val)
        msg.headers.append("{0}:{1}".format(key, val))
    return msg
class VCRHTTPResponse(object):
    def __init__(self, recorded_response):
        self.recorded_response = recorded_response
        self.reason = recorded_response["status"]["message"]
        self.status = recorded_response["status"]["code"]
        self._content = StringIO(self.recorded_response["body"]["string"])
        self.msg = HTTPMessage(StringIO(""))
        for k, v in self.recorded_response["headers"].iteritems():
            self.msg.addheader(k, v)

    def read(self, chunked=False):
        return self._content.read()
def __init__(self, content):
    if PY2x:
        fp = StringIO(content)
        self._info = HTTPMessage(fp)
    else:
        self._info = HTTPMessage()
        # Adjust to testdata.
        la = content.split(':')
        if len(la) > 1:
            # Get the type by just using the data at the end.
            t = la[-1].strip()
            self._info.set_type(t)
def __init__(self, data):
    req_line, headers = data.split('\r\n', 1)
    # HTTPMessage has no proper __repr__, so let's use the dictionary
    header_dict = HTTPMessage(StringIO(headers)).dict.copy()
    # all header names in the UPnP specs are uppercase
    self.headers = {k.upper(): v for k, v in header_dict.items()}
    method = req_line.split(' ')[0]
    self._notify = method == 'NOTIFY'
    # Unique Service Name => Unique Device Name + Type
    usn = self.headers.get('USN')
    self._udn, self._type = split_usn(usn) if usn else (None, None)
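A hedged driver for the datagram wrapper above; SSDPMessage and the split_usn stand-in below are assumed names, since the snippet shows neither its class nor the helper:

def split_usn(usn):
    # Stand-in: 'uuid:<udn>::<type>' -> (udn, type)
    parts = usn.split('::', 1)
    return (parts[0], parts[1]) if len(parts) == 2 else (parts[0], None)

data = ('NOTIFY * HTTP/1.1\r\n'
        'Host: 239.255.255.250:1900\r\n'
        'NT: upnp:rootdevice\r\n'
        'USN: uuid:1234::upnp:rootdevice\r\n'
        '\r\n')
msg = SSDPMessage(data)   # hypothetical owner class of the __init__ above
print(msg.headers['NT'])  # -> 'upnp:rootdevice' (keys were upper-cased)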
class MockHttpLibResponse(BytesIO):
    def __init__(self, data):
        BytesIO.__init__(self, data)
        self.status = 200
        self.version = 'HTTP/1.1'
        self.reason = 'OK'
        if PY2:
            self.msg = HTTPMessage(BytesIO(b'Content-Type: application/x-compressed\r\n'))
        else:
            self.msg = HTTPMessage()
            self.msg.add_header('Content-Type', 'application/x-compressed')

    def getheaders(self):
        return list(self.msg.items())

    def isclosed(self):
        return self.closed
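A quick usage sketch for the mock above; the payload bytes are illustrative:

resp = MockHttpLibResponse(b'\x1f\x8b\x08\x00demo')  # any byte payload
assert resp.status == 200 and not resp.isclosed()
print(resp.getheaders())  # -> [('Content-Type', 'application/x-compressed')]
payload = resp.read()     # BytesIO.read() hands back the raw body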
def test_from_httplib(self):
    if six.PY3:
        raise SkipTest()

    from httplib import HTTPMessage
    from StringIO import StringIO

    msg = """
Server: nginx
Content-Type: text/html; charset=windows-1251
Connection: keep-alive
Set-Cookie: bb_lastvisit=1348253375; expires=Sat, 21-Sep-2013 18:49:35 GMT; path=/
Set-Cookie: bb_lastactivity=0; expires=Sat, 21-Sep-2013 18:49:35 GMT; path=/
"""
    msg = HTTPMessage(StringIO(msg.lstrip().replace('\n', '\r\n')))
    d = HTTPHeaderDict.from_httplib(msg)
    self.assertEqual(d['server'], 'nginx')
    cookies = d.getlist('set-cookie')
    self.assertEqual(len(cookies), 2)
    self.assertTrue(cookies[0].startswith("bb_lastvisit"))
    self.assertTrue(cookies[1].startswith("bb_lastactivity"))
class VCRHTTPResponse(object):
    """
    Stub response class that gets returned instead of an HTTPResponse
    """
    def __init__(self, recorded_response):
        self.recorded_response = recorded_response
        self.reason = recorded_response['status']['message']
        self.status = recorded_response['status']['code']
        self.version = None
        self._content = StringIO(self.recorded_response['body']['string'])

        # We are skipping the header parsing (they have already been parsed
        # at this point) and directly adding the headers to the header
        # container, so just pass an empty StringIO.
        self.msg = HTTPMessage(StringIO(''))

        for key, val in self.recorded_response['headers'].iteritems():
            self.msg.addheader(key, val)
            # msg.addheader adds the headers to msg.dict, but not to
            # the msg.headers list representation of headers, so
            # I have to add it to both.
            self.msg.headers.append("{0}:{1}".format(key, val))

        self.length = self.msg.getheader('content-length') or None

    def read(self, *args, **kwargs):
        # Note: I'm pretty much ignoring any chunking stuff because
        # I don't really understand what it is or how it works.
        return self._content.read(*args, **kwargs)

    def close(self):
        return True

    def isclosed(self):
        # Urllib3 seems to call this because it actually uses
        # the weird chunking support in httplib
        return True

    def getheaders(self):
        return self.recorded_response['headers'].iteritems()
def __init__(self, conn):
    HPPResponse.__init__(self, sock=conn.sock,
                         debuglevel=conn.debuglevel,
                         strict=conn.strict,
                         method=conn._method)
    self.chunked = False
    self.will_close = False
    self.length = 0
    self.status = 200
    self.reason = 'OK'
    self.msg = HTTPMessage(StringIO(), seekable=0)
    self.msg.fp = None
def inner(*args, **kwargs):
    with patch('sanction.urlopen') as mock_urlopen:
        # encode text payloads to bytes; pass bytes through untouched
        # (the original `type(data) is basestring` test could never be
        # true, since no object has the abstract type basestring)
        bdata = data.encode() if isinstance(data, basestring) else data
        sheaders = ''
        if headers is not None:
            sheaders = '\r\n'.join(
                ['{}: {}'.format(k, v) for k, v in headers.items()])
        bheaders = (sheaders or '').encode()
        mock_urlopen.return_value = addinfourl(
            BytesIO(bdata),
            HTTPMessage(BytesIO(bheaders)),
            '', code=code)
        fn(*args, **kwargs)
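inner reads like the innermost layer of a test decorator; a hypothetical outer factory (the mock_response name is assumed, not taken from the source) might look like:

def mock_response(data, headers=None, code=200):
    def decorator(fn):
        def inner(*args, **kwargs):
            pass  # body exactly as above
        return inner
    return decorator

@mock_response('{"access_token": "abc"}',
               headers={'Content-Type': 'application/json'})
def test_token_request(self):
    pass  # sanction.urlopen is patched for the duration of this test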
def expect_response(self):
    if self.fp:
        self.fp.close()
        self.fp = None
    self.fp = self.sock.makefile('rb', 0)
    version, status, reason = self._read_status()
    if status != CONTINUE:
        self._read_status = lambda: (version, status, reason)
        self.begin()
    else:
        self.status = status
        self.reason = reason.strip()
        self.version = 11
        self.msg = HTTPMessage(self.fp, 0)
        self.msg.fp = None
def find_user_password(self, realm, authuri):
    """
    Limit number of queries per request.  Note that retries needs to be
    reset in the calling code.
    """
    # allow sending the username:password 5 times before failing!
    if self.retries > 5:
        from httplib import HTTPMessage
        from StringIO import StringIO
        raise urllib2.HTTPError(authuri, 401,
                                "basic auth failed for realm %r" % realm,
                                HTTPMessage(StringIO("")), None)
    self.retries += 1
    return urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
        self, realm, authuri)
@classmethod
def from_dict(cls, data):
    """Create object from dict."""
    obj = cls()
    for k in cls.attrs:
        if k in data:
            setattr(obj, k, data[k])
    obj.fp = cls.create_file_descriptor(obj.content)
    obj.msg = HTTPMessage(io.StringIO(unicode()), 0)
    for k, v in obj.headers.iteritems():
        obj.msg.addheader(k, v)
    obj.msg.headers = data["raw_headers"]
    return obj
def __call__(self, opener, method, url, data, headers):
    if url in self._exceptions:
        raise self._exceptions[url]
    if url in self._cookie_responses:
        plain_headers = self._cookie_responses[url]
        headers_text = '\n'.join([
            header_name + ': ' + plain_headers[header_name]
            for header_name in plain_headers
        ])
        response_headers = HTTPMessage(StringIO(headers_text))
        return self.build_response(url, 200, headers['Cookie'], 'OK',
                                   response_headers)
    elif url in self._responses:
        return self.build_response(*self._responses[url])
    else:
        return self.build_response(url, 404, '', 'Not Found')
def readbodychunked(self, ifile, ofile, dechunk=False, deflength=4096):
    totaldata = 0
    while True:
        # Chunk length (discard trailing ';....')
        clength = ifile.readline()
        if not dechunk:
            ofile.write(clength)
        clength = clength.rstrip('\r\n').split(';', 1)[0]
        clength = int(clength, 16)
        if not clength:
            break
        # Chunk data
        while clength:
            data = ifile.read(min(clength, deflength))
            ldata = len(data)
            clength -= ldata
            totaldata += ldata
            ofile.write(data)
        # Chunk end - trailing newline
        chunkend = ifile.readline()
        if not dechunk:
            ofile.write(chunkend)
    ofile.flush()
    # Chunk trailers
    if dechunk:
        ofile = StringIO()
    while True:
        line = ifile.readline()
        ofile.write(line)
        if line == '\r\n' or line == '\r':
            break
    ofile.flush()
    if dechunk:
        # rewind the trailer buffer so HTTPMessage parses it from the start
        ofile.seek(0)
    footers = None if not dechunk else HTTPMessage(ofile, seekable=0)
    return (totaldata, footers)
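A minimal sketch exercising the dechunker, assuming Python 2 and that the function is reachable at module scope; self is never referenced in the body, so None stands in for the owning instance, and the wire bytes are illustrative:

from StringIO import StringIO

wire = "4\r\nWiki\r\n5\r\npedia\r\n0\r\nX-Trailer: 1\r\n\r\n"
decoded = StringIO()
total, footers = readbodychunked(None, StringIO(wire), decoded, dechunk=True)
print(total)                           # -> 9
print(decoded.getvalue())              # -> 'Wikipedia'
print(footers.getheader('x-trailer'))  # -> '1'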
@classmethod
def from_dict(cls, data):
    """Create object from dict."""
    # Hack to ensure backwards compatibility with older versions that
    # did not have the length and version attributes.
    data.setdefault('length', len(data['content']))
    data.setdefault('version', 10)
    obj = cls()
    for k in cls.attrs:
        setattr(obj, k, data[k])
    obj.fp = cls.create_file_descriptor(obj.content)
    obj.msg = HTTPMessage(io.StringIO(unicode()), 0)
    for k, v in obj.headers.iteritems():
        obj.msg.addheader(k, v)
    obj.msg.headers = data["raw_headers"]
    return obj
def response(self, url, bypass_content):
    # bypass_content: { url1: [content, headers], ... }
    try:
        data = json.loads(bypass_content)
        if url in data:
            d = data[url]
            fp = io.BytesIO(d[0].encode('utf-8'))
            headers = HTTPMessage(io.StringIO(unicode(d[1])), 0)
            # for head in d[1].split("\n"):
            #     xy = head.split(":")
            #     headers.addheader(xy[0].strip(), xy[1].strip())
            code = 200
            msg = 'OK'
            res = addinfourl(fp, headers, url, code)
            youtubedl_logger.info('Request Bypassed for url -> ' + url)
            return res
        else:
            youtubedl_logger.info('Request Skipped for url -> ' + url)
            return None
    except Exception as e:
        youtubedl_logger.info('Request Exception for url -> ' + url)
        traceback.print_exc()
        return None
class Web_watch(Watch):
    """ Watch class that will check if http or rss pages are changed. """
    url_ = ""
    info_ = None
    content_ = None
    lastModified_ = None
    digest_ = None
    refresh_ = None
    infoB_ = None
    cached = 0
    url2_ = ""
    updated = False
    actually_updated = False
    type = 0

    def __init__(self, specto, name, refresh, url, id, error_margin):
        Watch.__init__(self, specto)  # init superclass
        self.refresh = refresh
        self.id = id
        self.url_ = url
        if self.url_ == "":
            # log the watch name (the original interpolated self.error,
            # which is not yet set at this point)
            self.specto.logger.log(_("Watch: \"%s\" has error: empty url") % name,
                                   "error", self.__class__)
        self.name = name
        # the amount in percent (as a float) by which the filesize must
        # change to consider the page changed
        self.error_margin = error_margin
        self.error = False

    def dict_values(self):
        return {'name': self.name,
                'refresh': self.refresh,
                'uri': self.url_,
                'error_margin': self.error_margin,
                'type': 0}

    def start_watch(self):
        """ Start the watch. """
        self.thread_update()

    def _real_update(self):
        self.specto.notifier.connected_message(True)  # hide the network error message
        lock = thread.allocate_lock()
        lock.acquire()
        t = thread.start_new_thread(self.update, (lock,))
        while lock.locked():
            while gtk.events_pending():
                gtk.main_iteration()
            time.sleep(0.05)
        while gtk.events_pending():
            gtk.main_iteration()

    def thread_update(self):
        if not self.specto.connection_manager.connected():
            self.specto.logger.log(_("No network connection detected"),
                                   "info", self.__class__)
            self.specto.notifier.connected_message(False)  # show the network error message
            self.specto.connection_manager.add_callback(self._real_update)
            self.specto.mark_watch_busy(False, self.id)
        else:
            self._real_update()

    def update(self, lock):
        """ See if an http or rss page changed. """
        self.error = False
        self.specto.mark_watch_busy(True, self.id)
        self.specto.logger.log(_("Updating watch: \"%s\"") % self.name,
                               "info", self.__class__)

        # Create a unique name for each url.
        digest = md5.new(self.url_).digest()
        cacheFileName = "".join(["%02x" % (ord(c),) for c in digest])
        self.cacheFullPath_ = os.path.join(cacheSubDir__, cacheFileName)

        request = urllib2.Request(self.url_, None, {"Accept-encoding": "gzip"})
        if (self.cached == 1) or (os.path.exists(self.cacheFullPath_)):
            self.cached = 1
            f = file(self.cacheFullPath_, "r")  # Load up the cached version
            self.infoB_ = HTTPMessage(f)
            if self.infoB_.has_key('last-modified'):
                request.add_header("If-Modified-Since", self.infoB_['last-modified'])
            if self.infoB_.has_key('ETag'):
                request.add_header("If-None-Match", self.infoB_['ETag'])
        try:
            response = urllib2.urlopen(request)
        except (urllib2.URLError, BadStatusLine), e:
            self.error = True
            self.specto.logger.log(_("Watch: \"%s\" has error: ") % self.name + str(e),
                                   "error", self.__class__)
        else:
            pass  # snippet truncated here in the source
def clone(self):
    cloneobj = HTTPMessage(StringIO(), seekable=0)
    cloneobj.dict = self.dict.copy()
    cloneobj.headers = copy(self.headers)
    return cloneobj
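A small sketch of how the clone could be exercised, assuming Python 2's httplib.HTTPMessage; calling the helper directly with the message as self avoids subclassing for the demo:

from httplib import HTTPMessage
from StringIO import StringIO
from copy import copy

original = HTTPMessage(StringIO('Host: example.com\r\n\r\n'))
duplicate = clone(original)   # pass the message explicitly as self
duplicate.dict['host'] = 'other.example'
print(original.dict['host'])  # -> 'example.com': the copy is independent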
class ReplayHTTPResponse(object):
    """
    A replay response object, with just enough functionality to make
    the various HTTP/URL libraries out there happy.
    """
    __text_content_types = (
        'text/',
        'application/json',
    )

    def __init__(self, replay_response, method=None):
        self.reason = replay_response['status']['message']
        self.status = replay_response['status']['code']
        self.version = None
        if 'body_quoted_printable' in replay_response:
            self._content = quopri.decodestring(replay_response['body_quoted_printable'])
        else:
            self._content = replay_response['body'].decode('base64')
        self.fp = StringIO(self._content)

        msg_fp = StringIO('\r\n'.join('{}: {}'.format(h, v)
                                      for h, v in replay_response['headers'].iteritems()))
        self.msg = HTTPMessage(msg_fp)
        self.msg.fp = None  # httplib does this, okay?

        length = self.msg.getheader('content-length')
        self.length = int(length) if length else None

        # Save method to handle HEAD specially as httplib does
        self._method = method

    @classmethod
    def make_replay_response(cls, response):
        """
        Converts real response to replay_response dict which can be
        saved and/or used to initialize a ReplayHTTPResponse.
        """
        replay_response = {}
        body = response.read()  # undecoded byte string

        # Add body to replay_response, either as quoted printable for
        # text responses or base64 for binary responses.
        if response.getheader('content-type', '') \
                .startswith(cls.__text_content_types):
            if response.getheader('content-encoding') in ['gzip', 'deflate']:
                # http://stackoverflow.com/questions/2695152
                body = zlib.decompress(body, 16 + zlib.MAX_WBITS)
                del response.msg['content-encoding']
                # decompression changes the length
                if 'content-length' in response.msg:
                    response.msg['content-length'] = str(len(body))
            replay_response['body_quoted_printable'] = quopri.encodestring(body)
        else:
            replay_response['body'] = body.encode('base64')

        replay_response.update(dict(
            status=dict(code=response.status, message=response.reason),
            headers=dict(response.getheaders())))
        return replay_response

    def close(self):
        self.fp = None

    def isclosed(self):
        return self.fp is None

    def read(self, amt=None):
        """
        The important parts of HTTPResponse.read()
        """
        if self.fp is None:
            return ''
        if self._method == 'HEAD':
            self.close()
            return ''

        if self.length is not None:
            amt = min(amt, self.length)

        # StringIO doesn't like read(None)
        s = self.fp.read() if amt is None else self.fp.read(amt)
        if not s:
            self.close()

        if self.length is not None:
            self.length -= len(s)
            if not self.length:
                self.close()
        return s

    def getheader(self, name, default=None):
        return self.msg.getheader(name, default)

    def getheaders(self):
        return self.msg.items()
def __init__(self):
    HTTPMessage.__init__(self, BytesIO())
def parse_flow(IP):
    styleID = ''.join(choice(string.ascii_lowercase + string.digits)
                      for x in range(randint(8, 12)))
    ssize = len(styleID)
    p = sub.Popen(['tcpflow', '-T %T--%A-%B', '-cJB', '-r',
                   os.getenv('PROCDOTPLUGIN_WindumpFilePcap')],
                  stdout=sub.PIPE, stderr=sub.PIPE)
    stdout, stderr = p.communicate()
    stdout = stdout.replace('\r\n', '\n')
    if IP not in stdout:
        e = "No tcp flows found for " + IP
        open(out, 'ab').write(e)
    else:
        if os.getenv('PROCDOTPLUGIN_PluginEngineVersion') is not None:
            open(out, 'ab').write('{{{style-id:default;color:blue;style-id:'
                                  + styleID + ';color:red}}}')
        m = re.findall('\x1b\[0;31m(.*?)\x1b\[0m|\x1b\[0;34m(.*?)\x1b\[0m',
                       stdout, re.DOTALL)
        m = iter(m)
        for b, r in m:
            if b == '':
                # blue (server-to-client) flow text is in r
                if IP in r:
                    r = r[56:]
                    r = re.sub('[^!\"#\$%&\'\(\)\*\+,-\./0-9:;<=>\?@A-Z\[\]\^_`a-z\{\|\}\\\~\t\n\r ]', '.', r)
                    if os.stat(out).st_size <= 53 + ssize:
                        if os.getenv('PROCDOTPLUGIN_PluginEngineVersion') is not None:
                            open(out, 'ab').write('<' + styleID + '>' + r + '</' + styleID + '>')
                        else:
                            open(out, 'ab').write(r)
                    else:
                        if os.getenv('PROCDOTPLUGIN_PluginEngineVersion') is not None:
                            open(out, 'ab').write('\n\n' + '<' + styleID + '>' + r + '</' + styleID + '>')
                        else:
                            open(out, 'ab').write('\n\n' + r)
            else:
                if IP in b:
                    b = b[56:]
                    match = re.match('^HTTP.*', b)
                    try:
                        if match:
                            length = 1
                            num = 0
                            while length != num:
                                d = zlib.decompressobj(16 + zlib.MAX_WBITS)
                                output = StringIO.StringIO(b)
                                status_line = output.readline()
                                msg = HTTPMessage(output, 0)
                                isLength = msg.get('Content-Length')
                                isGZipped = msg.get('content-encoding', '').find('gzip') >= 0
                                isChunked = msg.get('Transfer-Encoding', '').find('chunked') >= 0
                                if isGZipped and isChunked:
                                    offset = msg.fp.readline()
                                    body = msg.fp.read()
                                    num = int(offset, 16)
                                    encdata = body[:num]
                                    length = len(encdata)
                                    if length != num:
                                        # body continues in the next captured flow
                                        c = next(m)
                                        d, e = c
                                        b = b + d[56:]
                                    else:
                                        newdata = d.decompress(encdata)
                                        header = str(msg)
                                        open(out, 'ab').write(status_line)
                                        open(out, 'ab').write(header)
                                        open(out, 'ab').write('\n')
                                        open(out, 'ab').write(newdata)
                                elif isGZipped:
                                    length = int(isLength)
                                    body = msg.fp.read()
                                    num = len(body)
                                    if length != num:
                                        c = next(m)
                                        d, e = c
                                        if IP in d:
                                            b = b + d[56:]
                                    else:
                                        data = d.decompress(body)
                                        header = str(msg)
                                        open(out, 'ab').write(status_line)
                                        open(out, 'ab').write(header)
                                        open(out, 'ab').write('\n')
                                        open(out, 'ab').write(data)
                                else:
                                    length = 1
                                    num = 1
                                    body = msg.fp.read()
                                    body = re.sub('[^!\"#\$%&\'\(\)\*\+,-\./0-9:;<=>\?@A-Z\[\]\^_`a-z\{\|\}\\\~\t\n\r ]', '.', body)
                                    header = str(msg)
                                    open(out, 'ab').write(status_line)
                                    open(out, 'ab').write(header)
                                    open(out, 'ab').write('\n')
                                    open(out, 'ab').write(body)
                        else:
                            b = re.sub('[^!\"#\$%&\'\(\)\*\+,-\./0-9:;<=>\?@A-Z\[\]\^_`a-z\{\|\}\\\~\t\n\r ]', '.', b)
                            open(out, 'ab').write(b)
                    except:
                        open(out, 'ab').write('DECOMPRESSION ERROR')
                        open(out, 'ab').write('\n\n')
                        b = re.sub('[^!\"#\$%&\'\(\)\*\+,-\./0-9:;<=>\?@A-Z\[\]\^_`a-z\{\|\}\\\~\t\n\r ]', '.', b)
                        open(out, 'ab').write(b)
def __init__(self):
    self.msg = HTTPMessage(StringIO())
    self.msg['content-type'] = 'text/plain'
    self.status = 400
    self.reason = 'Bad Request'
def __init__(self, method, path, version='HTTP/1.1'):
    self.method = method.upper()
    self.path = path
    self.version = version
    self.msg = HTTPMessage(StringIO())
    self.body = ''
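Assuming this constructor belongs to a small request stub (StubRequest is a hypothetical name, not given in the source), building one could look like:

req = StubRequest('get', '/index.html')
req.msg['Host'] = 'example.com'   # rfc822-style header assignment
print('{} {} {}'.format(req.method, req.path, req.version))
# -> GET /index.html HTTP/1.1
print(req.msg.getheader('host'))  # -> example.com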
def make_headers(fp):
    return HTTPMessage(fp) if Compatibility.PY2 else parse_headers(fp)
def parse_headers(fp):
    return HTTPMessage(fp, 0)
def make_headers(fp):
    return HTTPMessage(fp) if PY2 else parse_headers(fp)
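A hedged usage sketch for the shim above; the PY2 flag and the Python 3 parse_headers import are assumptions inferred from the function body:

import sys
PY2 = sys.version_info[0] == 2
if PY2:
    from httplib import HTTPMessage
    from StringIO import StringIO as HeaderIO
else:
    from http.client import parse_headers
    from io import BytesIO as HeaderIO

raw = 'Content-Type: text/html\r\n\r\n'
fp = HeaderIO(raw if PY2 else raw.encode('ascii'))
msg = make_headers(fp)
# the two APIs expose the parsed value through different accessors:
value = msg.getheader('content-type') if PY2 else msg['Content-Type']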
def parse_flow(IP):
    p = sub.Popen(['tcpflow', '-T %T--%A-%B', '-cJB', '-r',
                   os.getenv('PROCDOTPLUGIN_WindumpFilePcap')],
                  stdout=sub.PIPE, stderr=sub.PIPE)
    stdout, stderr = p.communicate()
    stdout = stdout.replace('\r\n', '\n')
    if IP not in stdout:
        e = "No tcp flows found for " + IP
        open(out, 'ab').write(e)
    else:
        m = re.findall('\x1b\[0;3[1|4]m(.*?)\x1b\[0m', stdout, re.DOTALL)
        m = iter(m)
        for line in m:
            if IP in line:
                line = line[56:]
                match = re.match('^HTTP.*', line)
                try:
                    if match:
                        length = 1
                        num = 0
                        while length != num:
                            d = zlib.decompressobj(16 + zlib.MAX_WBITS)
                            output = StringIO.StringIO(line)
                            status_line = output.readline()
                            msg = HTTPMessage(output, 0)
                            isGZipped = msg.get('content-encoding', '').find('gzip') >= 0
                            isChunked = msg.get('Transfer-Encoding', '').find('chunked') >= 0
                            if isGZipped and isChunked:
                                offset = msg.fp.readline()
                                body = msg.fp.read()
                                num = int(offset, 16)
                                encdata = body[:num]
                                length = len(encdata)
                                if length != num:
                                    line = line + next(m)[56:]
                                else:
                                    newdata = d.decompress(encdata)
                                    header = str(msg)
                                    open(out, 'ab').write(status_line)
                                    open(out, 'ab').write(header)
                                    open(out, 'ab').write('\n')
                                    open(out, 'ab').write(newdata)
                            elif isGZipped:
                                length = 1
                                num = 1
                                body = msg.fp.read()
                                data = d.decompress(body)
                                header = str(msg)
                                open(out, 'ab').write(status_line)
                                open(out, 'ab').write(header)
                                open(out, 'ab').write('\n')
                                open(out, 'ab').write(data)
                            else:
                                break
                    else:
                        line = re.sub('[^!\"#\$%&\'\(\)\*\+,-\./0-9:;<=>\?@A-Z\[\]\^_`a-z\{\|\}\\\~\t\n\r ]', '.', line)
                        open(out, 'ab').write(line)
                except:
                    open(out, 'ab').write('DECOMPRESSION ERROR')
                    open(out, 'ab').write('\n\n')
                    open(out, 'ab').write(line)
class ReplayHTTPResponse(object):
    """
    A replay response object, with just enough functionality to make
    the various HTTP/URL libraries out there happy.
    """
    __text_content_types = (
        'text/',
        'application/json',
    )

    def __init__(self, replay_response, method=None):
        self.reason = replay_response['status']['message']
        self.status = replay_response['status']['code']
        self.version = None
        if 'body_text' in replay_response:
            # JSON decoder returns unicode, not str, so this needs to be
            # encoded to properly reproduce content off the wire.
            self._content = replay_response['body_text'].encode('utf8')
        elif 'body_quoted_printable' in replay_response:
            # quopri.decodestring returns str, which is correct for
            # content off the wire.
            self._content = quopri.decodestring(replay_response['body_quoted_printable'])
        else:
            # .decode('base64') returns str, which is correct for
            # content off the wire.
            self._content = replay_response['body'].decode('base64')
        self.fp = StringIO(self._content)

        msg_fp = StringIO('\r\n'.join('{}: {}'.format(h, v)
                                      for h, v in replay_response['headers'].iteritems()))
        self.msg = HTTPMessage(msg_fp)
        self.msg.fp = None  # httplib does this, okay?

        length = self.msg.getheader('content-length')
        self.length = int(length) if length else None

        # Save method to handle HEAD specially as httplib does
        self._method = method

    @classmethod
    def make_replay_response(cls, response):
        """
        Converts a real response to a replay_response dict which can be
        saved and/or used to initialize a ReplayHTTPResponse.
        """
        replay_response = {}
        body = response.read()  # undecoded byte string

        # Add body to replay_response.  Try to use simple text, falling
        # back to quoted printable or base64 as required for binary
        # responses.
        if response.getheader('content-type', '') \
                .startswith(cls.__text_content_types):
            if response.getheader('content-encoding') in ['gzip', 'deflate']:
                # http://stackoverflow.com/questions/2695152
                body = zlib.decompress(body, 16 + zlib.MAX_WBITS)
                del response.msg['content-encoding']
                # decompression changes the length
                if 'content-length' in response.msg:
                    response.msg['content-length'] = str(len(body))
            try:
                # Store body directly as text if it will decode properly.
                body.decode('utf8')
                replay_response['body_text'] = body
            except UnicodeDecodeError:
                # Store body as quoted printable.  Remove unnecessary
                # '=\n' pairs, which make searching hard; these exist
                # for line-wrapping in email and are entirely pointless
                # here.
                body_quoted_printable = quopri.encodestring(body)
                body_quoted_printable = body_quoted_printable.replace('=\n', '')
                replay_response['body_quoted_printable'] = body_quoted_printable
        else:
            replay_response['body'] = body.encode('base64')

        replay_response.update(dict(
            status=dict(code=response.status, message=response.reason),
            headers=dict(response.getheaders())))
        return replay_response

    def close(self):
        self.fp = None

    def isclosed(self):
        return self.fp is None

    def read(self, amt=None):
        """
        The important parts of HTTPResponse.read()
        """
        if self.fp is None:
            return ''
        if self._method == 'HEAD':
            self.close()
            return ''

        if self.length is not None:
            amt = min(amt, self.length)

        # StringIO doesn't like read(None)
        s = self.fp.read() if amt is None else self.fp.read(amt)
        if not s:
            self.close()

        if self.length is not None:
            self.length -= len(s)
            if not self.length:
                self.close()
        return s

    def getheader(self, name, default=None):
        return self.msg.getheader(name, default)

    def getheaders(self):
        return self.msg.items()
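A hedged round trip through the class above, under Python 2; FakeLiveResponse is a stand-in for a real httplib.HTTPResponse and is not part of the source:

class FakeLiveResponse(object):
    status, reason = 200, 'OK'
    msg = {'content-type': 'application/json', 'content-length': '17'}

    def read(self):
        return '{"answer": "42"}\n'

    def getheader(self, name, default=None):
        return self.msg.get(name, default)

    def getheaders(self):
        return self.msg.items()

recorded = ReplayHTTPResponse.make_replay_response(FakeLiveResponse())
replayed = ReplayHTTPResponse(recorded)
assert replayed.read() == '{"answer": "42"}\n'
assert replayed.getheader('content-type') == 'application/json'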