Example #1
0
 def open_local_file(self, req):
     """Serve a file: URL from the local filesystem.

     Returns an addinfourl over the file contents or, when the path is
     a directory, over a generated HTML listing.  Raises
     urllib2.URLError when the request names a non-local host.
     """
     host = req.get_host()
     file = req.get_selector()
     localfile = urllib2.url2pathname(file)
     stats = os.stat(localfile)
     size = stats.st_size
     modified = formatdate(stats.st_mtime, usegmt=True)
     mtype = mimetypes.guess_type(file)[0]
     headers = mimetools.Message(cStringIO.StringIO(
         'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
         (mtype or 'text/plain', size, modified)))
     if host:
         host, port = urllib.splitport(host)
     if not host or \
        (not port and socket.gethostbyname(host) in self.get_names()):
         try:
             file_list = dircache.listdir(localfile)
             # Build the listing up front so Content-length can reflect
             # the generated HTML rather than the directory's stat() size.
             parts = ['<html><head><base href="%s"/></head><body>' % ('file:' + file)]
             parts.append('<p>Directory Content:</p>')
             for f in file_list:
                 parts.append('<p><a href="%s">%s</a></p>\n' % (urllib.quote(f), f))
             parts.append('</body></html>')
             html = ''.join(parts)
             # BUG FIX: Content-length previously advertised the
             # directory's st_size instead of the body actually sent.
             headers = mimetools.Message(cStringIO.StringIO(
                 'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
                 ('text/html', len(html), modified)))
             return urllib2.addinfourl(cStringIO.StringIO(html), headers, 'file:' + file)
         except OSError:
             # Not a directory: serve the file itself with the original headers.
             return urllib2.addinfourl(open(localfile, 'rb'),
                                       headers, 'file:'+file)
     raise urllib2.URLError('file not on local host')
Example #2
0
 def open_local_file(self, req):
     """Serve a file: URL from the local filesystem.

     Returns an addinfourl over the file contents or, for a directory,
     over a generated HTML listing.  Raises URLError when the request
     names a non-local host.
     """
     import email.Utils
     host = req.get_host()
     file = req.get_selector()
     localfile = url2pathname(file)
     stats = os.stat(localfile)
     size = stats.st_size
     modified = email.Utils.formatdate(stats.st_mtime, usegmt=True)
     mtype = mimetypes.guess_type(file)[0]
     headers = mimetools.Message(StringIO(
         'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
         (mtype or 'text/plain', size, modified)))
     if host:
         host, port = splitport(host)
     if not host or \
        (not port and socket.gethostbyname(host) in self.get_names()):
         try:
             file_list = dircache.listdir(localfile)
             # Build the listing up front so Content-length can reflect
             # the generated HTML rather than the directory's stat() size.
             parts = ['<html><head><base href="%s"/></head><body>' % ('file:' + file)]
             parts.append('<p>Directory Content:</p>')
             for f in file_list:
                 parts.append('<p><a href="%s">%s</a></p>\n' % (urllib.quote(f), f))
             parts.append('</body></html>')
             html = ''.join(parts)
             # BUG FIX: Content-length previously advertised the
             # directory's st_size instead of the body actually sent.
             headers = mimetools.Message(StringIO(
                 'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
                 ('text/html', len(html), modified)))
             return addinfourl(StringIO(html), headers, 'file:' + file)
         except OSError:
             # Not a directory: serve the file itself with the original headers.
             return addinfourl(open(localfile, 'rb'),
                               headers, 'file:'+file)
     raise URLError('file not on local host')
Example #3
0
 def check_zip(self, resp):
     """Return a response whose body is transparently decompressed.

     If the response is gzip- or deflate-encoded, wrap the decoded
     stream in a new urllib2.addinfourl (preserving headers, url, code
     and msg); otherwise return the response unchanged.

     resp -- an open urllib2 response object
     """
     if resp.headers.get("content-encoding") == "gzip":
         lib.log_print.log_print_debug("Zip page.")
         data = resp.read()
         gz = gzip.GzipFile(fileobj=StringIO.StringIO(data))
         new_resp = urllib2.addinfourl(gz, resp.headers, resp.url, resp.code)
         new_resp.msg = resp.msg
         return new_resp
     elif resp.headers.get("content-encoding") == "deflate":
         lib.log_print.log_print_debug(" Deflate page. url:  %s" % (resp.geturl()))
         data = resp.read()
         # self._deflate is a helper defined elsewhere on this class.
         gz = StringIO.StringIO(self._deflate(data))
         new_resp = urllib2.addinfourl(gz, resp.headers, resp.url, resp.code)
         new_resp.msg = resp.msg
         return new_resp
     else:
         lib.log_print.log_print_debug(
             " Unzip page. Encoding type:  %s,  url:  %s"
             % (str(resp.headers.get("content-encoding")), resp.geturl())
         )
         return resp
Example #4
0
def GetLastUpdateTime():
	"""Fetch the Sina quote feed for the Dow (gb_dji) and return its
	last-update time as a naive datetime.

	Raises ValueError when the expected 'EST' marker is absent.
	NOTE(review): assumes field 25 of the comma-separated JS payload
	is the timestamp -- confirm against the live feed.
	"""
	last_update = None
	url = 'http://hq.sinajs.cn/rn=1389255083862&list=gb_dji'
	data = ''
	user_agent = 'Mozilla/5.0 (Windows NT 5.1; rv:12.0) Gecko/20100101 Firefox/12.0'
	headers = { 'User-Agent' : user_agent,
	'Host': 'hq.sinajs.cn', 
	'Accept': '*/*',
	'Accept-Language': 'en-us,en;q=0.5',
	'Accept-Encoding': 'gzip, deflate',
	#'Connection': 'keep-alive',
	'Referer': 'http://finance.sina.com.cn/stock/usstock/sector.shtml',
	}

	req = urllib2.Request(url, data, headers)
	resp = urllib2.urlopen(req)
	old_resp = resp

	# Decode a gzip body transparently, keeping the original metadata.
	if resp.headers.get("content-encoding") == "gzip":
		gz = GzipFile(
					fileobj=StringIO(resp.read()),
					mode="r"
				)
		#resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
		resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url)
		resp.msg = old_resp.msg
		#json_html = gz.read()
		#print 'xxx'
	# deflate
	if resp.headers.get("content-encoding") == "deflate":
		# `deflate` is a helper defined elsewhere in this module.
		gz = StringIO( deflate(resp.read()) )
		#resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)  # 'class to add info() and
		resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url)  # 'class to add info() and
		resp.msg = old_resp.msg
		#json_html = gz.read()
		#print 'YY'
	js_script = resp.read()
	#print js_script
	last_update_time = js_script.split(',')[25]
	#print last_update_time
	#Jan 08 04:13PM EST
	ridx = last_update_time.rfind('EST')

	if ridx == -1:
		#try:
		raise ValueError('Timezone EST value not set')
		#except ValueError, e:
		#	print e
	else:
		last_update_time = last_update_time[:ridx].strip()
		print last_update_time
		# strptime has no year; datetime.replace's first positional
		# argument is `year`, so stamp in the current year.
		last_update = datetime.datetime.strptime(last_update_time, "%b %d %I:%M%p")
		last_update = last_update.replace(datetime.date.today().year)
		print last_update

	return last_update
def GetLastUpdateTime():
    """Fetch the Sina quote feed for the Dow (gb_dji) and return its
    last-update time as a naive datetime.

    Raises ValueError when the expected 'EST' marker is absent.
    NOTE(review): assumes field 25 of the comma-separated JS payload
    is the timestamp -- confirm against the live feed.
    """
    last_update = None
    url = 'http://hq.sinajs.cn/rn=1389255083862&list=gb_dji'
    data = ''
    user_agent = 'Mozilla/5.0 (Windows NT 5.1; rv:12.0) Gecko/20100101 Firefox/12.0'
    headers = {
        'User-Agent': user_agent,
        'Host': 'hq.sinajs.cn',
        'Accept': '*/*',
        'Accept-Language': 'en-us,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        #'Connection': 'keep-alive',
        'Referer': 'http://finance.sina.com.cn/stock/usstock/sector.shtml',
    }

    req = urllib2.Request(url, data, headers)
    resp = urllib2.urlopen(req)
    old_resp = resp

    # Decode a gzip body transparently, keeping the original metadata.
    if resp.headers.get("content-encoding") == "gzip":
        gz = GzipFile(fileobj=StringIO(resp.read()), mode="r")
        #resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
        resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url)
        resp.msg = old_resp.msg
        #json_html = gz.read()
        #print 'xxx'
    # deflate
    if resp.headers.get("content-encoding") == "deflate":
        # `deflate` is a helper defined elsewhere in this module.
        gz = StringIO(deflate(resp.read()))
        #resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)  # 'class to add info() and
        resp = urllib2.addinfourl(gz, old_resp.headers,
                                  old_resp.url)  # 'class to add info() and
        resp.msg = old_resp.msg
        #json_html = gz.read()
        #print 'YY'
    js_script = resp.read()
    #print js_script
    last_update_time = js_script.split(',')[25]
    #print last_update_time
    #Jan 08 04:13PM EST
    ridx = last_update_time.rfind('EST')

    if ridx == -1:
        #try:
        raise ValueError('Timezone EST value not set')
        #except ValueError, e:
        #	print e
    else:
        last_update_time = last_update_time[:ridx].strip()
        print last_update_time
        # strptime has no year; datetime.replace's first positional
        # argument is `year`, so stamp in the current year.
        last_update = datetime.datetime.strptime(last_update_time,
                                                 "%b %d %I:%M%p")
        last_update = last_update.replace(datetime.date.today().year)
        print last_update

    return last_update
Example #6
0
 def http_response(self, request, response):
     """Transparently decode gzip/deflate response bodies.

     Wraps the decoded stream in a fresh urllib2.addinfourl that keeps
     the original headers, URL, code and msg.
     """
     original = response
     if response.headers.get('Content-Encoding') == 'gzip':
         decoded = gzip.GzipFile(fileobj=StringIO.StringIO(response.read()), mode='r')
         response = urllib2.addinfourl(decoded, original.headers, original.url, original.code)
         response.msg = original.msg
     if response.headers.get('Content-Encoding') == 'deflate':
         # Raw deflate stream: negative wbits disables the zlib header.
         inflater = zlib.decompressobj(-zlib.MAX_WBITS)
         decoded = StringIO.StringIO(inflater.decompress(response.read()))
         response = urllib2.addinfourl(decoded, original.headers, original.url, original.code)
         response.msg = original.msg
     return response
Example #7
0
	def http_response(self, request, response):
		"""Replace gzip/deflate-encoded bodies with decoded streams,
		preserving the original headers, url, code and msg."""
		old_response = response
		if response.headers.get("content-encoding") == "gzip":
			gzipFile = gzip.GzipFile(fileobj=StringIO.StringIO(response.read()),mode="r")
			response = urllib2.addinfourl(gzipFile, old_response.headers, old_response.url, old_response.code)
			response.msg = old_response.msg
		if response.headers.get("content-encoding") == "deflate":
			# `deflate` is a helper defined elsewhere in this module.
			gzipFile = StringIO.StringIO(deflate(response.read()) )
			response = urllib2.addinfourl(gzipFile, old_response.headers, old_response.url, old_response.code)
			response.msg = old_response.msg
		return response
Example #8
0
 def http_response(self, req, resp):
     """Decode gzip/deflate-encoded bodies while preserving response
     metadata (headers, url, code, msg)."""
     source = resp
     if resp.headers.get("content-encoding") == "gzip":
         body = gzip.GzipFile(fileobj=BytesIO(resp.read()), mode="r")
         resp = urllib2.addinfourl(body, source.headers, source.url, source.code)
         resp.msg = source.msg
         # deflate
     if resp.headers.get("content-encoding") == "deflate":
         body = BytesIO(self.deflate(resp.read()))
         resp = urllib2.addinfourl(body, source.headers, source.url, source.code)
         resp.msg = source.msg
     return resp
Example #9
0
	def http_response(self, req, resp):
		"""Decode gzip/deflate content-encodings; metadata (headers,
		url, code, msg) is carried over to the replacement response."""
		old_resp = resp
		# gzip
		if resp.headers.get('content-encoding') == 'gzip':
			gz = GzipFile(fileobj=BytesIO(resp.read()), mode='r')
			resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
			resp.msg = old_resp.msg
		# deflate
		if resp.headers.get('content-encoding') == 'deflate':
			# `deflate` is a helper defined elsewhere in this module.
			gz = BytesIO( deflate(resp.read()) )
			resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)  # 'class to add info() and
			resp.msg = old_resp.msg
		return resp
Example #10
0
 def http_response(self, req, resp):
     """Decode gzip/deflate response bodies in place.

     Returns a replacement addinfourl whose fp yields the decoded data
     and which carries over the original headers, url, code and msg.
     """
     old_resp = resp
     if resp.headers.get("content-encoding") == "gzip":
         data = gzip.GzipFile(fileobj=StringIO(resp.read()), mode="r")
         resp = urllib2.addinfourl(data, old_resp.headers,
                                   old_resp.url, old_resp.code)
         resp.msg = old_resp.msg
     if resp.headers.get("content-encoding") == "deflate":
         data = zlib.decompress(resp.read(), -zlib.MAX_WBITS)
         # BUG FIX: addinfourl expects a file-like object; the raw
         # decompressed bytes were previously passed directly, so
         # .read() on the result would fail.
         resp = urllib2.addinfourl(StringIO(data), old_resp.headers,
                                   old_resp.url, old_resp.code)
         resp.msg = old_resp.msg
     return resp
 def mock_response(self, req):
     """Return canned 200/404 responses for two known test URLs.

     Any other URL aborts the process via SystemExit(2) (Python 2
     raise syntax).  NOTE(review): the second positional argument of
     addinfourl is normally a headers object; a plain string is passed
     here.
     """
     if req.get_full_url() == "http://localhost/test":
         resp = urllib2.addinfourl(StringIO("mock file"), "mock message", req.get_full_url())
         resp.code = 200
         resp.msg = "OK"
         return resp
     if req.get_full_url() == "http://localhost/typo":
         resp = urllib2.addinfourl(StringIO("mock file"), "mock message", req.get_full_url())
         resp.code = 404
         resp.msg = "Not Found"
         return resp
     else:
         raise SystemExit, 2
Example #12
0
 def http_response(self, req, resp):
     """Decode gzip/deflate response bodies in place.

     Returns a replacement addinfourl whose fp yields the decoded data
     and which carries over the original headers, url, code and msg.
     """
     old_resp = resp
     if resp.headers.get("content-encoding") == "gzip":
         data = gzip.GzipFile(fileobj=StringIO(resp.read()), mode="r")
         resp = urllib2.addinfourl(data, old_resp.headers, old_resp.url,
                                   old_resp.code)
         resp.msg = old_resp.msg
     if resp.headers.get("content-encoding") == "deflate":
         data = zlib.decompress(resp.read(), -zlib.MAX_WBITS)
         # BUG FIX: addinfourl expects a file-like object; the raw
         # decompressed bytes were previously passed directly, so
         # .read() on the result would fail.
         resp = urllib2.addinfourl(StringIO(data), old_resp.headers, old_resp.url,
                                   old_resp.code)
         resp.msg = old_resp.msg
     return resp
Example #13
0
File: web.py Project: libyyu/flib
 def http_response(self, req, resp):
     """Unwrap gzip/deflate content-encodings, returning a response
     with identical metadata and a decoded body stream."""
     encoded = resp
     if resp.headers.get('content-encoding') == 'gzip':
         stream = GzipFile(fileobj=StringIO(resp.read()), mode='r')
         resp = addinfourl(stream, encoded.headers, encoded.url,
                           encoded.code)
         resp.msg = encoded.msg
     if resp.headers.get('content-encoding') == 'deflate':
         stream = StringIO(deflate(resp.read()))
         resp = addinfourl(stream, encoded.headers, encoded.url,
                           encoded.code)
         resp.msg = encoded.msg
     return resp
Example #14
0
 def http_response(self, req, resp):
     """Transparently decompress gzip/deflate response bodies while
     keeping headers, url, code and msg from the original response."""
     old_resp = resp
     # gzip
     if resp.headers.get("content-encoding") == "gzip":
         gz = GzipFile(fileobj=StringIO(resp.read()), mode="r")
         resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
         resp.msg = old_resp.msg
     # deflate
     if resp.headers.get("content-encoding") == "deflate":
         # `deflate` is a helper defined elsewhere in this module.
         gz = StringIO(deflate(resp.read()))
         resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)  # 'class to add info() and
         resp.msg = old_resp.msg
     return resp
Example #15
0
 def http_open(self, req):
     """Mock opener: serve content registered in self.urls with code
     200; unknown URLs get an empty 404 body (msg is "OK" in both
     cases)."""
     url = req.get_full_url()
     if url in self.urls:
         resp = urllib2.addinfourl(StringIO(self.urls[url]), None, url)
         resp.code = 200
     else:
         resp = urllib2.addinfourl(StringIO(""), None, url)
         resp.code = 404
     resp.msg = "OK"
     return resp
Example #16
0
 def http_open(self, req):
     """Mock opener: serve content registered in self.urls with code
     200; unknown URLs yield an empty 404 body.  Note msg is "OK" in
     both branches."""
     url = req.get_full_url()
     try:
         content = self.urls[url]
     except KeyError:
         resp = urllib2.addinfourl(StringIO(""), None, url)
         resp.code = 404
         resp.msg = "OK"
     else:
         resp = urllib2.addinfourl(StringIO(content), None, url)
         resp.code = 200
         resp.msg = "OK"
     return resp
Example #17
0
 def http_response(self, req, resp):
     """Decode gzip/deflate-encoded bodies, preserving response
     metadata (headers, url, code, msg)."""
     old_resp = resp
     # gzip
     if resp.headers.get("content-encoding") == "gzip":
         gz = GzipFile(fileobj=StringIO(resp.read()), mode="r")
         resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url,
                                   old_resp.code)
         resp.msg = old_resp.msg
     # deflate
     if resp.headers.get("content-encoding") == "deflate":
         # `deflate` is a helper defined elsewhere in this module.
         gz = StringIO(deflate(resp.read()))
         resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url,
                                   old_resp.code)
         resp.msg = old_resp.msg
     return resp
 def mock_response(self, req):
     """Return canned 200/404 responses for two known test URLs; any
     other URL exits the process with SystemExit(2) (Python 2 raise
     syntax)."""
     if req.get_full_url() == "http://localhost/test":
         resp = urllib2.addinfourl(StringIO("mock file"),
                                   "mock message", req.get_full_url())
         resp.code = 200
         resp.msg = "OK"
         return resp
     if req.get_full_url() == "http://localhost/typo":
         resp = urllib2.addinfourl(StringIO("mock file"),
                                   "mock message", req.get_full_url())
         resp.code = 404
         resp.msg = "Not Found"
         return resp
     else:
         raise SystemExit, 2
def to_addinfourl(response):
    """Convert a `django.http.HttpResponse` to a `urllib2.addinfourl`.

    The serialized Django headers are re-parsed into an
    httplib.HTTPMessage so urllib2 consumers can use .info(); the body
    is exposed as a seekable BytesIO.  The url attribute is left None.
    """
    headers_raw = response.serialize_headers()
    headers = httplib.HTTPMessage(io.BytesIO(headers_raw))
    return urllib2.addinfourl(
        fp=io.BytesIO(response.content), headers=headers,
        url=None, code=response.status_code)
Example #20
0
def extract(resp):
    """Read *resp* fully, decode gzip/deflate content, and return a new
    addinfourl over the decoded data with the same metadata.

    NOTE(review): `gzip(data)` only works if `gzip` is a local helper
    function; if it is the stdlib module this raises TypeError, which
    the broad except below swallows, leaving `data` still compressed.
    NOTE(review): `data` (a string, not a file object) is passed as the
    fp of the returned addinfourl -- confirm callers never .read() it.
    """
    data = resp.read()
    # print type(data)
    # print data
    try:
        # print len(data)
        if resp.headers.get('content-encoding') == 'gzip':
            data = gzip(data)
            # print data
        if resp.headers.get('content-encoding') == 'deflate':
            data = deflate(data)
            # print data
            # print len(data)
            # print resp.headers
            # print resp.url
            # print resp.code
            # resp = urllib2.addinfourl(fp=data, headers=resp.headers, url=resp.url, code=resp.code)
            # print type(data)
            # print resp.headers
            # print len(resp.read())
            # print dir(resp)
    except Exception as err:
        print err

    resp = urllib2.addinfourl(data,
                              headers=resp.headers,
                              url=resp.url,
                              code=resp.code)
    # print resp.read()
    return resp
	def test_urlopen_success(self):
		"""urlopen should accept the mocked 200 response without error."""
		response = urllib2.addinfourl(self.fp, self.headers, self.url)
		response.code = 200
		response.msg = "OK"
		with mock.patch('urllib2.urlopen') as mock_urlopen:
			mock_urlopen.return_value = response
			urlopen(self.url)
Example #22
0
File: util.py Project: afajl/sy
    def http_open(self, req):
        """Mock http_open: delegate to self.resp_func when set, enforce
        self.mock_url (404 on mismatch), otherwise return the canned
        mock response configured on this handler."""
        if self.resp_func:
            # BUG FIX: resp_func is an instance attribute, not a
            # global; the bare name raised NameError at runtime.
            return self.resp_func(req)

        if self.mock_url and self.mock_url != req.get_full_url():
            resp = urllib2.addinfourl(StringIO('Not Found'), 'Not Found',
                                      req.get_full_url())
            resp.code = 404
            resp.msg = 'Not Found'
            return resp

        resp = urllib2.addinfourl(StringIO(self.mock_file), self.mock_msg,
                                  req.get_full_url())
        resp.code = self.mock_code
        resp.msg = self.mock_resp_msg
        return resp
Example #23
0
    def http_open(self, req):
        """Record the incoming JSON-RPC request, then reply with the
        module-level `nextResponse` dict (sanitised with defaults) and
        reset it.  `requests` and `nextResponse` are module-level test
        fixtures."""
        requests.append(dict(
            url=req.get_full_url(),
            contenttype=req.headers['Content-type'],
            auth=base64.decodestring(req.headers['Authorization'].replace('Basic ', '')),
            data=json.loads(req.data),
        ))

        # Sanitise response
        global nextResponse
        if 'id' not in nextResponse:
            nextResponse['id'] = requests[-1]['data']['id']
        if 'error' not in nextResponse:
            nextResponse['error'] = None
        if 'result' not in nextResponse:
            nextResponse['result'] = 1234

        # '_data', '_code' and '_msg' keys override the raw body,
        # status code and status message respectively.
        resp = urllib2.addinfourl(
            StringIO(nextResponse['_data'] if '_data' in nextResponse else json.dumps(nextResponse)),
            "Message of some form",
            req.get_full_url(),
        )
        resp.code = nextResponse['_code'] if '_code' in nextResponse else 200
        resp.msg = nextResponse['_msg'] if '_msg' in nextResponse else "OK"
        nextResponse = dict()
        return resp
Example #24
0
def mock_response(req, resp_obj, resp_code):
    """Build a fake urllib2 response around *resp_obj* with the given
    HTTP status code and msg "OK"."""
    full_url = req.get_full_url()
    mocked = urllib2.addinfourl(StringIO(resp_obj), "This is a mocked URI!", full_url)
    mocked.code = resp_code
    mocked.msg = "OK"
    return mocked
Example #25
0
    def http_response(self, req, resp):
        # After-fetch hook: decide whether to persist the response to
        # the on-disk cache (if sticking to the http headers' will).

        # 304 means our cached copy is still valid -- nothing to save.
        if resp.code == 304:
            return resp

        if ('cache-control' in resp.headers or 'pragma' in resp.headers) and self.force_min is None:
            cache_control = parse_http_list(resp.headers.get('cache-control', ()))
            cache_control += parse_http_list(resp.headers.get('pragma', ()))

            # keep only bare directives (no-cache etc.), dropping
            # key=value ones such as max-age=N
            cc_list = [x for x in cache_control if '=' not in x]

            if 'no-cache' in cc_list or 'no-store' in cc_list or ('private' in cc_list and not self.private):
                # kindly follow web servers indications
                return resp

        if resp.headers.get('Morss') == 'from_cache':
            # it comes from cache, so no need to save it again
            return resp

        # save to disk
        data = resp.read()
        self._save(req.get_full_url(), resp.code, resp.msg, resp.headers, data, time.time())

        # read() consumed the body; hand back an equivalent response
        # over the buffered data.
        fp = BytesIO(data)
        old_resp = resp
        resp = addinfourl(fp, old_resp.headers, old_resp.url, old_resp.code)
        resp.msg = old_resp.msg

        return resp
def extract(resp):
    """Read *resp* fully, decode gzip/deflate content, and return a new
    addinfourl over the decoded data with the same metadata.

    NOTE(review): `gzip(data)` only works if `gzip` is a local helper
    function; if it is the stdlib module this raises TypeError, which
    the broad except below swallows, leaving `data` still compressed.
    NOTE(review): `data` (a string, not a file object) is passed as the
    fp of the returned addinfourl -- confirm callers never .read() it.
    """
    data = resp.read()
    # print type(data)
    # print data
    try:
        # print len(data)
        if resp.headers.get('content-encoding') == 'gzip':
            data = gzip(data)
            # print data
        if resp.headers.get('content-encoding') == 'deflate':
            data = deflate(data)
            # print data
            # print len(data)
            # print resp.headers
            # print resp.url
            # print resp.code
            # resp = urllib2.addinfourl(fp=data, headers=resp.headers, url=resp.url, code=resp.code)
            # print type(data)
            # print resp.headers
            # print len(resp.read())
            # print dir(resp)
    except Exception as err:
        print err

    resp = urllib2.addinfourl(data, headers=resp.headers, url=resp.url, code=resp.code)
    # print resp.read()
    return resp
Example #27
0
 def response(self, req):
     """Wrap self.content(req) in an addinfourl carrying this handler's
     configured code and msg.  The payload string doubles as the
     headers argument, mirroring the original behaviour."""
     payload = self.content(req)
     reply = urllib2.addinfourl(StringIO(payload), payload, req.get_full_url())
     reply.code = self.code
     reply.msg = self.msg
     return reply
Example #28
0
 def http_open(self, req):
     """Serve a canned GitHub subscriptions payload from test_data for
     matching URLs; other URLs return None so urllib2 falls through to
     the next handler."""
     if req.get_full_url().startswith("https://api.github.com/users/tester/subscriptions"):
         response_file = "test_data/good_subscription_response.json"
         resp = urllib2.addinfourl(open(response_file, "r"), "mock", req.get_full_url())
         resp.code = 200
         resp.msg = "OK"
         return resp
Example #29
0
 def http_response(self, _, resp):
     """If the body is gzip-encoded, replace it with a decoding stream
     while keeping headers, url, code and msg intact."""
     if resp.headers.get("content-encoding") == "gzip":
         unzipped = GzipFile(fileobj=StringIO(resp.read()), mode="r")
         previous = resp
         resp = urllib2.addinfourl(unzipped, previous.headers, previous.url, previous.code)
         resp.msg = previous.msg
     return resp
 def https_open(self, req):
     """Return canned job-listing JSON for '/tree/' URLs; other URLs
     fall through (return None)."""
     if '/tree/' in req.get_full_url():
         resp = urllib2.addinfourl(StringIO(TEST_GET_JOBS_RETURN), "",
                                   req.get_full_url())
         resp.code = 200
         resp.msg = "OK"
         return resp
Example #31
0
 def sftp_open(self, req):
     """Open an sftp:// URL and return it as a urllib2-style response.

     Connects with credentials taken from the URL (port defaults to
     22), stats the file for Content-length, guesses Content-type from
     the filename, and returns an addinfourl over the open SFTP file.

     Raises urllib2.HTTPError: 403 on bad credentials, 404 when the
     file is missing, 400 for anything else.

     NOTE(review): the paramiko Transport is never explicitly closed;
     it must stay open while the caller reads from the returned file
     handle, so cleanup is left to garbage collection -- confirm this
     is acceptable for long-running callers.
     """
     url = req.get_full_url()
     p = urlparse.urlparse(url)
     try:
         transport = paramiko.Transport((p.hostname, (p.port or 22)))
         transport.connect(username=p.username, password=p.password)
         sftp = paramiko.SFTPClient.from_transport(transport)
         filename = os.path.basename(p.path)
         path_to_file = os.path.dirname(p.path)
         sftp.chdir(path_to_file)
         size = sftp.stat(filename).st_size
         content_type = mimetypes.guess_type(
             filename)[0] or 'application/octet-stream'
         headers = {'Content-type': content_type, 'Content-length': size}
         fp = sftp.open(filename)
         return urllib2.addinfourl(fp, headers, url)
     except paramiko.ssh_exception.BadAuthenticationType as e:  # Bad Login Credentials
         raise urllib2.HTTPError(url=url,
                                 code=403,
                                 msg=e,
                                 hdrs=None,
                                 fp=None)
     except IOError as e:  # File not found
         raise urllib2.HTTPError(url=url,
                                 code=404,
                                 msg=e,
                                 hdrs=None,
                                 fp=None)
     except Exception as e:
         raise urllib2.HTTPError(url=url,
                                 code=400,
                                 msg=e,
                                 hdrs=None,
                                 fp=None)
  def test_get_swarm_results_success(self):
    """GetSwarmResults fetches a shard's output, triggers cleanup, and
    produces the expected buildbot output."""
    self.mox.StubOutWithMock(swarm_results.urllib2, 'urlopen')

    shard_output = json.dumps(
      {'machine_id': 'host',
       'machine_tag': 'localhost',
       'exit_codes': '0, 0',
       'output': SWARM_OUTPUT_WITHOUT_FAILURE
     }
    )

    # Canned 200 response served for the get_result poll.
    url_response = urllib2.addinfourl(StringIO.StringIO(shard_output),
                                      "mock message", 'host')
    url_response.code = 200
    url_response.msg = "OK"
    swarm_results.urllib2.urlopen('http://host:9001/get_result?r=key1'
                                  ).AndReturn(url_response)
    swarm_results.urllib2.urlopen('http://host:9001/cleanup_results',
                                  data=urllib.urlencode({'r': 'key1'})
                                  ).AndReturn(StringIO.StringIO(''))
    self.mox.ReplayAll()

    swarm_results.GetSwarmResults('http://host:9001', ['key1'])

    self.checkstdout(BUILDBOT_OUTPUT)

    self.mox.VerifyAll()
Example #33
0
def mock_response(req):
    """Serve a local FLV fixture for the expected URL.

    `ip_address_uri` and `info` are module-level test fixtures; other
    URLs fall through (return None).
    """
    if req.get_full_url() == ip_address_uri:
        mock_file = open('test_pytomo/OdF-oiaICZI.flv')
        resp = urllib2.addinfourl(mock_file, info, req.get_full_url())
        resp.code = 200
        resp.msg = "OK"
        return resp
def mock_response(req):
    """Serve a canned body from the URLS fixture keyed by the last path
    segment of the request URL; unknown URLs fall through (None)."""
    key = req.get_full_url().split('/')[-1]
    if key in URLS:
        reply = urllib2.addinfourl(StringIO.StringIO(URLS[key]), "mock message", req.get_full_url())
        reply.code = 200
        reply.msg = "OK"
        return reply
Example #35
0
def mock_response(req):
	"""Return canned geocoder JSON for known demozoo geocoder URLs.

	Raises for the Adlington lookup (which must never happen) and for
	any URL with no canned response.
	"""
	url = req.get_full_url()
	if url == 'http://geocoder.demozoo.org/?q=Oxford':
		body = """[
			{"name": "Oxford, Oxfordshire, England, United Kingdom", "id": 2640729},
			{"name": "Oxford, Butler County, Ohio, United States", "id": 4520760},
			{"name": "Oxford, Calhoun County, Alabama, United States", "id": 4081914}
		]"""
	elif url == 'http://geocoder.demozoo.org/2640729/':
		body = """{
			"full_name": "Oxford, Oxfordshire, England, United Kingdom",
			"latitude": 51.75222, "longitude": -1.25596,
			"country_name": "United Kingdom", "name": "Oxford", "id": 2640729, "country_code": "GB"
		}"""
	elif url == 'http://geocoder.demozoo.org/?q=Adlington%2C+Lancashire%2C+England%2C+United+Kingdom':
		raise Exception("Looking up Adlington is not allowed! :-)")
	elif url == 'http://geocoder.demozoo.org/?q=Royston+Vasey':
		body = "[]"
	else:
		raise Exception("No response defined for %s" % req.get_full_url())

	resp = urllib2.addinfourl(StringIO(body), None, req.get_full_url())
	resp.code = 200
	resp.msg = "OK"
	return resp
Example #36
0
    def http_response(self, req, resp):
        """When an HTML page was fetched but self.follow wants another
        MIME type, look for a <link rel="alternate"> of an accepted
        type and turn the response into a 302 redirect to it."""
        contenttype = resp.info().get('Content-Type', '').split(';')[0]
        if 200 <= resp.code < 300 and len(
                self.follow) and contenttype in MIMETYPE[
                    'html'] and contenttype not in self.follow:
            # oops, not what we were looking for, let's see if the html page suggests an alternative page of the right types

            data = resp.read()
            # Only parse the first 10 kB -- <link> tags live in <head>.
            links = lxml.html.fromstring(
                data[:10000]).findall('.//link[@rel="alternate"]')

            for link in links:
                if link.get('type', '') in self.follow:
                    resp.code = 302
                    resp.msg = 'Moved Temporarily'
                    resp.headers['location'] = link.get('href')
                    break

            # read() consumed the body; rebuild the response over it.
            fp = BytesIO(data)
            old_resp = resp
            resp = addinfourl(fp, old_resp.headers, old_resp.url,
                              old_resp.code)
            resp.msg = old_resp.msg

        return resp
Example #37
0
 def http_open(self, req):
     """Redirect HTTP requests to local files by swapping base_http_url
     for base_file_path, serving the file as a 200 response."""
     full_url = req.get_full_url()
     local_path = full_url.replace(base_http_url, base_file_path)
     reply = urllib2.addinfourl(open(local_path), "redirected to file", full_url)
     reply.code = 200
     reply.msg = "OK"
     return reply
Example #38
0
def mock_response(req):
    """Return a canned 200 response for the NPR RSS feed URL; any other
    URL falls through (returns None)."""
    if req.get_full_url() == "http://www.npr.org/rss/rss.php?id=1001":
        reply = urllib2.addinfourl(StringIO("mock response:"), "mock message",
                                   req.get_full_url())
        reply.code = 200
        reply.msg = "OK"
        return reply
Example #39
0
  def test_get_swarm_results_success(self):
    """GetSwarmResults fetches a shard's output, triggers cleanup, and
    produces the expected buildbot output."""
    self.mox.StubOutWithMock(swarm_results.urllib2, 'urlopen')

    shard_output = json.dumps(
      {'hostname': 'host',
       'exit_codes': '0, 0',
       'output': SWARM_OUTPUT_WITHOUT_FAILURE
     }
    )

    # Canned 200 response served for the get_result poll.
    url_response = urllib2.addinfourl(StringIO.StringIO(shard_output),
                                      "mock message", 'host')
    url_response.code = 200
    url_response.msg = "OK"
    swarm_results.urllib2.urlopen('http://host:9001/get_result?r=key1'
                                  ).AndReturn(url_response)
    swarm_results.urllib2.urlopen('http://host:9001/cleanup_results',
                                  mox.IgnoreArg())
    self.mox.ReplayAll()

    swarm_results.GetSwarmResults('http://host:9001', ['key1'])

    self.checkstdout(BUILDBOT_OUTPUT)

    self.mox.VerifyAll()
Example #40
0
 def response(self, req):
     """Build an addinfourl from self.content(req) with this handler's
     code/msg.  Note the body string is also passed as the headers
     argument."""
     data = self.content(req)
     url = req.get_full_url()
     resp = urllib2.addinfourl(StringIO(data), data, url)
     resp.code = self.code
     resp.msg = self.msg
     return resp
Example #41
0
    def http_response(self, req, resp):
        # After-fetch hook: decide whether to persist the response to
        # the on-disk cache (if sticking to the http headers' will).

        # 304 means our cached copy is still valid -- nothing to save.
        if resp.code == 304:
            return resp

        if ('cache-control' in resp.headers
                or 'pragma' in resp.headers) and self.force_min is None:
            cache_control = parse_http_list(
                resp.headers.get('cache-control', ()))
            cache_control += parse_http_list(resp.headers.get('pragma', ()))

            # keep only bare directives (no-cache etc.), dropping
            # key=value ones such as max-age=N
            cc_list = [x for x in cache_control if '=' not in x]

            if 'no-cache' in cc_list or 'no-store' in cc_list or (
                    'private' in cc_list and not self.private):
                # kindly follow web servers indications
                return resp

        if resp.headers.get('Morss') == 'from_cache':
            # it comes from cache, so no need to save it again
            return resp

        # save to disk
        data = resp.read()
        self._save(req.get_full_url(), resp.code, resp.msg, resp.headers, data,
                   time.time())

        # read() consumed the body; hand back an equivalent response
        # over the buffered data.
        fp = BytesIO(data)
        old_resp = resp
        resp = addinfourl(fp, old_resp.headers, old_resp.url, old_resp.code)
        resp.msg = old_resp.msg

        return resp
 def fake_response(self, req):
     """Return the canned grader answer (self.answer) for requests to
     the configured GRADER_ENDPOINT; other URLs fall through (None)."""
     grader_endpoint = getattr(settings, 'GRADER_ENDPOINT', 'localhost')
     if req.get_full_url() == grader_endpoint:
         resp = urllib2.addinfourl(StringIO(self.answer), "", req.get_full_url())
         resp.code = 200
         resp.msg = "OK"
         return resp
Example #43
0
def mock_response(req):
    """Return canned geocoder JSON for known demozoo geocoder URLs.

    Raises for the Adlington lookup (which must never happen) and for
    any URL with no canned response.
    """
    url = req.get_full_url()
    if url == 'http://geocoder.demozoo.org/?q=Oxford':
        body = """[
			{"name": "Oxford, Oxfordshire, England, United Kingdom", "id": 2640729},
			{"name": "Oxford, Butler County, Ohio, United States", "id": 4520760},
			{"name": "Oxford, Calhoun County, Alabama, United States", "id": 4081914}
		]"""
    elif url == 'http://geocoder.demozoo.org/2640729/':
        body = """{
			"full_name": "Oxford, Oxfordshire, England, United Kingdom",
			"latitude": 51.75222, "longitude": -1.25596,
			"country_name": "United Kingdom", "name": "Oxford", "id": 2640729, "country_code": "GB"
		}"""
    elif url == 'http://geocoder.demozoo.org/?q=Adlington%2C+Lancashire%2C+England%2C+United+Kingdom':
        raise Exception("Looking up Adlington is not allowed! :-)")
    elif url == 'http://geocoder.demozoo.org/?q=Royston+Vasey':
        body = "[]"
    else:
        raise Exception("No response defined for %s" % req.get_full_url())

    resp = urllib2.addinfourl(StringIO(body), None, req.get_full_url())
    resp.code = 200
    resp.msg = "OK"
    return resp
Example #44
0
 def test_ignore_non_index_etag(self):
     """ Etags are ignored for non-index requests """
     req = urllib2.Request('http://foo.com/')
     res = urllib2.addinfourl(StringIO('foo'), {'Etag': 'whatevs'}, 'http://foo.com/')
     self.cache_handler.http_response(req, res)
     # Neither metadata nor index cache files should have been written.
     self.assertFalse(os.path.exists(self.metadata_path))
     self.assertFalse(os.path.exists(self.index_path))
Example #45
0
 def http_response(self, req, resp):
     """Decompress gzip/deflate bodies, but only for requests that
     opted in via Accept-encoding and are not Range requests."""
     old_resp = resp
     if not req.has_header('Accept-encoding'):
         return resp
     # Range responses are partial -- presumably skipped to avoid
     # decoding a fragment of a compressed stream.
     if req.has_header('Range'):
         return resp
     if resp.headers.get("content-encoding") == "gzip":
         gz = _GzipFile(fileobj=StringIO(resp.read()), mode="r")
         resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
         resp.msg = old_resp.msg
         # deflate
     if resp.headers.get("content-encoding") == "deflate":
         gz = StringIO(self.deflate(resp.read()))
         resp = urllib2.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
         resp.msg = old_resp.msg
     return resp
Example #46
0
def mock_response(req):
    """Serve the canned RSS document for the known test source URL."""
    url = req.get_full_url()
    if url == 'http://sourcetest.com/rss.xml':
        response = urllib2.addinfourl(StringIO.StringIO(rss_source_xml),
                                      'mock message', url)
        response.code = 200
        response.msg = "OK"
        return response
Example #47
0
 def test_no_etag(self):
     """ No Etag header == no cache written """
     url = 'http://foo.com/index.json'
     request = urllib2.Request(url)
     response = urllib2.addinfourl(StringIO('foo'), {}, url)
     self.cache_handler.http_response(request, response)
     # Without an Etag, no cache artifacts should appear on disk.
     for path in (self.metadata_path, self.index_path):
         self.assertFalse(os.path.exists(path))
Example #48
0
 def testscheme_open(self, req):
     """Serve packaged WSDL/SOAP/TSV fixtures for the fake URL scheme."""
     def fixture_response(make_response, resource):
         # Wrap a packaged test fixture as a successful addinfourl response.
         return make_response(
             pkg_resources.resource_stream(__name__, resource),
             httplib.HTTPMessage(open('/dev/null')),
             req.get_full_url(),
             200
         )
     try:
         selector = req.get_selector()
         if selector == u'/ws_newcompass.asmx?WSDL':
             return fixture_response(urllib.addinfourl, 'tests/testdata/wsdl.xml')
         elif selector == u'/ws_newcompass.asmx':
             # Map the SOAPAction header's last path segment to a fixture file.
             action_path = urlparse.urlparse(req.get_header('Soapaction')).path
             soapResponse = action_path.strip('"').split('/')[-1] + '.xml'
             return fixture_response(urllib.addinfourl, 'tests/testdata/' + soapResponse)
         elif selector == u'/biomuta.tsv':
             return fixture_response(urllib2.addinfourl, 'tests/testdata/Biomuta.tsv')
         else:
             raise urllib2.URLError('Not found')
     except Exception:
         # Any failure (missing fixture, bad header, unknown selector)
         # surfaces uniformly as a URLError, as a real opener would report.
         raise urllib2.URLError('Not found')
Example #49
0
 def mock_response(req):
     "Mock Response class"
     if req.get_full_url() == VIDEO_URL:
         # Back the fake response with the real flash file on disk.
         flash_file = open(FLASH_FILE)
         response = urllib2.addinfourl(flash_file, INFO, req.get_full_url())
         response.code = 200
         response.msg = "OK"
         return response
Example #50
0
def mock_responses(request, code, msg, cors):
    """Mock responses."""
    body = StringIO('mock file')
    headers = {'Access-Control-Allow-Origin': cors}
    mocked = urllib2.addinfourl(body, headers, request.get_full_url())
    mocked.code = code
    mocked.msg = msg
    return mocked
Example #51
0
 def fake_response(self, req):
     """Return a canned 200 response carrying self.answer for the grader URL."""
     endpoint = getattr(settings, 'GRADER_ENDPOINT', 'localhost')
     if req.get_full_url() != endpoint:
         return None
     response = urllib2.addinfourl(StringIO(self.answer), "",
                                   req.get_full_url())
     response.code = 200
     response.msg = "OK"
     return response
Example #52
0
 def test_ignore_non_index_etag(self):
     """ Etags are ignored for non-index requests """
     target = 'http://foo.com/'
     req = urllib2.Request(target)
     res = urllib2.addinfourl(StringIO('foo'), {'Etag': 'whatevs'}, target)
     self.cache_handler.http_response(req, res)
     # The handler must not have cached anything for a non-index URL.
     self.assertFalse(os.path.exists(self.metadata_path))
     self.assertFalse(os.path.exists(self.index_path))
def generate_url_response(index, shard_output, exit_codes):
  """Build a fake 200 urllib2 response whose body is gen_data(...) as JSON."""
  payload = json.dumps(gen_data(index, shard_output, exit_codes))
  response = urllib2.addinfourl(
      StringIO.StringIO(payload), 'mock message', 'host')
  response.code = 200
  response.msg = 'OK'
  return response
  def http_response(self, req, resp):
    """Handle encodings in the order that they are encountered.

    Decodes trailing "gzip" layers from the Content-Encoding chain by
    wrapping the body in a GzipFile per layer.  Encodings this handler
    cannot decode are written back to the header so other handlers (or the
    caller) can deal with them.

    Args:
      req: The urllib2.Request being processed (unused here).
      resp: The response; its body stream may be replaced.

    Returns:
      The original response when there is nothing to decode, otherwise a
      new urllib2.addinfourl wrapping the decoded stream.
    """
    encodings = []
    headers = resp.headers

    # Locate the Content-Encoding header case-insensitively and split its
    # comma-separated value into the ordered list of applied encodings.
    encoding_header = None
    for header in headers:
      if header.lower() == "content-encoding":
        encoding_header = header
        for encoding in headers[header].split(","):
          encoding = encoding.strip()
          if encoding:
            encodings.append(encoding)
        break

    if not encodings:
      return resp

    # encoding_header can't be None here as the above return on an empty list
    # of encodings would have prevented this line from being reached.
    del headers[encoding_header]

    # Encodings are listed in application order, so decode from the end of
    # the list inward while the outermost remaining layer is gzip.
    fp = resp
    while encodings and encodings[-1].lower() == "gzip":
      fp = cStringIO.StringIO(fp.read())
      fp = gzip.GzipFile(fileobj=fp, mode="r")
      encodings.pop()

    if encodings:
      # Some unhandled encodings remain, leave them for other handlers.
      # There may be further encodings that we can handle nested within the
      # unhandled encoding.
      # TODO(user): The only resolution is to support more encodings.
      headers[encoding_header] = ", ".join(encodings)
      logger.warning("Unrecognized Content-Encoding: %s", encodings[-1])

    # Re-wrap the (possibly replaced) stream as a normal response object.
    # addinfourl only grew a ``code`` constructor argument in 2.6, so on
    # older interpreters the attribute is set after construction instead.
    msg = resp.msg
    if sys.version_info >= (2, 6):
      resp = urllib2.addinfourl(fp, headers, resp.url, resp.code)
    else:
      response_code = resp.code
      resp = urllib2.addinfourl(fp, headers, resp.url)
      resp.code = response_code
    resp.msg = msg

    return resp
 def http_error_302(self, req, fp, code, msg, headers):
     """Short-circuit redirects instead of following them.

     Returns the redirect target URL string when the response carries a
     Location header, otherwise the wrapped response itself (with
     ``status``/``code`` set for inspection by the caller).
     """
     infourl = urllib2.addinfourl(fp, headers, req.get_full_url())
     infourl.status = code
     infourl.code = code
     # headers['location'] raises KeyError when a malformed redirect lacks
     # the header; .get() degrades gracefully to returning the response.
     location = headers.get('location')
     if location:
         return location
     return infourl
Example #56
0
 def mock_response(self, req):
     """Return a canned response for req, keyed by its full URL.

     On a hit in ``self.res_dict`` the stored data/headers/res_code/res_msg
     build the response; unknown URLs get a 404 with a fixed error body.
     """
     url = req.get_full_url()
     # Direct .get() lookup instead of `url in self.res_dict.keys()`,
     # which built a throwaway key list on every call (Python 2).
     metadata = self.res_dict.get(url)
     if metadata is not None:
         resp = urllib2.addinfourl(StringIO(metadata['data']),
                                   metadata['headers'], url)
         resp.code = metadata['res_code']
         resp.msg = metadata['res_msg']
         return resp
     resp = urllib2.addinfourl(StringIO("The url was not found in my dictionary!"),
                               "mock message", url)
     resp.code = 404
     resp.msg = "Error"
     return resp
Example #57
0
 def test_no_etag(self):
     """ No Etag header == no cache written """
     target = 'http://foo.com/index.json'
     req = urllib2.Request(target)
     res = urllib2.addinfourl(StringIO('foo'), {}, target)
     self.cache_handler.http_response(req, res)
     # With no Etag supplied, nothing should have been cached to disk.
     self.assertFalse(os.path.exists(self.metadata_path))
     self.assertFalse(os.path.exists(self.index_path))
Example #58
0
 def http_response(self, req, resp):
     """Transparently decompress gzip-encoded response bodies."""
     if resp.headers.get('content-encoding') != 'gzip':
         return resp
     decompressed = gzip.GzipFile(fileobj=StringIO.StringIO(resp.read()), mode='r')
     wrapped = urllib2.addinfourl(decompressed, resp.headers, resp.url)
     wrapped.msg = resp.msg
     wrapped.code = resp.code # support for Python2.4/2.5
     return wrapped
Example #59
0
def to_addinfourl(response):
    """Convert a `django.http.HttpResponse` to a `urllib2.addinfourl`."""
    # Parse the serialized headers back into a mail-style message object.
    header_message = httplib.HTTPMessage(io.BytesIO(response.serialize_headers()))
    body = io.BytesIO(response.content)
    return urllib2.addinfourl(fp=body, headers=header_message, url=None,
                              code=response.status_code)