Example #1
    def post(self):
        # Record a flag indicating whether the request body is encrypted
        is_crypted = int(self.request.body[0])

        req_body = lib.decrypt(self.request.body)
        req_body = lib.loadDict(req_body)

        method = getattr(urlfetch, req_body.command)

        # On timeout, retry automatically up to 4 times; after 4 failures GAE raises and returns a 500 error to the client.
        for dl in lib.deadlineRetry:
            try:
                res = urlfetch.fetch(
                    url=req_body.path,
                    payload=lib.atob(req_body.payload),
                    method=method,
                    headers=json.loads(req_body.headers),
                    follow_redirects=False,
                    deadline=dl,
                    validate_certificate=True,
                )
            except urlfetch.DownloadError as e:
                logging.error(u'Download error: %s' % e)
            else:
                break  # no exception was raised, so leave the retry loop
Example #2
File: main.py  Project: Bitesher/keepagent
    def post(self):
        # Record a flag indicating whether the request body is encrypted
        is_crypted = int(self.request.body[0])

        req_body = lib.decrypt(self.request.body)
        req_body = lib.loadDict(req_body)

        method = getattr(urlfetch, req_body.command)

        # On timeout, retry automatically up to 4 times; after 4 failures GAE raises and returns a 500 error to the client.
        for dl in lib.deadlineRetry:
            try:
                res = urlfetch.fetch(url=req_body.path,
                                     payload=lib.atob(req_body.payload),
                                     method=method,
                                     headers=json.loads(req_body.headers),
                                     follow_redirects=False,
                                     deadline=dl,
                                     validate_certificate=True,
                                     )
            except urlfetch.DownloadError as e:
                logging.error(u'Download error: %s' % e)
            else:
                break  # no exception was raised, so leave the retry loop
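
Both server-side examples above depend on helpers from the project's lib module that are not shown in this listing. The block below is only a minimal sketch of what those helpers plausibly do, inferred from how they are called: atob/btoa look like base64 decode/encode, dumpDict/loadDict like JSON plus compression, deadlineRetry like a list of growing urlfetch deadlines, and encrypt/decrypt like a flag byte plus some cipher. The actual keepagent implementations, in particular the cipher, may differ.

    # Hypothetical stand-ins for the lib helpers used in the examples.
    # Names match the call sites; bodies are guesses based on usage only.
    import base64
    import json
    import zlib

    # assumed: four increasing urlfetch deadlines, matching the "retry 4 times" comment
    deadlineRetry = [10, 15, 20, 30]

    def btoa(data):
        # binary body -> ASCII-safe string that can sit inside a JSON field
        return base64.b64encode(data)

    def atob(data):
        # inverse of btoa
        return base64.b64decode(data)

    def dumpDict(d):
        # "serialize and compress": JSON-encode, then deflate
        return zlib.compress(json.dumps(d))

    class _Attrs(object):
        # attribute-style wrapper, so callers can write req_body.command
        def __init__(self, d):
            self.__dict__.update(d)

    def loadDict(s):
        # inverse of dumpDict
        return _Attrs(json.loads(zlib.decompress(s)))

    def encrypt(data):
        # assumed: a '1' flag byte plus an obfuscated body; placeholder
        # transform, NOT the real keepagent cipher
        return '1' + data[::-1]

    def decrypt(data):
        # both sides call decrypt unconditionally; the leading flag byte
        # ('0' = plain, '1' = encrypted) decides what to do
        flag, body = data[0], data[1:]
        return body[::-1] if flag == '1' else body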
Example #3
    def do_GET(self):

        # self.headers is a dict-like object without an `iteritems` method, so convert it to a plain `dict`
        req_headers = dict(self.headers)  # dict
        req_headers = dict((h, v) for h, v in req_headers.iteritems() if h.lower() not in self.forbidden_headers)

        req_body_len = int(req_headers.get('content-length', 0))
        req_body = self.rfile.read(req_body_len) # bin or str

        payload = {
            'command': self.command, # str
            'path': self.path, # str
            'headers': json.dumps(req_headers), # json
            'payload': lib.btoa(req_body), # str
        }

        # Serialize and compress the payload
        payload = lib.dumpDict(payload)

        # Decide whether the payload needs to be encrypted
        if self.path.startswith('https'):
            payload = lib.encrypt(payload)
        else:
            payload = '0' + payload

        # Forward the request to the GAE server
        for i in range(4):
            try:
                res = urllib2.urlopen(gaeServer, payload, lib.deadlineRetry[i])
            except (urllib2.URLError, socket.timeout) as e: 
                logging.error(e)
                continue

            if res.code == 200:  # the GAE request succeeded
                result = res.read()
                result = lib.decrypt(result)
                result = lib.loadDict(result)

                res_status_code = result.status_code
                res_headers = json.loads(result.headers)
                res_content = lib.atob(result.content)
                break
        else:
            # If every urllib2 attempt to reach GAE failed, switch to a different g_opener.
            urllib2.install_opener(get_g_opener())
            return  # res_status_code etc. were never set, so stop here

        # Send the response back to the browser
        try:
            self.send_response(res_status_code)  # e.g. 200, 301 or 404

            res_headers['connection'] = 'close'  # this does not hurt speed, and it makes many requests behave more predictably
            for k, v in res_headers.iteritems():
                try:
                    self.send_header(k, v)
                except UnicodeEncodeError:  # seen on Google Plus, where v contained Chinese characters
                    pass
            self.end_headers()
            self.wfile.write(res_content)
        except socket.error as e:
            # Raised when the browser closes the page before the data reaches it.
            logging.error(e)
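
Example #3 unpacks status_code, headers and content from the decrypted GAE reply, but the part of the GAE post() handler that builds that reply is not shown in Examples #1-#2. The following is only a guess at how the urlfetch result is probably sent back, mirroring the fields the client reads and the '0'-prefix / encrypt convention used for the request; the actual keepagent code may pack it differently.

    # Hypothetical tail of the GAE post() handler: pack the urlfetch result
    # into the reply that the client-side do_GET() expects to unpack.
    # Field names mirror Example #3; everything else is an assumption.
    import json

    def pack_response(res, is_crypted):
        res_dict = {
            'status_code': res.status_code,            # int
            'headers': json.dumps(dict(res.headers)),  # JSON string
            'content': lib.btoa(res.content),          # ASCII-safe body
        }
        res_body = lib.dumpDict(res_dict)
        # same convention as the request: encrypt, or prefix with the '0' flag
        return lib.encrypt(res_body) if is_crypted else '0' + res_body

    # inside post(), after the retry loop, something like:
    #     self.response.out.write(pack_response(res, is_crypted))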
Example #4
    def do_GET(self):

        # self.headers is a dict-like object without an `iteritems` method, so convert it to a plain `dict`
        req_headers = dict(self.headers)  # dict
        req_headers = dict((h, v) for h, v in req_headers.iteritems() if h.lower() not in self.forbidden_headers)

        req_body_len = int(req_headers.get("content-length", 0))
        req_body = self.rfile.read(req_body_len)  # bin or str

        payload = {
            "command": self.command,  # str
            "path": self.path,  # str
            "headers": json.dumps(req_headers),  # json
            "payload": lib.btoa(req_body),  # str
        }

        # Serialize and compress the payload
        payload = lib.dumpDict(payload)

        # Decide whether the payload needs to be encrypted
        if self.path.startswith("https"):
            payload = lib.encrypt(payload)
        else:
            payload = "0" + payload

        # Forward the request to the GAE server
        for i in range(4):
            try:
                res = urllib2.urlopen(gaeServer, payload, lib.deadlineRetry[i])
            except (urllib2.URLError, socket.timeout) as e:
                logging.error(e)
                continue

            if res.code == 200:  # the GAE request succeeded
                result = res.read()
                result = lib.decrypt(result)
                result = lib.loadDict(result)

                res_status_code = result.status_code
                res_headers = json.loads(result.headers)
                res_content = lib.atob(result.content)
                break
        else:
            # If every urllib2 attempt to reach GAE failed, switch to a different g_opener.
            urllib2.install_opener(get_g_opener())
            return  # res_status_code etc. were never set, so stop here

        # Send the response back to the browser
        try:
            self.send_response(res_status_code)  # e.g. 200, 301 or 404

            res_headers["connection"] = "close"  # 这样不会对速度造成影响,反而能使很多的请求表现得更为准确。
            for k, v in res_headers.iteritems():
                try:
                    self.send_header(k, v)
                except UnicodeEncodeError:  # seen on Google Plus, where v contained Chinese characters
                    pass
            self.end_headers()
            self.wfile.write(res_content)
        except socket.error as e:
            # Raised when the browser closes the page before the data reaches it.
            logging.error(e)
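
The do_GET() handler in Examples #3-#4 is written against the standard-library BaseHTTPRequestHandler interface (self.path, self.rfile, self.send_response and so on). A minimal, hypothetical way to mount it as the local proxy could look like the sketch below; gaeServer, forbidden_headers and get_g_opener are stand-ins for configuration the project defines elsewhere.

    # Minimal local-proxy wiring for the do_GET handler above (Python 2,
    # matching the urllib2 / iteritems usage in the examples).
    import BaseHTTPServer
    import SocketServer
    import urllib2

    gaeServer = 'https://your-app-id.appspot.com/fetch'  # assumed GAE endpoint

    def get_g_opener():
        # assumed: build a fresh urllib2 opener (e.g. with different proxy settings)
        return urllib2.build_opener()

    class LocalProxyHandler(BaseHTTPServer.BaseHTTPRequestHandler):
        # headers stripped before forwarding, referenced by do_GET above;
        # the real list lives in the project, this one is illustrative
        forbidden_headers = frozenset(['host', 'vary', 'proxy-connection'])

        # do_GET from Example #3 goes here; other verbs can reuse the same logic:
        # do_POST = do_HEAD = do_PUT = do_DELETE = do_GET

    class ThreadingHTTPServer(SocketServer.ThreadingMixIn,
                              BaseHTTPServer.HTTPServer):
        pass

    if __name__ == '__main__':
        ThreadingHTTPServer(('127.0.0.1', 8000), LocalProxyHandler).serve_forever()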