def httpRequest(url, values=None, headers=None, method='POST'):
    # Note: the original used mutable default arguments ({}), which leak state
    # between calls once `headers` is mutated below.
    values = values or {}
    headers = headers or {}
    # Construct an Agent.
    agent = Agent(reactor)
    data = urllib.urlencode(values)
    if method == "POST" and values:
        headers['Content-Type'] = ["application/x-www-form-urlencoded"]

    d = agent.request(method, url, Headers(headers),
                      StringProducer(data) if data else None)

    def handle_response(response):
        if response.code == 204:
            d = defer.succeed('')
        else:
            class SimpleReceiver(protocol.Protocol):
                def __init__(s, d):
                    s.buf = ''
                    s.d = d

                def dataReceived(s, data):
                    s.buf += data

                def connectionLost(s, reason):
                    # TODO: test if reason is twisted.web.client.ResponseDone,
                    # if not, do an errback
                    s.d.callback(s.buf)

            d = defer.Deferred()
            response.deliverBody(SimpleReceiver(d))
        return d

    d.addCallback(handle_response)
    return d
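# Several snippets in this collection call a StringProducer helper without
# defining it. A minimal sketch, closely following the body producer from the
# Twisted web client howto (an assumption about what these snippets expect):
from zope.interface import implementer
from twisted.internet import defer
from twisted.web.iweb import IBodyProducer

@implementer(IBodyProducer)
class StringProducer(object):
    """Feeds an in-memory string to an Agent request as its body."""

    def __init__(self, body):
        self.body = body
        self.length = len(body)

    def startProducing(self, consumer):
        consumer.write(self.body)
        return defer.succeed(None)

    def pauseProducing(self):
        pass

    def stopProducing(self):
        pass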
def twisted_fetch(self, url, runner):
    # http://twistedmatrix.com/documents/current/web/howto/client.html
    chunks = []
    client = Agent(self.reactor)
    d = client.request('GET', url)

    class Accumulator(Protocol):
        def __init__(self, finished):
            self.finished = finished

        def dataReceived(self, data):
            chunks.append(data)

        def connectionLost(self, reason):
            self.finished.callback(None)

    def callback(response):
        finished = Deferred()
        response.deliverBody(Accumulator(finished))
        return finished
    d.addCallback(callback)

    def shutdown(ignored):
        self.stop_loop()
    d.addBoth(shutdown)

    runner()  # run the IO loop until the response has been consumed
    self.assertTrue(chunks)
    return ''.join(chunks)
def asyncPost():
    # This generator closes over `self`, `method`, `url` and `http_body` from
    # an enclosing method and must be driven by defer.inlineCallbacks.
    agent = Agent(reactor)
    headers = {'User-Agent': [self.userAgent],
               'PRIVATE-TOKEN': [self._private_token]}

    class StringProducer(object):
        implements(IBodyProducer)

        def __init__(self):
            self.length = len(http_body)

        def startProducing(self, consumer):
            consumer.write(http_body)
            return defer.succeed(None)

        def stopProducing(self):
            pass

        def pauseProducing(self):
            pass

        def resumeProducing(self):
            pass

    response = yield agent.request(
        method, url, headers=Headers(headers),
        bodyProducer=StringProducer() if http_body else None)

    resp_headers = {}
    for k in response.headers._rawHeaders:
        resp_headers[k] = response.headers._rawHeaders[k][0]
    isValid = self._parse_headers(resp_headers)
    if isValid:
        body = yield readBody(response)
        defer.returnValue(json.loads(body))
    defer.returnValue(None)
def http_download(destination, url):
    class FileWriter(Protocol):
        def __init__(self, fp, fin):
            self.fp = fp
            self.tmp = fp.temporarySibling(".tmp")
            self.fh = self.tmp.open("w")
            self.fin = fin

        def dataReceived(self, bytes):
            self.fh.write(bytes)

        def connectionLost(self, reason):
            self.fh.close()
            if isinstance(reason.value, ResponseDone):
                self.tmp.moveTo(self.fp)
                self.fin.callback(None)
            else:
                self.fin.errback(reason)

    log.msg("Downloading jquery from {0}".format(url))
    agent = Agent(reactor)
    d = agent.request("GET", url)

    def gotResponse(response):
        finished = Deferred()
        response.deliverBody(FileWriter(destination, finished))
        return finished
    d.addCallback(gotResponse)
    return d
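# A hedged usage sketch for http_download above: `destination` must be a
# twisted.python.filepath.FilePath (it needs temporarySibling and moveTo),
# and the URL here is only an example.
from twisted.internet import reactor
from twisted.python.filepath import FilePath

d = http_download(FilePath("jquery.min.js"),
                  "http://code.jquery.com/jquery.min.js")
d.addBoth(lambda _: reactor.stop())
reactor.run()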
def _upload(reactor, url, project, revision, revision_date, benchmark,
            executable, environment, result_value, result_date, std_dev,
            max_value, min_value):
    data = {
        'commitid': str(revision),
        'revision_date': revision_date,
        'project': project,
        'benchmark': benchmark,
        'environment': environment,
        'executable': executable,
        'result_value': str(result_value),
        'result_date': result_date,
        'std_dev': str(std_dev),
        'max': str(max_value),
        'min': str(min_value),
    }
    print('uploading', data)
    agent = Agent(reactor)
    d = agent.request('POST', url, None, StringProducer(urlencode(data)))

    def check(response):
        d = readBody(response)

        def read(body):
            print('body', repr(body))
            if response.code != 200:
                raise Exception("Upload failed: %r" % (response.code,))
        d.addCallback(read)
        return d
    d.addCallback(check)
    return d
def checkprocesses(self):
    self.pdone = False
    self.ptotalcount = len(self.processports)
    if self.ptotalcount == 0:
        self.success()
        return
    self.pcount = 0
    for port in self.processports:
        def request(response):
            self.pcount += 1
            if self.pcount == self.ptotalcount:
                self.pdone = True
                self.success()

        # Bind the current port as a default argument; otherwise the closure
        # would only ever see the last value of the loop variable.
        def err(response, port=port):
            log.msg("[NAT] Received Error in making a process connection. "
                    "Port = " + str(port) + ". Reason = " + str(response))
            if not self.pdone:
                self.pdone = True
                self.failure()

        def timeout():
            if not self.pdone:
                self.failure()

        agent = Agent(reactor)
        d = agent.request(
            'GET',
            'http://' + self.host + ':' + str(port),
            Headers({'User-Agent': ['GridToGo Server']}),
            None)
        d.addCallback(request)
        d.addErrback(err)
        reactor.callLater(5, timeout)
def requestNewPage(headers, url, external_id):
    agent = Agent(reactor)
    request_d = agent.request('GET', url, Headers(headers), None)
    d = defer.Deferred()
    d.addCallback(parseNewPage, external_id)
    request_d.addCallback(downloadPage, d)
    return request_d
def _request(self, request, callback):
    ## Build URL (percent-encode characters that need escaping)
    url = self.origin + '/' + "/".join([
        "".join([
            ' ~`!@#$%^&*()+=[]\\{}|;\':",./<>?'.find(ch) > -1 and
            hex(ord(ch)).replace('0x', '%').upper() or ch
            for ch in list(bit)
        ]) for bit in request])

    agent = Agent(reactor)
    request = agent.request('GET', url, None)

    def received(response):
        finished = Deferred()
        response.deliverBody(PubNubResponse(finished))
        return finished

    def complete(data):
        try:
            obj = json.loads(data)
        except:
            obj = None
        callback(obj)

    request.addCallback(received)
    request.addBoth(complete)
def _post_api_twisted(path, payload, access_token=None):
    headers = {'Content-Type': ['application/json']}
    if access_token is not None:
        headers['X-Rollbar-Access-Token'] = [access_token]

    # Serialize this ourselves so we can handle error cases more gracefully
    payload = ErrorIgnoringJSONEncoder().encode(payload)

    url = urlparse.urljoin(SETTINGS['endpoint'], path)

    agent = TwistedHTTPClient(
        reactor, connectTimeout=SETTINGS.get('timeout', DEFAULT_TIMEOUT))
    resp = yield agent.request(
        'POST', url, TwistedHeaders(headers), StringProducer(payload))

    r = requests.Response()
    r.status_code = resp.code
    r.headers.update(resp.headers.getAllRawHeaders())

    bodyDeferred = Deferred()
    resp.deliverBody(ResponseAccumulator(resp.length, bodyDeferred))
    body = yield bodyDeferred
    r._content = body

    _parse_response(path, SETTINGS['access_token'], payload, r)
    # returnValue raises to terminate the generator; yielding it (as the
    # original did) was a no-op.
    returnValue(None)
def request(method, url, data=None, session_id=None):
    agent = Agent(reactor)

    headers = {
        'Cookie': ['XSRF-TOKEN=antani;'],
        'X-XSRF-TOKEN': ['antani'],
    }
    if session_id:
        headers['X-Session'] = [str(session_id)]

    bodyProducer = None
    if data:
        bodyProducer = StringProducer(json.dumps(data))

    try:
        response = yield agent.request(method, str(base_url + url),
                                       Headers(headers), bodyProducer)
    except Exception as exc:
        failed(exc, method, url, data)
        raise exc

    try:
        content_length = response.headers.getRawHeaders('content-length')
    except IndexError:
        content_length = None

    finished = defer.Deferred()
    response.deliverBody(BodyReceiver(finished, content_length))
    response_body = yield finished

    try:
        d = json.loads(response_body)
    except Exception as exc:
        failed(exc, method, url, data, response, response_body)

    defer.returnValue(d)
def get(self, base_url, args={}, username=None, password=None):
    #pool = HTTPConnectionPool(reactor, persistent=True)
    #pool.maxPersistentPerHost = 3
    #pool.retryAutomatically = False
    agent = Agent(reactor)

    headers = {'User-Agent': [self._USER_AGENT_]}
    if username:
        authorization = b64encode(username + ":" + password)
        # HTTP Basic auth requires the scheme prefix; the original omitted it.
        headers['Authorization'] = ['Basic ' + authorization]

    url = self._get_url(base_url, args)
    self.log.info('Requesting URL: %s' % url)

    d_agent = agent.request('GET', url, Headers(headers), None)
    d_agent.addCallback(self.cb_agent)
    d_agent.addErrback(self.cb_agent_err)

    self.d = defer.Deferred()
    return self.d
def __init__(self, auth_url, username, api_key, pool=None, proxy=None,
             extra_headers=None, verbose=False):
    self.auth_url = auth_url
    self.username = username
    self.api_key = api_key
    self.storage_url = None
    self.auth_token = None
    self.pool = pool
    if proxy:
        if ":" in proxy:
            addr, port = proxy.rsplit(":", 1)
            port = int(port)
        else:
            addr, port = proxy, 8000
        endpoint = TCP4ClientEndpoint(reactor, addr, port)
        self.agent = ProxyAgent(endpoint, pool=self.pool)
    else:
        contextFactory = WebClientContextFactory()
        contextFactory.noisy = False
        self.agent = Agent(reactor, contextFactory, pool=self.pool)
    self.extra_headers = extra_headers
    self.verbose = verbose
def _process_queue(self):
    if self.searches.under_processing:
        return False
    if not self.searches.request_queue:
        return False

    self.searches.under_processing = True
    self.searches.errors = {}
    self.searches.tweets = []
    self.searches.got_twt_data = []
    self.searches.processed_request = self.searches.request_queue.pop(0)
    self.tweet_count = 0
    debug_msg(str(self.searches.processed_request['search_spec']))

    auth_sys = AuthProcessor()
    params_urlized = auth_sys.get_search_params(
        self.searches.processed_request['search_spec'])
    req_headers = auth_sys.get_headers(params_urlized)

    contextFactory = TwtClientContextFactory()
    agent = Agent(reactor, contextFactory)
    d = agent.request(
        'GET',
        'https://api.twitter.com/1.1/search/tweets.json' + '?' + params_urlized,
        Headers(req_headers),
        None)

    borders = TwtResponseBorders(self, self.searches)
    d.addCallback(borders.cbRequest)
    d.addBoth(borders.cbShutdown)
    return True
def _make_agents(self, auth_files):
    """
    Configure the web clients that:
    * perform backchannel CAS ticket validation
    * proxy the target site
    """
    self.connectionPool = HTTPConnectionPool(self.reactor)
    if auth_files is None or len(auth_files) == 0:
        agent = Agent(self.reactor, pool=self.connectionPool)
    else:
        extra_ca_certs = []
        for ca_cert in auth_files:
            with open(ca_cert, "rb") as f:
                data = f.read()
            cert = crypto.load_certificate(crypto.FILETYPE_PEM, data)
            del data
            extra_ca_certs.append(cert)
        policy = CustomPolicyForHTTPS(extra_ca_certs)
        agent = Agent(self.reactor, contextFactory=policy,
                      pool=self.connectionPool)

    if self.proxy_client_endpoint_s is not None:
        self.proxyConnectionPool = HTTPConnectionPool(self.reactor)
        self.proxy_agent = Agent.usingEndpointFactory(
            self.reactor,
            WebClientEndpointFactory(self.reactor,
                                     self.proxy_client_endpoint_s),
            pool=self.proxyConnectionPool)
    else:
        self.proxy_agent = agent

    if self.cas_client_endpoint_s is not None:
        self.casConnectionPool = HTTPConnectionPool(self.reactor)
        self.cas_agent = Agent.usingEndpointFactory(
            self.reactor,
            WebClientEndpointFactory(self.reactor,
                                     self.cas_client_endpoint_s),
            pool=self.casConnectionPool)
    else:
        self.cas_agent = agent
def test_round_trip(self):
    d = Deferred()
    self.worker.deliver_hook = lambda x: d.callback(None)
    self.worker.startWorker()
    self.test_worker.startWorker()
    params = {
        'username': '******',
        'password': '******',
        'owner': 'owner',
        'service': 'service',
        'subservice': 'subservice',
        'call-number': '+27831234567',
        'origin': '12345',
        'messageid': 'message_id',
        'provider': 'provider',
        'tariff': 0,
        'text': 'message content',
    }
    agent = Agent(reactor)
    response = yield agent.request(
        'POST',
        self.config['url'],
        Headers({
            'User-Agent': ['Vumi Vas2Net Transport'],
            'Content-Type': ['application/x-www-form-urlencoded'],
        }),
        StringProducer(urlencode(params)))
    log.msg('Headers', list(response.headers.getAllRawHeaders()))
    self.assertTrue(response.headers.hasHeader('X-Nth-Smsid'))
    yield d
def getResource(path):
    agent = Agent(reactor)
    d = agent.request(
        'GET',
        path,
        Headers({'User-Agent': ['playout proxy']}),
        None)

    def handle_response(response):
        if response.code == 206:
            return defer.succeed('')
        else:
            class SimpleReceiver(Protocol):
                def __init__(s, d):
                    s.buf = ''
                    s.d = d

                def dataReceived(s, data):
                    s.buf += data

                def connectionLost(s, reason):
                    if response.code < 300:
                        s.d.callback(s.buf)
                    else:
                        s.d.errback(RuntimeError(
                            "Failed download: {} {}".format(response.code,
                                                            response.phrase)))

            d = defer.Deferred()
            response.deliverBody(SimpleReceiver(d))
            return d

    d.addCallback(handle_response)
    return d
def _make_short_url(self, long_url):
    self.request_in_flight = True
    api_uri = "https://git.io/"
    encoded = urllib.urlencode({"url": long_url})
    body_producer = StringProducer(encoded)
    agent = Agent(reactor)
    d = agent.request('POST', api_uri, bodyProducer=body_producer)

    def onRequestComplete(data):
        self._onRequestComplete()
        return data

    def onResponse(response):
        if response.code != 201:
            onRequestComplete(None)
            return long_url
        self._onRequestComplete()
        return response.headers.getRawHeaders("Location")[-1]
    d.addCallback(onResponse)

    def onError(failure):
        # Note: returning here swallows the failure, so the errback added
        # below never fires and the in-flight flag is only cleared on the
        # success paths above.
        return long_url
    d.addErrback(onError)
    d.addErrback(onRequestComplete)
    return d
def main():
    contextFactory = WebClientContextFactory()
    agent = Agent(reactor, contextFactory)
    d = agent.request("GET", "https://example.com/")
    d.addCallbacks(display, err)
    d.addCallback(lambda ignored: reactor.stop())
    reactor.run()
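# main() above assumes `display` and `err` helpers that are not shown;
# a minimal sketch of what they might look like:
def display(response):
    print('Response code:', response.code)

def err(failure):
    print('Request failed:', failure.getErrorMessage())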
def main():
    from twisted.internet import reactor
    agent = Agent(reactor)
    uri = "http://localhost:" + str(HTTP_PORT) + "/image/"
    args = "?%s=%s&%s=%s&%s=%d" % (
        IMAGE_UID_KEY, "1d0a0cde6aea6ad997307f01edd5c60b",
        USER_UID_KEY, "",
        IS_CLIENT_KEY, 0)
    print uri + args
    d = agent.request('GET', uri + args, None, None)

    def image_received(response):
        print vars(response)
        d = readBody(response)
        d.addCallback(cbBody)

    def cbBody(body):
        print 'Response body:'
        f = open("test_image_2.jpg", "wb")
        f.write(body)
        f.close()

    d.addCallback(image_received)
    reactor.run()
class Music:
    def __init__(self):
        self.recenttime = 0
        self.player = Agent(reactor)
        self.timePath = networking.musicPlayer.path("time")

    def current_time(self):
        """return deferred which gets called with the current time. This
        gets called really often"""
        d = self.player.request("GET", self.timePath)
        d.addCallback(self._timeReturned)
        return d

    def _timeReturned(self, response):
        done = Deferred()
        done.addCallback(self._bodyReceived)
        response.deliverBody(GatherJson(done))
        return done

    def _bodyReceived(self, data):
        if 't' in data:
            dispatcher.send("input time", val=data['t'])
        if 'song' in data and data['song']:
            dispatcher.send("current_player_song", song=URIRef(data['song']))
        return data['t']  # pass along to the real receiver

    def playOrPause(self, t=None):
        if t is None:
            # could be better
            self.current_time().addCallback(lambda t: self.playOrPause(t))
        else:
            self.player.request(
                "POST",
                networking.musicPlayer.path("seekPlayOrPause"),
                bodyProducer=StringProducer(json.dumps({"t": t})))
def fetch_deferred(self, request_params):
    """The main core to start the reactor and run the API in the background.
    The callbacks are also registered here.

    :return: crochet EventualResult
    """
    finished_resp = Deferred()
    agent = Agent(reactor)
    deferred = agent.request(**request_params)

    def response_callback(response):
        """Callback for a response received from the server, including 4XX
        and 5XX responses.

        The response param stores the headers and status code. A callback
        must be registered to store the response body, which is done via
        deliverBody.
        """
        response.deliverBody(_HTTPBodyFetcher(request_params, response,
                                              finished_resp))
    deferred.addCallback(response_callback)

    def response_errback(reason):
        """Error callback, e.g. server not reachable or connection refused.

        :param reason: The reason why the request failed
        :type reason: str
        """
        finished_resp.errback(reason)
    deferred.addErrback(response_errback)

    return finished_resp
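# fetch_deferred returns a Deferred that crochet can turn into an
# EventualResult. A hedged sketch of the typical wrapping; the names
# `fetch` and `client` are illustrative, not from the original:
from crochet import setup, run_in_reactor
setup()

@run_in_reactor
def fetch(client, request_params):
    return client.fetch_deferred(request_params)

# eventual = fetch(client, {'method': b'GET', 'uri': b'http://example.com/'})
# response = eventual.wait(timeout=5.0)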
def getMarket(card):
    cached = globals()['gMarket_Cached']
    if card.marketComments in cached and card.marketReviews in cached:
        d1 = defer.Deferred()
        d1.addCallback(lambda x: cached[card.marketComments])
        d1.callback(None)
        d2 = defer.Deferred()
        d2.addCallback(lambda x: cached[card.marketReviews])
        d2.callback(None)
        return defer.DeferredList((d1, d2))

    agent = Agent(reactor)
    headers = {}
    for k, v in standard_headers.items():
        if k == 'User-Agent':
            headers.update({'User-Agent': [
                standard_user_agents[randint(0, len(standard_user_agents) - 1)]]})
        else:
            headers.update({k: v})

    c = defer.Deferred()
    c.addCallback(parseMarket, url=card.marketComments)
    r = defer.Deferred()
    r.addCallback(parseMarket, url=card.marketReviews)

    # Pass the headers built above; the original passed an empty Headers(),
    # leaving the randomized User-Agent unused.
    request_comments = agent.request('GET', str(card.marketComments),
                                     Headers(headers), None)
    request_comments.addCallback(downloadPage, c)
    request_reviews = agent.request('GET', str(card.marketReviews),
                                    Headers(headers), None)
    request_reviews.addCallback(downloadPage, r)
    return defer.DeferredList((c, r))
def check_timeouts():
    # the server we should poke, defined at the top of this file
    global TARGET_TIMEOUT_SERVER

    sessions = Session.objects.get_timedout_sessions()
    for session in sessions:
        agent = Agent(reactor)
        print "Timing out session: %s (%d)" % (session.task.name, session.id)
        payload_dict = {
            'patient': session.patient.id,
            'session': session.id
        }
        payload = "&".join(map(lambda x: "%s=%s" % (x, payload_dict[x]),
                               payload_dict))
        d = agent.request(
            'POST',
            TARGET_TIMEOUT_SERVER,
            Headers({
                "Content-Type": ["application/x-www-form-urlencoded;charset=utf-8"],
                "Content-Length": [str(len(payload))]
            }),
            StringProducer(payload))
        d.addCallback(session_timeout_finished, sessionid=session.id)
        d.addErrback(session_timeout_errored, sessionid=session.id)

    # run again in a bit
    reactor.callLater(30, check_timeouts)
def httpsRequest(self, url, headers=None, method='GET', data=None):
    # Avoid a shared mutable default; the original used headers={} and
    # mutated it below.
    headers = dict(headers or {})
    headers['Content-Type'] = ['application/json']
    if url.startswith('https'):
        agent = Agent(reactor, WebClientContextFactory())
    else:
        agent = Agent(reactor)
    if data:
        data = StringProducer(data)
    # The original bound this to `request`, but it is the Response object.
    response = yield agent.request(method, url, Headers(headers), data)
    if response.length:
        d = defer.Deferred()
        response.deliverBody(BodyReceiver(d))
        body = yield d
        defer.returnValue(json.loads(body.read()))
    else:
        defer.returnValue(None)
def get_page(url, method="GET", payload=None, headers=None):
    """Downloads the page from given URL, using asynchronous networking"""
    agent = Agent(reactor)
    producer = None
    if payload:
        producer = StringProducer(payload)
    _headers = {"User-Agent": [settings.USER_AGENT]}
    if headers:
        for key, value in headers.items():
            _headers[key] = [value]
    response = yield agent.request(method, str(url), Headers(_headers),
                                   producer)
    # for h in response.headers.getAllRawHeaders():
    #     print h
    try:
        finished = defer.Deferred()
        # The original re-yielded the already-resolved response here.
        response.deliverBody(ResponseCruncher(finished))
    except:
        raise Exception("Downloading page '%s' failed" % url)
    defer.returnValue((yield finished))
def _do_request(self, method, document, **params):
    url = "%s/%s" % (self.API_SERVER, document)
    headers = self._get_headers()

    if method == "GET":
        # FIXME: Get rid of this
        if document in ["bugreport.json", "tefrequest.json", "version.json"]:
            url += "?q=" + urllib.quote(json.dumps(params))
        else:
            url += "?" + urllib.urlencode(params)
        producer = None
    elif method == "POST":
        producer = StringProducer(urllib.urlencode(params))
        headers["Content-Type"] = ["application/x-www-form-urlencoded"]
    else:
        raise AssertionError(method)

    log.info("Requesting %s %s %r" % (method, url, headers))
    agent = Agent(reactor)
    d = agent.request(method, url, Headers(headers), producer)

    def dataReceived(response):
        finished = Deferred()
        response.deliverBody(JsonDownloader(finished))
        return finished
    d.addCallback(dataReceived)
    return d
def http_get(uri):
    """
    Performs a GET request

    :param uri: The URL to perform a GET request to
    :return: A deferred firing the body of the response.
    :raises HttpError: When the HTTP response code is not OK (i.e. not the
        HTTP Code 200)
    """
    def _on_response(response):
        if response.code == http.OK:
            return readBody(response)
        if response.code == http.FOUND:
            # Check if location header contains magnet link
            location_headers = response.headers.getRawHeaders("location")
            if not location_headers:
                return fail(Failure(RuntimeError(
                    "HTTP redirect response does not contain location header")))
            new_uri = location_headers[0]
            if new_uri.startswith('magnet'):
                _, infohash, _ = parse_magnetlink(new_uri)
                if infohash:
                    return succeed(new_uri)
            return http_get(new_uri)
        raise HttpError(response)

    try:
        contextFactory = WebClientContextFactory()
        agent = Agent(reactor, contextFactory)
        headers = Headers({'User-Agent': ['Tribler ' + version_id]})
        deferred = agent.request('GET', uri, headers, None)
        deferred.addCallback(_on_response)
        return deferred
    except:
        return fail()
def testCreateCommitProvision(self):
    agent = Agent(reactor)
    header = Headers({'User-Agent': ['OpenNSA Test Client'],
                      'Host': ['localhost']})
    payload = {
        "source": "aruba:topology:ps?vlan=1783",
        "destination": "aruba:topology:bon?vlan=1783",
        "auto_commit": False
    }
    payload_data = json.dumps(payload)

    create_url = 'http://localhost:%i%s' % (self.PORT, rest.PATH)
    producer = FileBodyProducer(StringIO(payload_data))

    resp = yield agent.request('POST', create_url, header, producer)
    self.failUnlessEqual(resp.code, 201, 'Service did not return created')
    if not resp.headers.hasHeader('location'):
        self.fail('No location header in create response')

    conn_url = 'http://localhost:%i%s' % (
        self.PORT, resp.headers.getRawHeaders('location')[0])

    # so... the connection will not necessarily have moved into reserveheld,
    # and all sub-connections might not even be in place yet;
    # we cannot really commit until we are in created and ReserveHeld.
    # the clock doesn't really do anything here (not scheduling related)
    yield task.deferLater(reactor, 0.1, self._createCommitProvisionCB,
                          agent, conn_url, header)
def check_schedule():
    tasks = ScheduledTask.objects.get_due_tasks()
    for sched_task in tasks:
        agent = Agent(reactor)
        print "Executing task: ", sched_task.task.name
        payload = "user=%d&task=%d&arguments=%s" % (
            sched_task.user.id, sched_task.task.id,
            json.dumps(sched_task.arguments))
        print payload
        d = agent.request(
            'POST',
            # ullr?
            #'http://ullr:8001/taskmanager/exec',
            'http://localhost:8001/taskmanager/exec',
            Headers({
                "Content-Type": ["application/x-www-form-urlencoded;charset=utf-8"],
                "Content-Length": [str(len(payload))]
            }),
            StringProducer(payload))
        d.addCallback(task_finished, sched_taskid=sched_task.id)
        d.addErrback(task_errored, sched_taskid=sched_task.id)

    # run again in a bit
    reactor.callLater(5, check_schedule)
def render_GET(self, request):
    def fail(failure):
        request.write('we failed %s' % failure)
        request.finish()

    def return_body(body):
        """Called when we have a full response"""
        response = {'web-request': body}
        response = json.dumps(response, ensure_ascii=False,
                              encoding='utf-8').encode('utf-8')
        request.write(response)
        request.finish()

    def get_body(result):
        # now that we have the response, we can fetch its body with
        # readBody, which is also an async operation.
        d2 = readBody(result)  # get the contents of the page.
        d2.addCallback(return_body)
        d2.addErrback(fail)

    # setup the deferred/callback for the first asynchronous call...
    agent = Agent(reactor)
    d1 = agent.request('GET', 'http://example.com/')
    d1.addCallback(get_body)
    d1.addErrback(fail)
    return server.NOT_DONE_YET
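# A hedged wiring sketch for the render_GET above; the class name ProxyPage
# is made up here, and render_GET is assumed to be the module-level handler
# from the previous snippet.
from twisted.web import server
from twisted.web.resource import Resource
from twisted.internet import reactor

class ProxyPage(Resource):
    isLeaf = True
    render_GET = render_GET  # reuse the handler defined above

reactor.listenTCP(8080, server.Site(ProxyPage()))
reactor.run()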
def remoteCall(self, call):
    """
    RPC handler remoting to Ext.Direct servers. This method is usually
    registered via registerHandlerMethodForRpc on a WAMP protocol.
    """
    proto = call.proto
    uri = call.uri
    args = call.args

    ## extract extra information from RPC call handler argument
    (id, action, method, _) = call.extra

    ## get the Ext.Direct remote onto which we will forward the call
    remote = self.remotesById[id]

    ## construct the POST body
    d = {'action': action,
         'method': method,
         'data': args,
         'type': 'rpc',
         'tid': 1}
    body = json_dumps(d)

    if remote.forwardCookies and \
       proto.cookies and \
       proto.cookies.has_key(remote.routerDomain) and \
       proto.cookies[remote.routerDomain] != "":
        cookie = str(proto.cookies[remote.routerDomain])
    else:
        cookie = None

    if not remote.usePersistentConnections:
        ## Do HTTP/POST as individual request
        ##
        headers = {'Content-Type': 'application/json',
                   'User-Agent': ExtDirectRemoter.USER_AGENT}
        if cookie:
            headers['Cookie'] = cookie
        d = getPage(url=remote.routerUrl,
                    method='POST',
                    postdata=body,
                    headers=headers,
                    timeout=remote.requestTimeout,
                    connectionTimeout=remote.connectionTimeout,
                    followRedirect=remote.redirectLimit > 0)
    else:
        ## Do HTTP/POST via HTTP connection pool
        ##
        headers = {'Content-Type': ['application/json'],
                   'User-Agent': [ExtDirectRemoter.USER_AGENT]}
        if cookie:
            headers['Cookie'] = [cookie]
        agent = Agent(self.reactor,
                      pool=self.httppools[remote.id],
                      connectTimeout=remote.connectionTimeout)
        if remote.redirectLimit > 0:
            agent = RedirectAgent(agent, redirectLimit=remote.redirectLimit)
        ## FIXME: honor requestTimeout
        d = agent.request('POST',
                          remote.routerUrl,
                          Headers(headers),
                          StringProducer(body))

        def onResponse(response):
            if response.code == 200:
                finished = Deferred()
                response.deliverBody(StringReceiver(finished))
                return finished
            else:
                # defer.fail needs a Failure or exception, not a bare string
                return defer.fail(
                    Exception("%s [%s]" % (response.code, response.phrase)))
        d.addCallback(onResponse)

    ## request information provided as error detail in case the call fails
    remotingRequest = {'provider': 'extdirect',
                       'router-url': remote.routerUrl,
                       'use-persistent-connections': remote.usePersistentConnections,
                       'request-timeout': remote.requestTimeout,
                       'connection-timeout': remote.connectionTimeout,
                       'action': action,
                       'method': method}

    d.addCallbacks(self._onRemoteCallResult,
                   self._onRemoteCallError,
                   callbackArgs=[remotingRequest],
                   errbackArgs=[remotingRequest])

    ## FIXME!
    d.addCallback(self.onAfterRemoteCallSuccess, id)
    d.addErrback(self.onAfterRemoteCallError, id)

    return d
def createNonVerifyingHTTPClient(reactor, agent_kwds=None, **kwds):
    agent_kwds = normalizeDict_(agent_kwds)
    agent_kwds['contextFactory'] = NonVerifyingContextFactory()
    return HTTPClient(Agent(reactor, **agent_kwds), **kwds)
def test_EncodingJSONRPCServer(self):
    DATA = {'foo': 'bar'}
    REQUEST = '{"jsonrpc": "2.0", "method": "test", "params": [], "id": 1}'
    RESPONSE = '{"jsonrpc": "2.0", "id": 1, "result": ' + json.dumps(DATA) + '}'

    class RPCServer(JSONRPCServer):
        def jsonrpc_test(self):
            return defer.succeed(DATA)

    class ReceiverProtocol(Protocol):
        def __init__(self, finished):
            self.finished = finished
            self.body = []

        def dataReceived(self, bytes):
            self.body.append(bytes)

        def connectionLost(self, reason):
            self.finished.callback(''.join(self.body))

    class StringProducer(object):
        implements(IBodyProducer)

        def __init__(self, body):
            self.body = body
            self.length = len(body)

        def startProducing(self, consumer):
            consumer.write(self.body)
            return defer.succeed(None)

        def pauseProducing(self):
            pass

        def stopProducing(self):
            pass

    server = RPCServer()
    resource = EncodingJSONRPCServer(server)
    site = Site(resource)
    port = reactor.listenTCP(8888, site, interface='127.0.0.1')

    agent = ContentDecoderAgent(Agent(reactor), [('gzip', GzipDecoder)])
    response = yield agent.request('POST', 'http://127.0.0.1:8888',
                                   Headers({'Accept-Encoding': ['gzip']}),
                                   StringProducer(REQUEST))

    self.assertTrue(isinstance(response, GzipDecoder))

    finished = defer.Deferred()
    response.deliverBody(ReceiverProtocol(finished))
    data = yield finished

    self.assertEqual(data, RESPONSE)
    port.stopListening()
class Proxy(object):
    """
    A proxy to one specific JSON-RPC server. Pass the server URL to the
    constructor and call proxy.callRemote('method', *args) to call 'method'
    with *args.
    """

    def __init__(self, url, version=jsonrpc.VERSION_1, connectTimeout=None,
                 credentials=None, contextFactory=None, pool=None):
        """
        @type url: str
        @param url: URL of the RPC server. Supports HTTP and HTTPS for now,
            more might come in the future.

        @type version: int
        @param version: Which JSON-RPC version to use? The default is 1.0.

        @type connectTimeout: float
        @param connectTimeout: Connection timeout. Note that we don't connect
            when creating this object, but in callRemote, so the timeout will
            apply to callRemote.

        @type credentials: twisted.cred.credentials.ICredentials
        @param credentials: Credentials for basic HTTP authentication.
            Supported are Anonymous and UsernamePassword classes. If None
            then t.c.c.Anonymous object is used as default.

        @type contextFactory: twisted.internet.ssl.ClientContextFactory
        @param contextFactory: A context factory for SSL clients. If None
            then Agent's default is used.

        @type pool: twisted.web.client.HTTPConnectionPool
        @param pool: Connection pool used to manage HTTP connections. If None
            then Agent's default is used.
        """
        self.url = url
        self.version = version

        if not credentials:
            credentials = Anonymous()
        if not isinstance(credentials, (Anonymous, UsernamePassword)):
            raise NotImplementedError("'%s' credentials are not supported" %
                                      type(credentials))

        kwargs = {}
        if connectTimeout:
            kwargs['connectTimeout'] = connectTimeout
        if contextFactory:
            kwargs['contextFactory'] = contextFactory
        if pool:
            kwargs['pool'] = pool
        self.agent = Agent(reactor, **kwargs)

        self.credentials = credentials
        self.auth_headers = None

    def checkAuthError(self, response):
        """
        Check for authentication error.

        @type response: t.w.c.Response
        @param response: Response object from the call

        @raise JSONRPCError: If the call failed with authorization error

        @rtype: t.w.c.Response
        @return: If there was no error, just return the response
        """
        if response.code == 401:
            raise jsonrpc.JSONRPCError('Unauthorized', jsonrpc.INVALID_REQUEST)
        return response

    def bodyFromResponse(self, response):
        """
        Parses out the body from the response.

        @type response: t.w.c.Response
        @param response: Response object from the call

        @rtype: t.i.d.Deferred
        @return: Deferred, that will fire callback with body of the response
            (as string)
        """
        finished = Deferred()
        response.deliverBody(ReceiverProtocol(finished))
        return finished

    def callRemote(self, method, *args, **kwargs):
        """
        Remotely calls the method, with args. Given that we keep reference to
        the call via the Deferred, there's no need for id. It will coin some
        random anyway, just to satisfy the spec.

        @type method: str
        @param method: Method name

        @type *args: list
        @param *args: List of arguments for the method.

        @rtype: t.i.d.Deferred
        @return: Deferred, that will fire with whatever the 'method' returned.

        @TODO: support batch requests
        """
        if kwargs:
            json_request = jsonrpc.encodeRequest(method, kwargs,
                                                 version=self.version)
        else:
            json_request = jsonrpc.encodeRequest(method, args,
                                                 version=self.version)

        body = StringProducer(json_request)
        headers_dict = {'Content-Type': ['application/json']}
        if not isinstance(self.credentials, Anonymous):
            headers_dict.update(self._getBasicHTTPAuthHeaders())
        headers = Headers(headers_dict)

        d = self.agent.request(b'POST', self.url.encode(), headers, body)
        d.addCallback(self.checkAuthError)
        d.addCallback(self.bodyFromResponse)
        d.addCallback(jsonrpc.decodeResponse)
        return d

    def _getBasicHTTPAuthHeaders(self):
        """
        @rtype: dict
        @return: 'Authorization' header
        """
        if not self.auth_headers:
            username = self.credentials.username
            password = self.credentials.password
            if password is None:
                password = ''
            # encoded_cred = base64.encodestring('%s:%s' % (username, password))
            encoded_cred = base64.encodebytes(
                f'{username}:{password}'.encode()).decode()
            auth_value = "Basic " + encoded_cred.strip()
            self.auth_headers = {'Authorization': [auth_value]}
        return self.auth_headers
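# A minimal usage sketch for the Proxy above; the URL and the method name
# are illustrative, not part of the original snippet.
from twisted.internet import reactor

proxy = Proxy('http://127.0.0.1:7080')
d = proxy.callRemote('echo', 'hello')
d.addCallback(lambda result: print(result))
d.addBoth(lambda _: reactor.stop())
reactor.run()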
def __init__(self, api_key):
    contextFactory = WebClientContextFactory()
    self.agent = Agent(reactor, contextFactory)
    self._api_key = api_key
def get_web_agent():
    """An HTTP agent that connects to the web without using Tor"""
    return Agent(reactor, connectTimeout=5)
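# A hedged usage sketch for get_web_agent; example.com stands in for a
# real URL.
from twisted.internet import reactor
from twisted.web.client import readBody

d = get_web_agent().request(b'GET', b'http://example.com/')
d.addCallback(readBody)
d.addCallback(lambda body: print('got %d bytes' % len(body)))
d.addBoth(lambda _: reactor.stop())
reactor.run()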
class OpenBMCPowerDriver(PowerDriver):

    chassis = False

    name = "openbmc"
    description = "OpenBMC Power Driver"
    settings = [
        make_setting_field("power_address", "OpenBMC address", required=True),
        make_setting_field("power_user", "OpenBMC user", required=True),
        make_setting_field(
            "power_pass",
            "OpenBMC password",
            field_type="password",
            required=True,
        ),
    ]
    ip_extractor = make_ip_extractor("power_address")

    cookie_jar = compat.cookielib.CookieJar()
    agent = CookieAgent(
        Agent(reactor, contextFactory=WebClientContextFactory()), cookie_jar
    )

    def detect_missing_packages(self):
        # no required packages
        return []

    @asynchronous
    def openbmc_request(self, method, uri, data=None):
        """Send the RESTful request and return the response."""
        d = self.agent.request(
            method,
            uri,
            Headers({b"Content-Type": [b"application/json"]}),
            data,
        )

        def cb_request(response):
            """Render the response received."""

            def decode_data(data):
                data = data.decode("utf-8")
                return json.loads(data)

            # Error out if the response has a status code of 400 or above.
            if response.code >= int(HTTPStatus.BAD_REQUEST):
                raise PowerActionError(
                    "OpenBMC request failed with response status code:"
                    " %s." % response.code
                )
            f = readBody(response)
            f.addCallback(decode_data)
            return f

        d.addCallback(cb_request)
        return d

    def get_uri(self, context, path=None):
        """Return url for the host."""
        uri = context.get("power_address")
        if path is not None:
            uri = uri + path
        if "https" not in uri and "http" not in uri:
            uri = join("https://", uri)
        return uri.encode("utf-8")

    @inlineCallbacks
    def command(self, context, method, uri, data=None):
        """Current deployments of OpenBMC in the field do not support
        header based authentication. To issue RESTful commands, we need
        to login, issue the RESTful command, and logout.
        """
        # login to BMC
        login_uri = self.get_uri(context, "/login")
        login_creds = {
            "data": [context.get("power_user"), context.get("power_pass")]
        }
        login_data = FileBodyProducer(
            BytesIO(json.dumps(login_creds).encode("utf-8"))
        )
        login = yield self.openbmc_request(b"POST", login_uri, login_data)
        login_status = login.get("status")
        if login_status.lower() != "ok":
            raise PowerFatalError(
                "OpenBMC power driver received unexpected response"
                " to login command"
            )
        # issue command
        cmd_out = yield self.openbmc_request(method, uri, data)
        # logout of BMC
        logout_uri = self.get_uri(context, "/logout")
        logout_creds = {"data": []}
        logout_data = FileBodyProducer(
            BytesIO(json.dumps(logout_creds).encode("utf-8"))
        )
        logout = yield self.openbmc_request(b"POST", logout_uri, logout_data)
        logout_status = logout.get("status")
        if logout_status.lower() != "ok":
            raise PowerFatalError(
                "OpenBMC power driver received unexpected response"
                " to logout command"
            )
        return cmd_out

    @inlineCallbacks
    def set_pxe_boot(self, context):
        """Set the host to PXE boot."""
        # set boot mode to one-time boot.
        uri = self.get_uri(context, HOST_CONTROL + "one_time/attr/BootMode")
        data = FileBodyProducer(BytesIO(json.dumps(REG_MODE).encode("utf-8")))
        yield self.command(context, b"PUT", uri, data)
        # set one-time boot source to network.
        uri = self.get_uri(context, HOST_CONTROL + "one_time/attr/BootSource")
        data = FileBodyProducer(BytesIO(json.dumps(SRC_NET).encode("utf-8")))
        yield self.command(context, b"PUT", uri, data)

    @asynchronous
    @inlineCallbacks
    def power_query(self, system_id, context):
        """Power query host."""
        uri = self.get_uri(context, HOST_STATE + "CurrentHostState")
        power_state = yield self.command(context, b"GET", uri, None)
        status = power_state.get("data").split(".")[-1].lower()
        if all(status not in state for state in ("running", "off")):
            raise PowerFatalError(
                "OpenBMC power driver received unexpected response"
                " to power query command"
            )
        return {"running": "on", "off": "off"}.get(status)

    @asynchronous
    @inlineCallbacks
    def power_on(self, system_id, context):
        """Power on host."""
        cur_state = yield self.power_query(system_id, context)
        uri = self.get_uri(context, HOST_STATE + "RequestedHostTransition")
        # power off host if it is currently on.
        if cur_state == "on":
            data = FileBodyProducer(
                BytesIO(json.dumps(HOST_OFF).encode("utf-8"))
            )
            off_state = yield self.command(context, b"PUT", uri, data)
            status = off_state.get("status")
            if status.lower() != "ok":
                raise PowerFatalError(
                    "OpenBMC power driver received unexpected response"
                    " to power off command"
                )
        # set one-time boot to PXE boot.
        yield self.set_pxe_boot(context)
        # power on host.
        data = FileBodyProducer(BytesIO(json.dumps(HOST_ON).encode("utf-8")))
        on_state = yield self.command(context, b"PUT", uri, data)
        status = on_state.get("status")
        if status.lower() != "ok":
            raise PowerFatalError(
                "OpenBMC power driver received unexpected response"
                " to power on command"
            )

    @asynchronous
    @inlineCallbacks
    def power_off(self, system_id, context):
        """Power off host."""
        uri = self.get_uri(context, HOST_STATE + "RequestedHostTransition")
        data = FileBodyProducer(BytesIO(json.dumps(HOST_OFF).encode("utf-8")))
        # set next one-time boot to PXE boot.
        yield self.set_pxe_boot(context)
        # power off host.
        power_state = yield self.command(context, b"PUT", uri, data)
        status = power_state.get("status")
        if status.lower() != "ok":
            raise PowerFatalError(
                "OpenBMC power driver received unexpected response"
                " to power off command"
            )
def setUp(self):
    """
    Ready the environment for tests which actually run docker against
    powerstrip with powerstrip-flocker enabled.

    * Log into each node in turn:
      * Run powerstrip-flocker in docker
      * Run powerstrip in docker
    """
    self.agent = Agent(reactor)  # no connectionpool
    self.client = HTTPClient(self.agent)
    d = get_test_cluster(self, 2)

    def got_cluster(cluster):
        self.cluster = cluster
        self.powerstripflockers = {}
        self.powerstrips = {}
        daemonReadyDeferreds = []
        self.ips = [node.address for node in cluster.nodes]
        for ip in self.ips:
            # cleanup after previous test runs
            #run(ip, ["pkill", "-f", "flocker"])
            for proc in ("powerstrip", "powerstrip-flocker"):
                try:
                    run(ip, ["docker", "rm", "-f", proc])
                except Exception:
                    print proc, "was not running, not killed, OK."
            # put a powerstrip config in place
            run(ip, ["mkdir", "-p", "/root/powerstrip-config"])
            run(ip,
                ["sh", "-c", "cat > /root/powerstrip-config/adapters.yml"],
                """
version: 1
endpoints:
  "POST /*/containers/create":
    pre: [flocker]
adapters:
  flocker: http://powerstrip-flocker/flocker-adapter
""")
            # start powerstrip-flocker
            POWERSTRIP_FLOCKER = "%s/powerstrip-flocker:latest" % (
                DOCKER_PULL_REPO,)
            run(ip, ["docker", "pull", POWERSTRIP_FLOCKER])
            # TODO - come up with cleaner/nicer way of powerstrip-flocker
            # being able to establish its own host uuid (or volume
            # mountpoints), such as API calls.
            host_uuid = run(ip, [
                "python", "-c", "import json; "
                "print json.load(open('/etc/flocker/volume.json'))['uuid']"
            ]).strip()
            self.powerstripflockers[ip] = remote_service_for_test(
                self, ip,
                ["docker", "run", "--name=powerstrip-flocker",
                 "--expose", "80",
                 "-p", "9999:80",  # so that we can detect it being up
                 "-e", "FLOCKER_CONTROL_SERVICE_BASE_URL=%s" % (
                     self.cluster.base_url,),
                 "-e", "MY_NETWORK_IDENTITY=%s" % (ip,),
                 "-e", "MY_HOST_UUID=%s" % (host_uuid,),
                 POWERSTRIP_FLOCKER])
            print "Waiting for powerstrip-flocker to show up on", ip, "..."
            daemonReadyDeferreds.append(wait_for_socket(ip, 9999))

            # start powerstrip
            # TODO - use the new unix-socket powerstrip approach.
            POWERSTRIP = "clusterhq/powerstrip:latest"
            run(ip, ["docker", "pull", POWERSTRIP])
            self.powerstrips[ip] = remote_service_for_test(
                self, ip,
                ["docker", "run", "--name=powerstrip",
                 "-p", "2375:2375",
                 "-v", "/var/run/docker.sock:/var/run/docker.sock",
                 "-v", "/root/powerstrip-config/adapters.yml:"
                       "/etc/powerstrip/adapters.yml",
                 "--link", "powerstrip-flocker:powerstrip-flocker",
                 POWERSTRIP])
            print "Waiting for powerstrip to show up on", ip, "..."
            daemonReadyDeferreds.append(wait_for_socket(ip, 2375))

        d = defer.gatherResults(daemonReadyDeferreds)
        # def debug():
        #     services
        #     import pdb; pdb.set_trace()
        # d.addCallback(lambda ignored: deferLater(reactor, 1, debug))
        return d
    d.addCallback(got_cluster)
    return d
        # (fragment: tail of a response-body protocol's connectionLost)
        print('Finished receiving body:', self.num, reason.getErrorMessage())
        #print(len(self.result))
        print("  %s lost some data; to tolerate this error, set "
              "DOWNLOAD_FAIL_ON_DATALOSS = False in the default settings"
              % self.response.request.absoluteURI.decode())
        r = json.loads(self.result)
        # Calling callback(data) pushes a value into the deferred chain as
        # [True, data], which is how the fetched body is handed on to the
        # next function.
        self.finished.callback(r)

url = 'https://www.smzdm.com/homepage/json_more?p='
contextFactory = WebClientContextFactory()
agent = Agent(reactor, contextFactory)
result = list()
t1 = time.time()
for i in range(3):
    i = str(i)
    u = url + i
    print(u)
    d = agent.request(b"GET", u.encode("utf-8"))
    d.addCallback(cbRequest, u, i)
    d.addErrback(lambda result: print(result))
    #d.addCallback(get_smzdm_datas)
    #d.addCallback(print_smzdm_result, u)
    result.append(d)
dd = defer.DeferredList(result)
class MPEx(object):
    testdata = None

    def __init__(self, debug=False, pool=None, mpexurl='http://mpex.co',
                 **kwargs):
        self.gpg = gnupg.GPG()
        self.mpex_url = mpexurl
        self._mpex_fingerprint = 'A57D509A'
        self.passphrase = None
        self.debug = debug
        if self.debug:
            self.df = open("mpex_%d.txt" % time.time(), 'w')
        self.agent = Agent(reactor, pool=pool, connectTimeout=TIMEOUT / 3)

    def command(self, command):
        if self.debug:
            self.df.write(command)
        log.info("command('%s')", command)
        if self.testdata:
            log.debug('returning testdata instead:%s', self.testdata)
            return self.testdata
        if self.passphrase is None:
            return None
        signed_data = str(self.gpg.sign(command, passphrase=self.passphrase))
        m = hashlib.md5()
        m.update(signed_data)
        md5d = m.hexdigest()
        log.debug('Signed:' + signed_data + "\nDigest/Track: " + md5d + "\n")
        encrypted_ascii_data = self.gpg.encrypt(str(signed_data),
                                                self.mpex_fingerprint(),
                                                passphrase=self.passphrase)
        data = urllib.urlencode({'msg': str(encrypted_ascii_data)})
        body = FileBodyProducer(StringIO(data))
        d = self.agent.request(
            'POST', self.mpex_url,
            Headers({
                'Content-Type': ['application/x-www-form-urlencoded'],
                #'Connection': ['Keep-Alive']  # redundant in HTTP/1.1
            }),
            body)

        def cbCommand(response):
            log.info('Response: %s %s %s', response.version, response.code,
                     response.phrase)
            log.debug('Response headers: %s',
                      pformat(list(response.headers.getAllRawHeaders())))
            finished = Deferred()
            timeout = reactor.callLater(TIMEOUT / 2, finished.cancel)
            response.deliverBody(StringRcv(finished, timeout))
            finished.addCallback(self.decrypt, md5hash=md5d)
            return finished

        d.addCallback(cbCommand)
        self.timeout = reactor.callLater(TIMEOUT, d.cancel)
        #TODO add retry in case of ResponseNeverReceived error,
        #most likely caused by closing of persistent connection by server
        return d

    def decrypt(self, result, md5hash):
        if self.debug:
            self.df.write(result)
        log.debug(result)
        reply = str(self.gpg.decrypt(result, passphrase=self.passphrase))
        if self.debug:
            self.df.write(reply)
            self.df.flush()
        log.debug('decrypted reply:%s', reply)
        if not self.gpg.verify(reply):
            log.error('Invalid Signature, ignoring data!')
            reply = None
        if self.timeout.active():
            self.timeout.cancel()
        if reply == '':
            return None
        return dict(message=reply, md5hash=md5hash)

    def checkKey(self):
        keys = self.gpg.list_keys()
        for key in keys:
            if key['fingerprint'].endswith(self.mpex_fingerprint()):
                return True
        return False

    def mpex_fingerprint(self):
        """use/check current MPEx key depending on date"""
        return self._mpex_fingerprint
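# StringRcv is not defined in the MPEx snippet above; a minimal sketch that
# is consistent with how command() uses it (accumulate the body, then fire
# the Deferred). Whether the real class also cancels the body timeout here
# is an assumption.
from twisted.internet.protocol import Protocol

class StringRcv(Protocol):
    def __init__(self, finished, timeout):
        self.finished = finished
        self.timeout = timeout  # an IDelayedCall that would cancel `finished`
        self.buf = ''

    def dataReceived(self, data):
        self.buf += data

    def connectionLost(self, reason):
        if self.timeout.active():
            self.timeout.cancel()  # assumption: stop the body timeout
        self.finished.callback(self.buf)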
def __init__(self, config):
    self.base_url = config["harold"]["base-url"]
    self.secret = config["harold"]["hmac-secret"]
    self.connection_pool = HTTPConnectionPool(reactor)
    self.agent = Agent(reactor, pool=self.connection_pool)
class RpcClient(object):
    """Core JSON-RPC client class."""

    def __init__(self,
                 areactor,             # The Twisted reactor
                 log,                  # The asynchronous logger
                 nodes=None,           # If set, overrules the node list from `nodelist`.
                                       # NOTE: this will set max_batch_size to one!
                 max_batch_size=None,  # If set, overrules the max_batch_size of the nodelist.
                 nodelist="default",   # Other than "default", "stage" can be used and will
                                       # use api.steemitstage.com with a max_batch_size of 16
                 parallel=16,          # Maximum number of parallel outstanding HTTPS
                                       # JSON-RPC requests at any point in time.
                 rpc_timeout=15,       # Timeout for a single HTTPS JSON-RPC query.
                 stop_when_empty=False):  # Stop the reactor when the command queue is empty.
        """Constructor for asynchronous JSON-RPC client.

        Args:
            areactor : The Twisted reactor
            log : The Twisted asynchronous logger
            nodes : List of API nodes; you normally should NOT use this.
                    If you use this variable, also use max_batch_size!
            max_batch_size : The max batch size to use for JSON-RPC batched
                    calls. Only use with nodes that support batched RPC calls!
            nodelist : Name of the nodelist to use. "default" and "stage" are
                    currently valid values for this field.
            parallel : Maximum number of parallel outstanding HTTPS JSON-RPC
                    requests at any point in time.
            rpc_timeout : Timeout (in seconds) for a single HTTPS JSON-RPC query.
            stop_when_empty : Boolean indicating if the reactor should be
                    stopped when the command queue is empty and no active
                    HTTPS sessions remain.
        """
        self.reactor = areactor
        self.log = log
        if nodes:
            # If nodes is defined, overrule nodelist with a custom list of nodes.
            self.nodes = nodes
            self.max_batch_size = 1
        else:
            # See nodesets.py for content. We use the nodes and max_batch_size
            # as specified by the nodelist argument.
            self.nodes = nodesets.nodeset[nodelist]["nodes"]
            self.max_batch_size = nodesets.nodeset[nodelist]["max_batch_size"]
        if max_batch_size is not None:
            self.max_batch_size = max_batch_size
        self.parallel = parallel
        self.rpc_timeout = rpc_timeout
        self.node_index = 0  # Start off with the first JSON-RPC node in the node list.
        self.agent = Agent(areactor)  # HTTP(S) Agent
        self.cmd_seq = 0  # Unique sequence number used for commands in the command queue.
        # Errors may come in batches; we keep track of the last rotation to
        # another node to avoid reacting to errors from previous nodes.
        self.last_rotate = 0
        self.errorcount = 0  # The number of errors seen since the previous node rotation.
        # The actual commands from the command queue, keyed by sequence number.
        self.entries = dict()
        self.queue = list()  # The command queue is just a list of sequence numbers.
        self.active_call_count = 0  # The current number of active HTTPS POST calls.
        self.stop_when_empty = stop_when_empty
        self.log.info("Starting off with node {node!r}.",
                      node=self.nodes[self.node_index])

    def _next_node(self, reason):
        # We may have reason to move on to the next node; check how long ago
        # we last rotated and how many errors we have seen since.
        now = time.time()
        ago = now - self.last_rotate
        self.errorcount = self.errorcount + 1
        # Only rotate once more if we have been waiting a bit longer than the
        # RPC timeout, OR a bit more than the maximum number of parallel HTTPS
        # requests have ended in errors.
        if ago > (self.rpc_timeout + 2) or self.errorcount > (self.parallel + 1):
            self.log.error(
                "Switching from {oldnode!r} to another node due to error : {reason!r}",
                oldnode=self.nodes[self.node_index],
                reason=reason)
            self.last_rotate = now
            self.node_index = (self.node_index + 1) % len(self.nodes)
            self.errorcount = 0
            self.log.info("Switching to node {node!r}",
                          node=self.nodes[self.node_index])

    def __call__(self):
        """Invoke the object to send out some of the queued commands to a server"""
        dv = None
        # Push as many queued calls as self.max_batch_size and the maximum
        # number of parallel HTTPS sessions allow for.
        while self.active_call_count < self.parallel and self.queue:
            # Get a chunk of entries from the command queue so we can make a batch.
            subqueue = self.queue[:self.max_batch_size]
            self.queue = self.queue[self.max_batch_size:]
            # Send a single batch to the currently selected RPC node.
            dv = self._process_batch(subqueue)
        # If the queue is drained and nothing is in flight, we are done.
        if not self.queue and self.active_call_count == 0:
            self.log.error("Queue is empty and no active HTTPS-POSTs remaining.")
            if self.stop_when_empty:
                # On request, stop the reactor when the queue is empty while
                # no active queries remain.
                self.reactor.stop()
        return dv

    def _process_batch(self, subqueue):
        """Send a single batch of JSON-RPC commands to the server and process the result."""
        try:
            timeoutCall = None
            jo = None
            if self.max_batch_size == 1:
                # At time of writing, the regular nodes have broken JSON-RPC
                # batch handling. So when max_batch_size is set to one, we
                # assume we need to work around this fact.
                jo = json.dumps(self.entries[subqueue[0]]._get_rpc_call_object())
            else:
                # The api.steemitstage.com node properly supports JSON-RPC
                # batches, and so, hopefully soon, will the other nodes.
                qarr = list()
                for num in subqueue:
                    qarr.append(self.entries[num]._get_rpc_call_object())
                jo = json.dumps(qarr)
            call = FileBodyProducer(BytesIO(str.encode(str(jo))))
            url = "https://" + self.nodes[self.node_index] + "/"
            url = str.encode(str(url))
            deferred = self.agent.request(
                b'POST', url,
                Headers({
                    "User-Agent": ['Async Steem for Python v0.6.1'],
                    "Content-Type": ["application/json"]
                }),
                call)

            def process_one_result(reply):
                """Process a single response from a JSON-RPC command."""
                try:
                    if "id" in reply:
                        reply_id = reply["id"]
                        if reply_id in self.entries:
                            match = self.entries[reply_id]
                            if "result" in reply:
                                # Call the proper result handler for the
                                # request that this response belongs to.
                                match._handle_result(reply["result"])
                            else:
                                if "error" in reply and "code" in reply["error"]:
                                    msg = "No message included with error"
                                    if "message" in reply["error"]:
                                        msg = reply["error"]["message"]
                                    # Call the proper error handler for the
                                    # request that this response belongs to.
                                    match._handle_error(reply["error"]["code"], msg)
                                else:
                                    self.log.error(
                                        "Error: Invalid JSON-RPC response entry.")
                            #del self.entries[reply_id]
                        else:
                            self.log.error(
                                "Error: Invalid JSON-RPC id in entry {rid!r}",
                                rid=reply_id)
                    else:
                        self.log.error(
                            "Error: Invalid JSON-RPC response without id in entry: {ris!r}.",
                            ris=reply)
                except Exception as ex:
                    self.log.failure("Error in _process_one_result {err!r}",
                                     err=str(ex))

            def handle_response(response):
                """Handle response for JSON-RPC batch query invocation."""
                try:
                    # Cancel any active timeout for this HTTPS call.
                    if timeoutCall.active():
                        timeoutCall.cancel()

                    def cbBody(bodystring):
                        """Process response body for JSON-RPC batch query invocation."""
                        try:
                            results = None
                            # The body SHOULD be JSON; it not always is.
                            try:
                                results = json.loads(bodystring)
                            except Exception as ex:
                                # If the result is non-JSON, we may want to
                                # move to the next node in the node list.
                                self._next_node("Non-JSON response from server")
                                # Add the failed sub-queue back to the command
                                # queue; we shall try again soon.
                                self.queue = subqueue + self.queue
                            if results != None:
                                ok = False
                                if isinstance(results, dict):
                                    # Running in legacy single JSON-RPC call
                                    # mode (no batches); process the result of
                                    # the single call.
                                    process_one_result(results)
                                    ok = True
                                else:
                                    if isinstance(results, list):
                                        # Running in batch mode; process the
                                        # batch result, one response at a time.
                                        for reply in results:
                                            process_one_result(reply)
                                        ok = True
                                    else:
                                        # Completely unexpected result type;
                                        # we may want to move to the next node
                                        # in the node list.
                                        self._next_node(
                                            "JSON response neither list nor object")
                                        self.log.error(
                                            "Error: Invalid JSON-RPC response, "
                                            "expecting list as response on batch.")
                                        # Add the failed sub-queue back to the
                                        # command queue; we shall try again soon.
                                        self.queue = subqueue + self.queue
                                if ok == True:
                                    # Clean up the entries dict by removing all
                                    # fully processed commands that are no
                                    # longer in the queue.
                                    for request_id in subqueue:
                                        if request_id in self.entries:
                                            del self.entries[request_id]
                                        else:
                                            self.log.error(
                                                "Error: No response entry for request "
                                                "entry in result: {rid!r}.",
                                                rid=request_id)
                        except Exception as ex:
                            self.log.failure("Error in cbBody {err!r}", err=str(ex))
                        # This HTTPS POST is now fully processed.
                        self.active_call_count = self.active_call_count - 1
                        # Invoke self, possibly sending newly queued RPC calls
                        # to the current node.
                        self()

                    deferred2 = readBody(response)
                    deferred2.addCallback(cbBody)
                    return deferred2
                except Exception as ex:
                    self.log.failure("Error in handle_response {err!r}",
                                     err=str(ex))
                    # If something went wrong, the HTTPS POST isn't active anymore.
                    self.active_call_count = self.active_call_count - 1
                    # Invoke self, possibly sending newly queued RPC calls to
                    # the current node.
                    self()

            deferred.addCallback(handle_response)

            def _handle_error(error):
                """Handle network level error for JSON-RPC request."""
                try:
                    # Abandon any active timeout triggers.
                    if timeoutCall.active():
                        timeoutCall.cancel()
                    # Unexpected error on HTTPS POST; we may want to move to
                    # the next node.
                    self._next_node(error.getErrorMessage())
                    self.log.error("Error on HTTPS POST : {err!r}",
                                   err=error.getErrorMessage())
                except Exception as ex:
                    self.log.failure("Error in _handle_error {err!r}", err=str(ex))
                # Add the failed sub-queue back to the command queue; we shall
                # try again soon.
                self.queue = subqueue + self.queue
                # If something went wrong, the HTTPS POST isn't active anymore.
                self.active_call_count = self.active_call_count - 1
                # Invoke self, possibly sending newly queued RPC calls to the
                # current node.
                self()

            deferred.addErrback(_handle_error)
            timeoutCall = self.reactor.callLater(self.rpc_timeout, deferred.cancel)
            # Keep track of the number of active parallel HTTPS POSTs.
            self.active_call_count = self.active_call_count + 1
            return deferred
        except Exception as ex:
            self.log.failure("Error in _process_batch {err!r}", err=str(ex))

    def __getattr__(self, name):
        def addQueueEntry(*args):
            """Return a new in-queue JSON-RPC command invocation object with
            auto-generated command name from __getattr__."""
            try:
                # A unique id for each command.
                self.cmd_seq = self.cmd_seq + 1
                # Create a new queue entry.
                self.entries[self.cmd_seq] = _QueueEntry(self, name, args,
                                                         self.cmd_seq, self.log)
                # Append it to the command queue.
                self.queue.append(self.cmd_seq)
                # Return a handle to the new entry for setting callbacks on.
                return self.entries[self.cmd_seq]
            except Exception as ex:
                self.log.failure("Error in addQueueEntry {err!r}", err=str(ex))
        return addQueueEntry

    # Need to be able to check if an RpcClient equates None.
    def __eq__(self, val):
        if val is None:
            return False
        return True
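# A hedged usage sketch for RpcClient above; the method name is an arbitrary
# steemd-style call routed through __getattr__, and the callback protocol of
# the returned _QueueEntry is not shown in the snippet, so it is omitted here.
from twisted.internet import reactor
from twisted.logger import Logger

client = RpcClient(reactor, Logger(), stop_when_empty=True)
entry = client.get_dynamic_global_properties()  # queues the call via __getattr__
client()       # start up to `parallel` HTTPS POSTs from the queue
reactor.run()  # stops automatically once the queue drains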
def _get_http_client():
    agent = Agent(reactor, pool=pool)
    return HTTPClient(agent)
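# The snippet above assumes a module-level `pool`; a minimal sketch:
from twisted.internet import reactor
from twisted.web.client import HTTPConnectionPool

pool = HTTPConnectionPool(reactor, persistent=True)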
#coding:utf-8
import geventreactor
geventreactor.install()

from twisted.internet import reactor
from twisted.web.client import Agent
from twisted.web.http_headers import Headers

agent = Agent(reactor)

d = agent.request(
    'GET',
    'http://example.com/',
    Headers({'User-Agent': ['Twisted Web Client Example']}),
    None)

def cbResponse(ignored):
    print 'Response received'
d.addCallback(cbResponse)

def cbShutdown(ignored):
    reactor.stop()
d.addBoth(cbShutdown)

reactor.run()
def handler():
    agent = Agent(reactor, pool=pnconn_pool)

    if options.data is not None:
        body = FileBodyProducer(StringIO(options.data))
    else:
        body = None

    request = agent.request(options.method_string, url, Headers(headers), body)

    def received(response):
        finished = Deferred()
        response.deliverBody(PubNubResponse(finished, response.code))
        return finished

    def success(response, req_url, request):
        parsed_url = urlparse(req_url)
        query = parse_qs(parsed_url.query)
        uuid = None
        auth_key = None

        if 'uuid' in query and len(query['uuid']) > 0:
            uuid = query['uuid'][0]
        if 'auth_key' in query and len(query['auth_key']) > 0:
            auth_key = query['auth_key'][0]

        response_body = response.body
        code = response.code
        d = Deferred()

        response_info = ResponseInfo(
            status_code=response.code,
            tls_enabled='https' == parsed_url.scheme,
            origin=parsed_url.netloc,
            uuid=uuid,
            auth_key=auth_key,
            client_request=request)

        if code != 200:
            if code == 403:
                status_category = PNStatusCategory.PNAccessDeniedCategory
            elif code == 400:
                status_category = PNStatusCategory.PNBadRequestCategory
            else:
                # The original assigned `self` here, which does not exist in
                # this function; an unknown category appears to be the intent.
                status_category = PNStatusCategory.PNUnknownCategory
            if code >= 500:
                error = PNERR_SERVER_ERROR
            else:
                error = PNERR_CLIENT_ERROR
        else:
            error = None
            status_category = PNStatusCategory.PNAcknowledgmentCategory

        try:
            data = json.loads(response_body)
        except ValueError:
            try:
                data = json.loads(response_body.decode("utf-8"))
            except ValueError:
                raise PubNubTwistedException(
                    result=create_response(None),
                    status=create_status_response(
                        status_category,
                        response_info,
                        PubNubException(
                            pn_error=PNERR_JSON_DECODING_FAILED,
                            errormsg='json decode error')))

        if error:
            raise PubNubTwistedException(
                result=data,
                status=create_status_response(
                    status_category, data, response_info,
                    PubNubException(errormsg=data,
                                    pn_error=error,
                                    status_code=response.code)))

        envelope = TwistedEnvelope(
            create_response(data),
            create_status_response(status_category, response, response_info,
                                   error),
            data)
        d.callback(envelope)
        return d

    def failed(failure):
        raise PubNubTwistedException(
            result=None,
            status=create_status_response(
                PNStatusCategory.PNTLSConnectionFailedCategory,
                None, None,
                PubNubException(
                    errormsg=str(failure),
                    pn_error=PNERR_CONNECTION_ERROR,
                    status_code=0)))

    request.addErrback(failed)
    request.addCallback(received)
    request.addCallback(success, url, request)

    return request
from twisted.internet import reactor
from twisted.web.client import Agent, readBody
from twisted.web.http_headers import Headers

# Create the agent object
agent = Agent(reactor)

# agent.request returns a Deferred, on which callbacks are registered
defered = agent.request(
    b'GET',
    b'http://www.baidu.com/s?wd=python',
    Headers({'User-Agent': ['Twisted Web Client Example']}),
    None)

def successCallback(response):
    print('Response received:', response)
    d = readBody(response)
    d.addCallback(cbBody)
    return d

def cbBody(body):
    print('Response body:')
    print(body)

# Register the success callback: it runs if the request succeeds
defered.addCallback(successCallback)

def errorCallback(error):
    print("errback: ", str(error))

# Register the error callback for the failure path
defered.addErrback(errorCallback)
class Pusher(object):
    """
    Allows connecting to the MPNS gateway and sending notifications to the
    end devices.

    :param pem: a string containing PEM-formatted certificate used to
        authenticate the client against the gateway server. Only necessary
        for sending notifications to https based subscriptions.
    """
    PROCESSABLE_RESPONSES = [200, 404, 406, 412]
    RESPONSE_TO_ERROR = {
        400: 'Bad request',
        401: 'Unauthorized',
        405: 'Method not allowed',
        503: 'Service unavailable'
    }

    def __init__(self, pem=None):
        self._agent = Agent(reactor, NotificationPolicyForHTTPS(pem))

    @inlineCallbacks
    def send(self, notification):
        """
        Send prepared notification to the gateway server and fire some events
        based on the server's response. Raise an exception if the gateway
        rejected the request, the response could not be parsed, or we know
        the notification will never be delivered.

        :return: an instance of NotificationStatus, containing notification,
            subscription and device statuses extracted from the response.
        """
        logger.debug('Sending request')
        body = StringProducer(notification.requestBody)
        headers = Headers(notification.requestHeaders)
        response = yield self._agent.request('POST', notification.requestUri,
                                             headers, body)
        logger.debug('Response code: %i', response.code)
        if response.code in self.PROCESSABLE_RESPONSES:
            returnValue(self._processResponse(response))
        else:
            self._processErrorResponse(response)

    @staticmethod
    def _extractHeader(response, name):
        header = response.headers.getRawHeaders(name)
        if header is not None and len(header) > 0:
            return header[0]
        else:
            return ''

    @classmethod
    def _processResponse(cls, response):
        status = NotificationStatus(
            notification=cls._extractHeader(response, 'X-NotificationStatus'),
            subscription=cls._extractHeader(response, 'X-SubscriptionStatus'),
            device=cls._extractHeader(response, 'X-DeviceConnectionStatus'))

        logger.debug('Notification ' + status.notification)
        logger.debug('Subscription ' + status.subscription)
        logger.debug('Device ' + status.device)

        extra = {'response code': response.code, 'status': status}

        if response.code == 200:
            if status.notification in ['Received', 'Suppressed']:
                return status
            elif status.notification == 'QueueFull':
                raise QueueFullError('Queue full', extra=extra)
        elif response.code == 404 and status.subscription == 'Expired':
            raise SubscriptionExpiredError('Subscription expired', extra=extra)
        elif response.code == 406:
            raise ThrottlingLimitError('Throttling limit hit', extra=extra)
        elif response.code == 412:
            raise DeviceDisconnectedError('Device disconnected', extra=extra)

        if status.notification == 'Dropped':
            raise DeliveryError('Dropped for unknown reason', extra=extra)
        else:
            raise InvalidResponseError('Invalid notification status',
                                       extra=extra)

    @staticmethod
    def _processErrorResponse(response):
        message = Pusher.RESPONSE_TO_ERROR.get(response.code,
                                               'Unknown response code')
        raise HTTPError(message, {'response code': response.code})
def __init__(self, hs, treq_args={}, ip_whitelist=None, ip_blacklist=None): """ Args: hs (synapse.server.HomeServer) treq_args (dict): Extra keyword arguments to be given to treq.request. ip_blacklist (netaddr.IPSet): The IP addresses that are blacklisted that we may not request. ip_whitelist (netaddr.IPSet): The whitelisted IP addresses, that we can request if it were otherwise caught in a blacklist. """ self.hs = hs self._ip_whitelist = ip_whitelist self._ip_blacklist = ip_blacklist self._extra_treq_args = treq_args self.user_agent = hs.version_string self.clock = hs.get_clock() if hs.config.user_agent_suffix: self.user_agent = "%s %s" % (self.user_agent, hs.config.user_agent_suffix) self.user_agent = self.user_agent.encode("ascii") if self._ip_blacklist: real_reactor = hs.get_reactor() # If we have an IP blacklist, we need to use a DNS resolver which # filters out blacklisted IP addresses, to prevent DNS rebinding. nameResolver = IPBlacklistingResolver( real_reactor, self._ip_whitelist, self._ip_blacklist ) @implementer(IReactorPluggableNameResolver) class Reactor(object): def __getattr__(_self, attr): if attr == "nameResolver": return nameResolver else: return getattr(real_reactor, attr) self.reactor = Reactor() else: self.reactor = hs.get_reactor() # the pusher makes lots of concurrent SSL connections to sygnal, and # tends to do so in batches, so we need to allow the pool to keep # lots of idle connections around. pool = HTTPConnectionPool(self.reactor) pool.maxPersistentPerHost = max((100 * CACHE_SIZE_FACTOR, 5)) pool.cachedConnectionTimeout = 2 * 60 # The default context factory in Twisted 14.0.0 (which we require) is # BrowserLikePolicyForHTTPS which will do regular cert validation # 'like a browser' self.agent = Agent( self.reactor, connectTimeout=15, contextFactory=self.hs.get_http_client_context_factory(), pool=pool, ) if self._ip_blacklist: # If we have an IP blacklist, we then install the blacklisting Agent # which prevents direct access to IP addresses, that are not caught # by the DNS resolution. self.agent = BlacklistingAgentWrapper( self.agent, self.reactor, ip_whitelist=self._ip_whitelist, ip_blacklist=self._ip_blacklist, )
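# The client above sizes its HTTPConnectionPool for many concurrent pusher
# connections. Reduced to a standalone sketch (the numbers here are
# illustrative, not synapse's actual configuration):
from twisted.internet import reactor
from twisted.web.client import Agent, HTTPConnectionPool

pool = HTTPConnectionPool(reactor)
pool.maxPersistentPerHost = 10         # idle connections kept per host
pool.cachedConnectionTimeout = 2 * 60  # seconds before idle connections drop
agent = Agent(reactor, connectTimeout=15, pool=pool)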
def my_getPage(self, p_url):
    l_d = Agent(reactor).request(
        'GET', p_url, Headers({'User-Agent': ['twisted']}), None)
    l_d.addCallbacks(self.handleResponse, self.handleError)
    return l_d
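# my_getPage hands its Deferred to handleResponse/handleError, which are not
# shown; a hedged sketch of what such a pair might look like. PageFetcher
# and handleBody are hypothetical names, not from the original class.
from twisted.web.client import readBody

class PageFetcher(object):
    def handleResponse(self, response):
        # Chain a second Deferred that fires once the body has arrived.
        d = readBody(response)
        d.addCallback(self.handleBody)
        return d

    def handleBody(self, body):
        print('got %d bytes' % len(body))

    def handleError(self, failure):
        print('request failed: %s' % failure.getErrorMessage())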
class ScrapflyHTTPDownloader: def __init__(self, settings, crawler=None): self._crawler = crawler self.agent = Agent(reactor) if settings.get('SCRAPFLY_SSL_VERIFY') is False: import twisted.internet._sslverify as v v.platformTrust = lambda: None @classmethod def from_crawler(cls, crawler): return cls(crawler.settings, crawler) def _cb_bodydone(self, twisted_response: Response, request: ScrapflyScrapyRequest, spider: ScrapflySpider) -> Deferred: headers = CaseInsensitiveDict() status_code = twisted_response.code reason = twisted_response.phrase.decode('utf-8') for name, values in twisted_response.headers.getAllRawHeaders(): headers[name.decode('utf-8')] = '; '.join( [value.decode('utf-8') for value in values]) deferred = Deferred() body_receiver = BodyReceiver(deferred) if 'x-scrapfly-api-cost' in headers: self._crawler.stats.inc_value('scrapfly/api_call_cost', count=int( headers['x-scrapfly-api-cost'])) def on_body_downloaded(body): if 'content-encoding' in headers: if headers['content-encoding'] == 'gzip': body = zlib.decompress(body, 16 + zlib.MAX_WBITS) elif headers['content-encoding'] == 'br': try: from brotli import brotli except ImportError: print( 'You must run pip install scrapfly-sdk[speedups]') raise body = brotli.decompress(body) response = requests.Response() response.status_code = status_code response.reason = reason response._content = body response.headers.update(headers) response.url = request.url request.scrape_config.raise_on_upstream_error = False scrapfly_api_response: ScrapeApiResponse = spider.scrapfly_client._handle_response( response=response, scrape_config=request.scrape_config) self._crawler.stats.inc_value( 'scrapfly/bandwidth_consumed', count=scrapfly_api_response.context['bandwidth_consumed']) return ScrapflyScrapyResponse( request=request, scrape_api_response=scrapfly_api_response) deferred.addCallback(on_body_downloaded) twisted_response.deliverBody(body_receiver) return deferred def download_request(self, request, spider): if not isinstance(request, ScrapflyScrapyRequest) or not isinstance( spider, ScrapflySpider): return None request_data = spider.scrapfly_client._scrape_request( scrape_config=request.scrape_config) uri = '%s?%s' % (request_data['url'], urlencode( request_data['params'])) request_kwargs = { 'method': request_data['method'].encode('utf-8'), 'uri': uri.encode('utf-8'), 'headers': Headers({ name: [value] for name, value in request_data['headers'].items() }) } if request_data['method'] in ['POST', 'PUT', 'PATCH']: request_kwargs['bodyProducer'] = BodyProducer( request_data['body'].encode('utf-8')) d = self.agent.request(**request_kwargs) d.addCallback(self._cb_bodydone, request, spider) return d def close(self): pass
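# ScrapflyHTTPDownloader delivers the response through a BodyReceiver that is
# not shown in this snippet; a minimal sketch, assuming it simply accumulates
# the raw body and fires the Deferred on connection close:
from twisted.internet.protocol import Protocol

class BodyReceiver(Protocol):
    def __init__(self, deferred):
        self.deferred = deferred
        self.chunks = []

    def dataReceived(self, data):
        self.chunks.append(data)

    def connectionLost(self, reason):
        # on_body_downloaded() receives the joined body bytes.
        self.deferred.callback(b''.join(self.chunks))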
parser.add_argument("--warmup", "-w", type=int, default=10, help="Warmup period, sec") parser.add_argument("--timeout", "-t", type=float, default=5.0, help="Timeout, sec") args = parser.parse_args() pool = HTTPConnectionPool(reactor, persistent=True) pool.maxPersistentPerHost = 100 contextFactory = WebClientContextFactory() agent = Agent(reactor, contextFactory, pool=pool) def wait_for_sec_start(): start_sec = int(time.time()) sec = start_sec while sec == start_sec: sleep(0.010) sec = int(time.time()) wait_for_sec_start() stopAt = (datetime.datetime.now() + datetime.timedelta(seconds=args.duration)).replace(microsecond=0) r = Requestor(QUEUE_URL, args.max_inflight, stopAt, args.body_size,
class Launcher(logger.Logger): """Workflow launcher. Parameters: parser A custom argparse.ArgumentParser instance. master_address The server's address (implies Slave mode). listen_address The address to listen (implies Master mode). matplotlib_backend Matplotlib backend to use (only in Master mode). stealth Do not report the status to the web server, do not launch it if necessary (only in Master mode). nodes The list of slaves to launch remotely (only in Master mode). log_file Duplicate all logging to this file. """ graphics_client = None graphics_server = None def __init__(self, interactive=False, **kwargs): super(Launcher, self).__init__() self._initialized = False self._running = False parser = Launcher.init_parser(**kwargs) self.args, _ = parser.parse_known_args(self.argv) self.args.master_address = self.args.master_address.strip() self.args.listen_address = self.args.listen_address.strip() self.testing = self.args.test self.args.matplotlib_backend = self.args.matplotlib_backend.strip() self._slaves = [ x.strip() for x in self.args.nodes.split(',') if x.strip() != "" ] self._slave_launch_transform = self.args.slave_launch_transform if self._slave_launch_transform.find("%s") < 0: raise ValueError("Slave launch command transform must contain %s") if self.args.log_file != "": log_file = self.args.log_file if self.args.log_file_pid: log_base_name = os.path.splitext(os.path.basename(log_file)) log_file = os.path.join( os.path.dirname(log_file), "%s.%d%s" % (log_base_name[0], os.getpid(), log_base_name[1])) logger.Logger.redirect_all_logging_to_file(log_file) self._result_file = self.args.result_file self.info("My Python is %s %s", platform.python_implementation(), platform.python_version()) self.info("My PID is %d", os.getpid()) self.info("My time is %s", datetime.datetime.now()) self.id = str(uuid.uuid4()) if not self.is_slave else None self.log_id = self.args.log_id or self.id if self.logs_to_mongo: if self.mongo_log_addr == "": self.args.log_mongo = root.common.mongodb_logging_address if not self.is_slave: logger.Logger.duplicate_all_logging_to_mongo( self.args.log_mongo, self.log_id, "master") self._monkey_patch_twisted_failure() self._lock = threading.Lock() self._webagg_port = 0 self._agent = None self._workflow = None self._start_time = None self._device = NumpyDevice() self._interactive = interactive self._reactor_thread = None self._notify_update_interval = kwargs.get( "status_update_interval", root.common.web.notification_interval) if self.args.yarn_nodes is not None and self.is_master: self._discover_nodes_from_yarn(self.args.yarn_nodes) def __getstate__(self): return {} def _monkey_patch_twisted_failure(self): from twisted.python.failure import Failure original_raise = Failure.raiseException launcher = self def raiseException(self): try: original_raise(self) except: launcher.exception("Error inside Twisted reactor:") reactor.callFromThread(launcher.stop) if original_raise != raiseException: Failure.raiseException = raiseException @staticmethod def init_parser(**kwargs): """ Initializes an instance of argparse.ArgumentParser. 
""" parser = kwargs.get("parser", argparse.ArgumentParser()) parser.add_argument("-m", "--master-address", type=str, default=kwargs.get("master_address", ""), help="Workflow will be launched in client mode " "and connected to the master at the specified " "address.").mode = ("slave", ) parser.add_argument("-l", "--listen-address", type=str, default=kwargs.get("listen_address", ""), help="Workflow will be launched in server mode " "and will accept client connections at the " "specified address.").mode = ("master", ) parser.add_argument("-t", "--test", default=kwargs.get("test", False), help="Use the (assumably) trained model.", action='store_true') parser.add_argument("-p", "--matplotlib-backend", type=str, nargs='?', const="", default=kwargs.get( "matplotlib_backend", root.common.graphics.matplotlib.backend), help="Matplotlib drawing backend.") parser.add_argument("--no-graphics-client", default=kwargs.get("graphics_client", False), help="Do not launch the graphics client. Server " "will still be started unless matplotlib backend " "is an empty string.", action='store_true') parser.add_argument("--pdb-on-finish", default=False, help="Drop into pdb session on workflow finish.", action='store_true') parser.add_argument("-s", "--stealth", default=kwargs.get("stealth", False), help="Do not report own status to the Web Status " "Server.", action='store_true') parser.add_argument("-n", "--nodes", type=str, default=kwargs.get("nodes", ""), help="The list of slaves to launch remotely " "separated by commas. Slave format is " "host/OpenCLPlatformNumber:OpenCLDevice(s)xN," "examples: host/0:0, host/1:0-2, " "host/0:2-3x3.").mode = ("master", ) parser.add_argument("-f", "--log-file", type=str, default=kwargs.get("log_file", ""), help="The file name where logs will be written.") parser.add_argument("--log-file-pid", default=False, action='store_true', help="Insert process ID into the log file name.") parser.add_argument("-g", "--log-mongo", type=str, nargs='?', const="", default=kwargs.get("log_mongo", "no"), help="MongoDB server address where logs will be " "sent.") parser.add_argument("-i", "--log-id", type=str, default=kwargs.get("log_id", ""), help="Log identifier (used my Mongo logger).") parser.add_argument("--yarn-nodes", type=str, default=None, help="Discover the nodes from this YARN " "ResourceManager's address.").mode = ("master", ) parser.add_argument("--max-nodes", type=int, default=0, help="Max number of slaves launched. 
0 means " "unlimited number.").mode = ("master", ) parser.add_argument("--slave-launch-transform", type=str, default="%s", help="Transformation of the slave remote launch " "command given over ssh (%%s corresponds to the " "original command).").mode = ("master", ) parser.add_argument("--result-file", help="The path where to store the execution " "results (in JSON format).").mode = \ ("master", "standalone") return parser @property def interactive(self): return self._interactive @property def testing(self): return self._testing @testing.setter def testing(self, value): if not isinstance(value, bool): raise TypeError("testing must be boolean (got %s)" % type(value)) assert not self.is_initialized, "Too late for setting this" self._testing = value @property def id(self): return self._id @id.setter def id(self, value): self._id = value if self.id is not None: self.info("My ID is %s", self.id) @property def log_id(self): return self._log_id @log_id.setter def log_id(self, value): self._log_id = value if self.log_id is not None: self.info("My log ID is %s", self.log_id) @property def runs_in_background(self): return self.args.background @property def logs_to_mongo(self): return self.args.log_mongo != "no" @property def mongo_log_addr(self): return self.args.log_mongo @property def matplotlib_backend(self): return self.args.matplotlib_backend @property def reports_web_status(self): return not self.args.stealth and not self.is_slave @property def slaves(self): return self._slaves if self.is_master else [] @property def webagg_port(self): return self._webagg_port @property def is_master(self): return True if self.args.listen_address else False @property def is_slave(self): return True if self.args.master_address else False @property def is_standalone(self): return not self.is_master and not self.is_slave @property def is_main(self): return False @property def mode(self): if self.is_master: return "master" if self.is_slave: return "slave" if self.is_standalone: return "standalone" raise RuntimeError("Impossible happened") @property def device(self): return self._device @property def is_initialized(self): return self._initialized @property def is_running(self): return self._running @property def workflow(self): return self._workflow @property def agent(self): return self._agent @property def plots_endpoints(self): return (Launcher.graphics_server.endpoints["epgm"] + [Launcher.graphics_server.endpoints["ipc"]]) \ if getattr(self, "graphics_server", None) is not None else [] @property def start_time(self): return self._start_time def threadsafe(fn): def wrapped(self, *args, **kwargs): with self._lock: return fn(self, *args, **kwargs) name = getattr(fn, '__name__', getattr(fn, 'func', wrapped).__name__) wrapped.__name__ = name + '_threadsafe' return wrapped @threadsafe def add_ref(self, workflow): """ Links with the nested Workflow instance, so that we are able to initialize. """ self._workflow = workflow workflow.run_is_blocking = False self.workflow.thread_pool.workflow = workflow if self.is_slave or self.matplotlib_backend == "": workflow.plotters_are_enabled = False workflow.result_file = self._result_file def del_ref(self, workflow): pass def on_workflow_finished(self): if threading.current_thread().ident == self._reactor_thread_ident: reactor.callWhenRunning(self.stop) return reactor.callFromThread(self.stop) self.debug("%s signalled that it had finished, enqueued self.stop", self.workflow) # Sometimes, reactor does not wake up from the first attempt # (inside callFromThread). 
This looks absurd, but it's true. # os.fsync on reactor.waker.o does not help (not a buffering issue?). while self._running: self.debug("wake up, Neo") reactor.wakeUp() def device_thread_pool_detach(self): if self.device.is_attached(self.workflow.thread_pool): self.device.thread_pool_detach(self.workflow.thread_pool) @threadsafe def initialize(self, **kwargs): # Ensure reactor stops in some rare cases when it does not normally if not self.interactive: self.workflow.thread_pool.register_on_shutdown( Launcher._reactor_shutdown) else: self._interactive_shutdown_ref = self._interactive_shutdown ThreadPool.register_atexit(self._interactive_shutdown_ref) if self.is_slave: self._agent = SlaveManager(self.args.master_address, self.workflow) def on_id_received(node_id, log_id): self.id = node_id self.log_id = log_id if self.logs_to_mongo: logger.Logger.duplicate_all_logging_to_mongo( self.args.log_mongo, self.log_id, node_id) self.agent.on_id_received = on_id_received else: if self.reports_web_status: timeout = self._notify_update_interval / 2 self._web_status_agent = Agent( reactor, pool=HTTPConnectionPool(reactor), connectTimeout=timeout) # Launch the status server if it's not been running yet self._launch_status() if self.workflow.plotters_are_enabled and \ (not self.interactive or Launcher.graphics_client is None): try: Launcher.graphics_server, Launcher.graphics_client = \ graphics_server.GraphicsServer.launch( self.workflow.thread_pool, self.matplotlib_backend, self._set_webagg_port, self.args.no_graphics_client) except Exception as e: self.error("Failed to create the graphics server and/or " "client. Try to completely disable plotting " "with -p ''.") raise from_none(e) elif self.args.no_graphics_client: self.warning("Plotters are disabled. --no-graphics-client has " "no effect.") if self.is_master: try: self._agent = MasterManager(self.args.listen_address, self.workflow) # Launch the nodes described in the command line or config self._launch_nodes() except Exception as e: self._stop_graphics() raise from_none(e) # The last moment when we can do this, because OpenCL device curses # new process creation try: self._generate_workflow_graphs() except Exception as e: self.error("Failed to generate the workflow graph(s)") self._stop_graphics() raise from_none(e) try: if not self.is_master and not kwargs.get("no_device", False): self._device = Device() except Exception as e: self.error("Failed to create the OpenCL device") self._stop_graphics() raise from_none(e) if "no_device" in kwargs: del kwargs["no_device"] self.workflow.reset_thread_pool() def greet_reactor(): def set_thread_ident(): self._reactor_thread_ident = threading.current_thread().ident reactor.callWhenRunning(self.info, "Reactor is running") reactor.callWhenRunning(set_thread_ident) def initialize_workflow(): try: self.workflow.initialize(device=self.device, **kwargs) except Exception as ie: self.error("Failed to initialize the workflow") self._stop_graphics() self.device_thread_pool_detach() raise from_none(ie) if not self.interactive: # delay greet_reactor() until everything else is initialized initialize_workflow() else: greet_reactor() reactor.callWhenRunning(initialize_workflow) if not self.is_standalone: self._agent.initialize() if not self.interactive: trigger = reactor.addSystemEventTrigger trigger('before', 'shutdown', self._on_stop) trigger('after', 'shutdown', self._print_stats) trigger('after', 'shutdown', self.event, "work", "end", height=0.1) else: register = self.workflow.thread_pool.register_on_shutdown 
self._on_stop_ref = self._on_stop register(self._on_stop_ref) self._print_stats_ref = self._print_stats register(self._print_stats_ref) def work_end(): self.event("work", "end", height=0.1) self._work_end = work_end register(self._work_end) for unit in self.workflow: if isinstance(unit, Plotter): unit.graphics_server = Launcher.graphics_server greet_reactor() self._initialized = True def run(self): """Starts Twisted reactor, invokes attached workflow's run() and does periodic status updates. """ self._pre_run() if self.interactive: if not reactor.running and self._reactor_thread is None: reactor._handleSignals() self._reactor_thread = threading.Thread( name="TwistedReactor", target=reactor.run, kwargs={"installSignalHandlers": False}) self._reactor_thread.start() return try: reactor.run() except: self.exception("Reactor malfunction. The whole facility is going " "to be destroyed in 10 minutes. Personnel " "evacuation has been started.") finally: with self._lock: self._running = False def boot(self, **kwargs): """ Initializes and runs the attached workflow. :param kwargs: The keyword arguments to pass to initialize(). """ self.initialize(**kwargs) self.run() def stop(self): """Stops Twisted reactor and Workflow execution. """ with self._lock: if self.workflow is None: return running = self._running and reactor.running if self.is_master and self.agent is not None and \ len(self.agent.protocols) > 0: self.info("Waiting for the slaves to finish (%d left)...", len(self.agent.protocols)) return if not running or self.interactive: self._on_stop() return try: reactor.stop() except ReactorNotRunning: pass except: self.exception("Failed to stop the reactor. There is going to be " "a meltdown unless you immediately activate the " "emergency graphite protection.") @staticmethod def stop_reactor(self): if not self.interactive: self.warning("This is designed for the interactive mode") reactor.stop() def pause(self): self.workflow.thread_pool.pause() def resume(self): self.workflow.thread_pool.resume() def launch_remote_progs(self, host, *progs, **kwargs): self.info("Launching %d instance(s) on %s", len(progs), host) cwd = kwargs.get("cwd", os.getcwd()) self.debug("launch_remote_progs: cwd: %s", cwd) python_path = kwargs.get("python_path", os.getenv("PYTHONPATH")) if os.path.splitext(os.path.basename(sys.argv[0]))[0] == "__main__": if python_path is None: python_path = cwd else: python_path += ":" + cwd if python_path is not None: self.debug("launch_remote_progs: PYTHONPATH: %s", python_path) ppenv = "export PYTHONPATH='%s' && " % python_path else: ppenv = "" pc = paramiko.SSHClient() try: pc.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: pc.connect(host, look_for_keys=True, timeout=0.2) except paramiko.ssh_exception.SSHException: self.exception("Failed to connect to %s", host) return buf_size = 128 channel = pc.get_transport().open_session() channel.get_pty() for prog in progs: prog = prog.replace(r'"', r'\"').replace(r"'", r"\'") cmd = self._slave_launch_transform % ("cd '%s' && %s%s" % (cwd, ppenv, prog)) self.debug("Executing %s", cmd) channel.exec_command(cmd) answer = channel.recv(buf_size) if answer: buf = channel.recv(buf_size) while buf: answer += buf buf = channel.recv(buf_size) self.warning("SSH returned:\n%s", answer.decode('utf-8')) channel.close() except: self.exception("Failed to launch '%s' on %s", progs, host) finally: pc.close() @threadsafe def _pre_run(self): if not self._initialized: raise RuntimeError("Launcher was not initialized") if self._running: raise 
RuntimeError("Launcher is already running") self._running = True self._start_time = time.time() if self.reports_web_status: self._notify_update_last_time = self.start_time self._notify_status() if not self.is_slave: def run_workflow(): self.workflow.stopped = False self.workflow.thread_pool.start() self.workflow.thread_pool.callInThread(self.workflow.run) reactor.callWhenRunning(run_workflow) self.event("work", "begin", height=0.1) def _on_stop(self): if self.workflow is None or not self._initialized: return self._on_stop_locked() @threadsafe def _on_stop_locked(self): if self.args.pdb_on_finish: import pdb pdb.set_trace() self.info("Stopping everything (%s mode)", self.mode) self._initialized = False self._running = False # Wait for the own graphics client to terminate normally self._stop_graphics() if not self.is_standalone: self.agent.close() self.workflow.thread_pool.shutdown() threadsafe = staticmethod(threadsafe) @staticmethod def _prepare_reactor_shutdown(): original_stop = reactor.stop def stop(): try: original_stop() except ReactorNotRunning: pass reactor.stop = stop @staticmethod def _reactor_shutdown(): Launcher._prepare_reactor_shutdown() reactor.sigInt() def _interactive_shutdown(self): assert self.interactive self.debug("Shutting down in interactive mode") Launcher._prepare_reactor_shutdown() reactor.callFromThread(reactor.stop) self._stop_graphics(True) if self._reactor_thread is not None and \ self._reactor_thread.is_alive(): self._reactor_thread.join() def _stop_graphics(self, interactive_stop=False): if self.interactive and not interactive_stop: return if Launcher.graphics_client is not None: attempt = 0 while Launcher.graphics_client.poll() is None and attempt < 10: if attempt == 1: self.info("Signalling the graphics client to finish " "normally...") Launcher.graphics_server.shutdown() attempt += 1 time.sleep(0.2) if Launcher.graphics_client.poll() is None: Launcher.graphics_client.terminate() self.info("Waiting for the graphics client to finish after " "SIGTERM...") try: Launcher.graphics_client.wait(0.5) self.info("Graphics client has been terminated") except subprocess.TimeoutExpired: os.kill(Launcher.graphics_client.pid, signal.SIGKILL) self.info("Graphics client has been killed") else: self.info("Graphics client returned normally") def _generate_workflow_graphs(self): if not self.is_slave and self.reports_web_status: try: self.workflow_graph, _ = self.workflow.generate_graph( filename=None, write_on_disk=False, with_data_links=True) except RuntimeError as e: self.warning("Failed to generate the workflow graph: %s", e) self.workflow_graph = "" units_wanting_graph = [ u for u in self.workflow if getattr(u, "wants_workflow_graph", False) ] if len(units_wanting_graph) > 0: for unit in units_wanting_graph: self.info( "Rendering the workflow graphs as requested by %s...", unit) unit.workflow_graphs = {} for fmt in "svg", "png": with NamedTemporaryFile(suffix="veles_workflow.%s" % fmt) \ as wfgf: kwargs = getattr(unit, "workflow_graph_kwargs", {}) kwargs["quiet"] = True self.workflow.generate_graph(wfgf.name, **kwargs) wfgf.seek(0, os.SEEK_SET) unit.workflow_graphs[fmt] = wfgf.read() def _print_stats(self): self.workflow.print_stats() if self.agent is not None: self.agent.print_stats() if self.start_time is not None: self.info( "Time elapsed: %s", datetime.timedelta(seconds=(time.time() - self.start_time))) def _launch_status(self): if not self.reports_web_status: return sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) result = 
sock.connect_ex((root.common.web.host, root.common.web.port)) sock.close() if result != 0: self.info("Launching the web status server") self.launch_remote_progs( root.common.web.host, "PYTHONPATH=%s %s 2>>%s" % (os.path.dirname(root.common.dirs.veles), os.path.join(root.common.dirs.veles, "web_status.py"), "%s.stderr%s" % os.path.splitext(root.common.web.log_file))) else: self.info("Web status server %s:%d is already running", root.common.web.host, root.common.web.port) def _launch_nodes(self): if len(self.slaves) == 0: return self.debug("Will launch the following slaves: %s", ', '.join(self.slaves)) filtered_argv = filter_argv(sys.argv, "-l", "--listen-address", "-n", "--nodes", "-p", "--matplotlib-backend", "-b", "--background", "-s", "--stealth", "-a", "--backend", "-d", "--device", "--slave-launch-transform", "--result-file", "--pdb-on-finish", "--respawn", "--job-timeout")[1:] host = self.args.listen_address[:self.args.listen_address.index(':')] port = self.args.listen_address[len(host) + 1:] # No way we can send 'localhost' or empty host name to a slave. if not host or host in ("0.0.0.0", "localhost", "127.0.0.1"): host = socket.gethostname() filtered_argv.insert( 0, "-m %s:%s -b -i \"%s\"" % (host, port, self.log_id)) slave_args = " ".join(filtered_argv) self.debug("Slave args: %s", slave_args) total_slaves = 0 max_slaves = self.args.max_nodes or 1000 cmdline = "%s %s" % (sys.executable, os.path.abspath(sys.argv[0])) + \ " --backend %s --device %s " + slave_args if self.args.log_file: cmdline += " &>> " + self.args.log_file for node in self.slaves: host, devs = node.split('/') progs = [cmdline % dev for dev in Device.iterparse(devs)] if total_slaves + len(progs) > max_slaves: progs = progs[:max_slaves - total_slaves] total_slaves += len(progs) self.launch_remote_progs(host, *progs) if total_slaves >= max_slaves: break def _set_webagg_port(self, port): self.info("Found out the WebAgg port: %d", port) self._webagg_port = port def _on_notify_status_error(self, error): self.warning("Failed to upload the status: %s", error) reactor.callLater(self._notify_update_interval, self._notify_status) def _notify_status(self, response=None): if not self._running: return time_passed = time.time() - self._notify_update_last_time if time_passed < self._notify_update_interval: reactor.callLater(self._notify_update_interval - time_passed, self._notify_status) return self._notify_update_last_time = time.time() mins, secs = divmod(time.time() - self.start_time, 60) hours, mins = divmod(mins, 60) ret = { 'id': self.id, 'log_id': self.log_id, 'name': self.workflow.name, 'master': socket.gethostname(), 'time': "%02d:%02d:%02d" % (hours, mins, secs), 'user': getpass.getuser(), 'graph': self.workflow_graph, 'log_addr': self.mongo_log_addr, 'slaves': self._agent.nodes if self.is_master else [], 'plots': "http://%s:%d" % (socket.gethostname(), self.webagg_port), 'custom_plots': "<br/>".join(self.plots_endpoints), 'description': "<br />".join(escape(self.workflow.__doc__ or "").split("\n")) } url = "http://%s:%d/update" % (root.common.web.host, root.common.web.port) headers = Headers({b'User-Agent': [b'twisted']}) body = FileBodyProducer(BytesIO(json.dumps(ret).encode('charmap'))) self.debug("Uploading status update to %s", url) d = self._web_status_agent.request(b'POST', url.encode('ascii'), headers=headers, bodyProducer=body) d.addCallback(self._notify_status) d.addErrback(self._on_notify_status_error) def _discover_nodes_from_yarn(self, address): if address.find(':') < 0: address += ":8088" if address[:7] != 
"http://": address = "http://" + address address += "/ws/v1/cluster/nodes" self.debug("Requesting GET %s", address) getPage(address.encode('ascii')).addCallbacks( callback=self._parse_yarn_nodes_json, errback=lambda error: self.warning( "Failed to get the nodes list from YARN ResourceManager: %s", error)) def _parse_yarn_nodes_json(self, response): rstr = response.decode() self.debug("Received YARN response: %s", rstr) tree = json.loads(rstr) for node in tree["nodes"]["node"]: self._slaves.append(node["nodeHostName"] + "/0:0") reactor.callLater(0, self._launch_nodes)
def run_client(rhost, rport, basepath=None): dom = 'http://%s:%s%s' % (rhost, rport, '' if not basepath else '/' + basepath.lstrip('/')) s = get_start_port() ports = dict([(k, counter(s)) for k in PROTOCOL.iterkeys()]) finis = dict([(k, counter(s + 1)) for k in PROTOCOL.iterkeys()]) def request_send(rhost, type, port): def cbRequestEnd(): fini = finis[type].nextport() next = ports[type].nextport() if (next > 0): request_send(rhost, type, next) else: for pc in finis.itervalues(): if pc.port >= 0: return from twisted.internet.error import ReactorNotRunning try: reactor.stop() except ReactorNotRunning: pass try: sock = sock_open(rhost, type, port, True) except socket.error, e: if e[0] not in ERR_IN_USE: raise print_flush( "skipping test on %s port %s since it is already in use" % (type, port)) cbRequestEnd() return d = Agent(reactor).request( 'GET', '%s/%s/%s' % (dom, type, port), Headers({'User-Agent': ['%s/%s' % (NAME, VERSION)]}), None) def cbResponse(response): if response.code == 200: try: lsock = PROTOCOL[type][1](sock) lsock.setblocking(1) lsock.settimeout(4.0) try: w = lsock.recv(8192) if type == 'tcp': lsock.shutdown(socket.SHUT_RDWR) print_flush("%s port %s is visible to %s (%s)" % (type, port, rhost, w)) finally: lsock.close() except socket.error, e: print_flush("recv failed on %s %s %s %s" % (rhost, type, port, e)) raise finally:
def privmsg(self, user, channel, msg): """Called when the bot receives a message, both public and private.""" user = user.split('!', 1)[0] # By default, don't reply to anything canReply = False private = False replyChannel = channel parseMsg = msg directAddress = False # Debug print ALL messages #print "Message from '%s' on '%s': '%s'" % (user, channel, msg) # Check to see if they're sending me a private message if channel == self.nickname: canReply = True private = True replyChannel = user self.log("Private message from %s: %s" % (user, msg)) if str.lower(user) == "nickserv": self.log("Nickserv says: %s" % msg) if parseMsg.startswith("whois "): whoisnick = parseMsg.split(" ", 1)[1] print "Doing a whois '%s'" % whoisnick self.whois(whoisnick) # Update the seen database, but only if it's not a private message if channel in self.channelList and not private: self.db.updateSeen(user, channel, msg) # If kthx said something, mark him as here and ignore everything he says if user == trackednick and not private: if (self.trackedpresent[channel] == False): self.trackedpresent[channel] = True self.emailClient.threadsend( "%s status" % self.nickname, "%s spoke in %s unexpectedly and got marked as present: %s" % (user, channel, msg)) return # If kthx is gone, then we can always reply if not private and not self.trackedpresent[channel]: canReply = True # Check to see if we have a tell waiting for this user tells = self.db.getTell(user) if tells: for message in tells: print "Found tell for '%s' from '%s'" % (user, message[Tell.author]) author = message[Tell.author] timestring = timesincestring(message[Tell.timestamp]) text = message[Tell.message] inTracked = message[Tell.inTracked] # We have 3 cases: # 1) kthx was around when this tell happened and is still around now. # In this case, we assume kthx will relay the message and just delete it # 2) kthx was around when this tell happened and is not here now. # In this case, we want to send the message and mention that kthx may repeat it # 3) kthx was not around when this tell happened and may or may not be here now # Whether or not kthx is now here, we need to say the message # 4) gthx was specifically addressed for this tell # Whether or not kthx is now here, we need to say the message # # If we can't reply, it means that kthx is present. In that # case, the tell has already been erased, so in both cases, # we're good. if canReply or not inTracked: if inTracked: self.msg( replyChannel, "%s: %s ago <%s> tell %s %s (%s may repeat this)" % (user, timestring, author, user, text, trackednick)) else: self.msg( replyChannel, "%s: %s ago <%s> tell %s %s" % (user, timestring, author, user, text)) # Check for specifically addressed messages m = re.match(self.matchNick, parseMsg) if m: print "Found message addressed to '%s'. My nick is '%s'." 
% ( m.group(1), self.nickname) parseMsg = m.group(3) # Mark it as a direct address so we can look for a factoid directAddress = True # If it's addressed directly to me, we can reply if m.group(1) == self.nickname: canReply = True # Check for status query if canReply and parseMsg == "status?": if (trackednick): if (private): reply = "%s: OK; Up for %s; " % ( VERSION, timesincestring(self.uptimeStart)) for channel in self.channelList: reply += "%s %s; " % (channel, "PRESENT" if self.trackedpresent[channel] else "GONE") else: reply = "%s: OK; Up for %s; %s is %s" % ( VERSION, timesincestring( self.uptimeStart), trackednick, "PRESENT" if self.trackedpresent[channel] else "GONE") else: reply = "%s: OK; Up for %s; standalone mode" % ( VERSION, timesincestring(self.uptimeStart)) mood = self.db.mood() reply += " mood: %s" % self.moodToString(mood) self.msg(replyChannel, reply) return # Check for lurker query if canReply and parseMsg == "lurkers?": self.msg(replyChannel, "Looking for lurkers...") self.lurkerReplyChannel = replyChannel self.lurkerCount = 0 self.channelCount = 0 print "Sending request 'NAMES %s'" % channel self.sendLine("NAMES %s" % channel) return # Check for tell query m = self.tellQuery.match(parseMsg) if m and directAddress: print "Got tell from '%s' for '%s' message '%s'." % ( user, m.group(1), m.group(2)) # The is in the tracked bot if the tracked bot is present and it was not a message # specifically directed to us. This is a little tricky since the only way to know # that a message was specifically directed to us is to see if it was a direct address # and we can reply success = self.db.addTell( user, m.group(1), m.group(2), not (directAddress and canReply) and self.trackedpresent[channel]) if success and canReply: self.msg( replyChannel, "%s: I'll pass that on when %s is around." % (user, m.group(1))) return # Check for seen query if canReply: m = self.seenQuery.match(parseMsg) if m: queryname = m.group(1) print "%s asked about '%s'" % (user, queryname) rows = self.db.seen(queryname) if len(rows) == 0: reply = "Sorry, I haven't seen %s." % queryname self.msg(replyChannel, reply) for i, row in enumerate(rows): reply = "%s was last seen in %s %s ago saying '%s'." % ( row[Seen.name], row[Seen.channel], timesincestring( row[Seen.timestamp]), row[Seen.message]) self.msg(replyChannel, reply) if i >= 2: # Don't reply more than 3 times to a seen query break return # Check for google query if directAddress: if canReply: m = self.googleQuery.match(parseMsg) if m: queryname = urllib.quote_plus(m.group(1)) foruser = m.group(2) print "%s asked to google '%s' for %s" % (user, queryname, foruser) reply = "%s: http://lmgtfy.com/?q=%s" % (foruser, queryname) self.msg(replyChannel, reply) return # Check for setting a factoid factoid = None if directAddress: factoid = self.factoidSet.match(parseMsg) if factoid: invalidwords = re.match( '(here|how|it|something|that|this|what|when|where|which|who|why|you)', factoid.group(1), re.IGNORECASE) if not invalidwords: safeFactoid = factoid.group(1).decode("utf-8") print "%s tried to set factoid '%s'." % (user, safeFactoid) success = self.db.addFactoid( user, factoid.group(1), True if factoid.group(2) == 'are' else False, factoid.group(4), True if not factoid.group(3) else False) if canReply: if success: self.msg(replyChannel, "%s: Okay." % user) else: self.msg( replyChannel, "I'm sorry, %s. I'm afraid I can't do that." 
% user) # Check for getting a factoid if canReply: f = self.factoidQuery.match(parseMsg) if f: safeFactoid = f.group(1).decode("utf-8") print "factoid query from %s:%s for '%s'" % (user, channel, safeFactoid) answer = self.getFactoidString(f.group(1)) if answer: # Replace !who and !channel in the reply answer = re.sub("!who", user, answer) answer = re.sub("!channel", channel, answer) if answer.startswith("<reply>"): answer = answer[7:] if answer.startswith("<action>"): self.describe(replyChannel, answer[8:]) else: if (f.group(3)): answer = "%s, %s" % (f.group(3), answer) self.msg(replyChannel, answer) # Check for info request if canReply and parseMsg.startswith("info "): query = parseMsg[5:] if query[-1:] == "?": query = query[:-1] safeFactoid = query.decode("utf-8") print "info request for '%s' ReplyChannel is '%s'" % (safeFactoid, replyChannel) refcount = 0 answer = self.db.infoFactoid(query) if answer: count = answer[0][6] if not count: count = "0" print "Factoid '%s' has been referenced %s times" % ( safeFactoid, count) self.msg( replyChannel, "Factoid '%s' has been referenced %s times" % (query, count)) for factoid in answer: user = factoid[3] value = factoid[2] if not user: user = "******" if value: print "At %s, %s set to: %s" % (factoid[4], user, value) self.msg( replyChannel, "At %s, %s set to: %s" % (factoid[4], user, value)) else: print "At %s, %s deleted this item" % (factoid[4], user) self.msg( replyChannel, "At %s, %s deleted this item" % (factoid[4], user)) else: print "No info for factoid '%s'" % safeFactoid self.msg(replyChannel, "Sorry, I couldn't find an entry for %s" % query) # Check for forget request if directAddress and parseMsg.startswith("forget "): query = parseMsg[7:] print "forget request for '%s'" % query forgotten = self.db.forgetFactoid(query, user) if canReply: if forgotten: self.msg(replyChannel, "%s: I've forgotten about %s" % (user, query)) else: self.msg( replyChannel, "%s: Okay, but %s didn't exist anyway" % (user, query)) # Check for thingiverse mention if canReply: match = self.thingMention.search(parseMsg) if match: thingId = int(match.group(2)) print "Match for thingiverse query item %s" % thingId rows = self.db.addThingiverseRef(thingId) refs = int(rows[0][0]) title = rows[0][1] if title is None: print "Attemping to get title for thingiverse ID %s" % thingId agent = Agent(reactor) titleQuery = agent.request( 'GET', 'https://www.thingiverse.com/thing:%s' % thingId, Headers({'User-Agent': ['gthx IRC bot']}), None) def titleResponse(title): if title: title = unescape(title) self.db.addThingiverseTitle(thingId, title) print "The title for thing %s is: %s " % (thingId, title) reply = '%s linked to "%s" on thingiverse => %s IRC mentions' % ( user, title, refs) self.msg(replyChannel, reply) else: print "No title found for thing %s" % (thingId) reply = '%s linked to thing %s on thingiverse => %s IRC mentions' % ( user, thingId, refs) self.msg(replyChannel, reply) def queryResponse(response): if response.code == 200: finished = Deferred() finished.addCallback(titleResponse) response.deliverBody(TitleParser(finished)) return finished print "Got error response from thingiverse query: %s" % ( response) titleResponse(None) return None titleQuery.addCallback(queryResponse) else: print "Already have a title for thing %s: %s" % (thingId, title) reply = '%s linked to "%s" on thingiverse => %s IRC mentions' % ( user, title, refs) self.msg(replyChannel, reply) # Check for youtube mention if canReply: match = self.youtubeMention.search(parseMsg) if match: youtubeId = 
match.group(3) fullLink = match.group(0) print "Match for youtube query item %s" % youtubeId rows = self.db.addYoutubeRef(youtubeId) refs = int(rows[0][0]) title = rows[0][1] if title is None: print "Attemping to get title for youtubeId %s" % youtubeId agent = Agent(reactor) titleQuery = agent.request( 'GET', 'https://www.youtube.com/watch?v=%s' % youtubeId, Headers({'User-Agent': ['gthx IRC bot']}), None) def titleResponse(title): if title: title = unescape(title) self.db.addYoutubeTitle(youtubeId, title) print "The title for video %s is: %s " % ( youtubeId, title) reply = '%s linked to YouTube video "%s" => %s IRC mentions' % ( user, title, refs) print "Reply is: %s" % reply self.msg(replyChannel, reply) print "Message sent." else: print "No title found for youtube video %s" % ( youtubeId) reply = '%s linked to a YouTube video with an unknown title => %s IRC mentions' % ( fullLink, refs) self.msg(replyChannel, reply) def queryResponse(response): if response.code == 200: finished = Deferred() finished.addCallback(titleResponse) response.deliverBody(TitleParser(finished)) return finished print "Got error response from youtube query: %s:%s" % ( response.code, response.phrase) pprint(list(response.headers.getAllRawHeaders())) titleResponse(None) return None titleQuery.addCallback(queryResponse) else: print "Already have a title for item %s: %s" % (youtubeId, title) reply = '%s linked to YouTube video "%s" => %s IRC mentions' % ( user, title, refs) self.msg(replyChannel, reply)
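# Both mention handlers above stream the page into a TitleParser that is not
# defined in this snippet. A minimal sketch, assuming it only needs to pull
# the first <title> element out of the buffered HTML:
import re
from twisted.internet.protocol import Protocol

class TitleParser(Protocol):
    def __init__(self, finished):
        self.finished = finished
        self.chunks = []

    def dataReceived(self, data):
        self.chunks.append(data)

    def connectionLost(self, reason):
        html = b''.join(self.chunks).decode('utf-8', 'replace')
        m = re.search(r'<title[^>]*>(.*?)</title>', html,
                      re.IGNORECASE | re.DOTALL)
        # Fires with the title text, or None when no <title> was found.
        self.finished.callback(m.group(1).strip() if m else None)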
def connectionLost(self, reason): self.deferred.callback(b''.join(self.dataBuffer)) def readBody(response): d = defer.Deferred() response.deliverBody( SimpleBodyProtocol(response.code, response.phrase, d)) return d try: from twisted.web.client import HTTPConnectionPool pool = HTTPConnectionPool(reactor) pool._factory.noisy = False agent = RedirectAgent(Agent(reactor, pool=pool)) except ImportError: agent = RedirectAgent(Agent(reactor)) eqnotice_config = config.YAMLConfig( "cfg/EQ_Notice.config.yml", { 'enabled': True, 'timer': 60, 'debug': False, '1': "http://pso2.acf.me.uk/pso2eq.txt" }, True) # HTTP Headers ETag_Headers = ['', '', '', '', '', '', '', '', '', ''] Modified_Headers = ['', '', '', '', '', '', '', '', '', ''] # HTTP Modified in time
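# A short usage sketch for the RedirectAgent/readBody pair configured above;
# RedirectAgent follows 3xx redirects before the body protocol runs. The
# Headers import is assumed from twisted.web.http_headers.
from twisted.web.http_headers import Headers

def fetch_following_redirects(url):
    d = agent.request(b'GET', url,
                      Headers({b'User-Agent': [b'EQ_Notice']}), None)
    d.addCallback(readBody)  # the module-level readBody defined above
    return d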
def __init__( self, crypto_key=None, datadog_api_key=None, datadog_app_key=None, datadog_flush_interval=None, hostname=None, port=None, router_scheme=None, router_hostname=None, router_port=None, endpoint_scheme=None, endpoint_hostname=None, endpoint_port=None, router_conf={}, router_tablename="router", router_read_throughput=5, router_write_throughput=5, storage_tablename="storage", storage_read_throughput=5, storage_write_throughput=5, message_tablename="message", message_read_throughput=5, message_write_throughput=5, statsd_host="localhost", statsd_port=8125, resolve_hostname=False, max_data=4096, # Reflected up from UDP Router wake_timeout=0, env='development', enable_cors=False, s3_bucket=DEFAULT_BUCKET, senderid_expry=SENDERID_EXPRY, senderid_list={}, hello_timeout=0, bear_hash_key=None, preflight_uaid="deadbeef00000000deadbeef000000000", ): """Initialize the Settings object Upon creation, the HTTP agent will initialize, all configured routers will be setup and started, logging will be started, and the database will have a preflight check done. """ # Use a persistent connection pool for HTTP requests. pool = HTTPConnectionPool(reactor) self.agent = Agent(reactor, connectTimeout=5, pool=pool) # Metrics setup if datadog_api_key: self.metrics = DatadogMetrics( api_key=datadog_api_key, app_key=datadog_app_key, flush_interval=datadog_flush_interval) elif statsd_host: self.metrics = TwistedMetrics(statsd_host, statsd_port) else: self.metrics = SinkMetrics() if not crypto_key: crypto_key = [Fernet.generate_key()] if not isinstance(crypto_key, list): crypto_key = [crypto_key] self.update(crypto_key=crypto_key) self.crypto_key = crypto_key if bear_hash_key is None: bear_hash_key = [] if not isinstance(bear_hash_key, list): bear_hash_key = [bear_hash_key] self.bear_hash_key = bear_hash_key self.max_data = max_data self.clients = {} # Setup hosts/ports/urls default_hostname = socket.gethostname() self.hostname = hostname or default_hostname if resolve_hostname: self.hostname = resolve_ip(self.hostname) self.port = port self.endpoint_hostname = endpoint_hostname or self.hostname self.router_hostname = router_hostname or self.hostname self.router_conf = router_conf self.router_url = canonical_url(router_scheme or 'http', self.router_hostname, router_port) self.endpoint_url = canonical_url(endpoint_scheme or 'http', self.endpoint_hostname, endpoint_port) # Database objects self.router_table = get_router_table(router_tablename, router_read_throughput, router_write_throughput) self.storage_table = get_storage_table(storage_tablename, storage_read_throughput, storage_write_throughput) self.message_table = get_rotating_message_table(message_tablename) self._message_prefix = message_tablename self.storage = Storage(self.storage_table, self.metrics) self.router = Router(self.router_table, self.metrics) # Used to determine whether a connection is out of date with current # db objects. There are three noteworty cases: # 1 "Last Month" the table requires a rollover. # 2 "This Month" the most common case. # 3 "Next Month" where the system will soon be rolling over, but with # timing, some nodes may roll over sooner. Ensuring the next month's # table is present before the switchover is the main reason for this, # just in case some nodes do switch sooner. 
self.create_initial_message_tables() # Run preflight check preflight_check(self.storage, self.router, preflight_uaid) # CORS self.cors = enable_cors # Force timeout in idle seconds self.wake_timeout = wake_timeout # Setup the routers self.routers = {} self.routers["simplepush"] = SimpleRouter( self, router_conf.get("simplepush")) self.routers["webpush"] = WebPushRouter(self, None) if 'apns' in router_conf: self.routers["apns"] = APNSRouter(self, router_conf["apns"]) if 'gcm' in router_conf: self.routers["gcm"] = GCMRouter(self, router_conf["gcm"]) # Env self.env = env self.hello_timeout = hello_timeout
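# canonical_url() is referenced above but not shown; a hedged reconstruction
# that omits default ports, which is the behavior the router/endpoint URLs
# appear to need:
def canonical_url(scheme, hostname, port=None):
    if port is None or (scheme, port) in (('http', 80), ('https', 443)):
        return "%s://%s" % (scheme, hostname)
    return "%s://%s:%s" % (scheme, hostname, port)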
def check_timeouts():
    # before we do anything, make sure it's not "quiet hours"
    # if it is, do nothing and run this method later
    if isQuietHours():
        global QUIET_START_TIME, QUIET_END_TIME
        # check again in 10 minutes...this is kind of silly, but hey
        print "*** Quiet hours are in effect (%d:00 to %d:00, currently: %s), calling again in 10 minutes..." % (
            QUIET_START_TIME.hour, QUIET_END_TIME.hour,
            datetime.datetime.now().strftime("%H:%M"))
        reactor.callLater(60 * 10, check_timeouts)
        return

    instances = TaskInstance.objects.get_timedout_tasks()
    for instance in instances:
        agent = Agent(reactor)
        # ensure that the user is not halted -- if they are, we can't execute this task :\
        if instance.patient.halted:
            # print "ERROR: Cannot timeout task: %s (%d), user is in the halt status" % (sched_task.task.name, sched_task.id)
            continue
        print "Timing out instance: %s (%d)" % (instance.task.name, instance.id)
        payload_dict = {'instanceid': instance.id}
        payload = urllib.urlencode(payload_dict)
        d = agent.request(
            'POST',
            urlparse.urljoin(settings.SCHEDULER_TARGET_URL, "timeout"),
            Headers({
                "Content-Type": ["application/x-www-form-urlencoded;charset=utf-8"],
                "Content-Length": [str(len(payload))]
            }),
            StringProducer(payload))
        d.addCallback(instance_timeout_finished, instanceid=instance.id)
        d.addErrback(instance_timeout_errored, instanceid=instance.id)

    # ***
    # also check pokes!
    # ***
    pokedinstances = TaskInstance.objects.get_poked_tasks()
    for instance in pokedinstances:
        agent = Agent(reactor)
        # ensure that the user is not halted -- if they are, we can't execute this task :\
        if instance.patient.halted:
            # print "ERROR: Cannot timeout task: %s (%d), user is in the halt status" % (sched_task.task.name, sched_task.id)
            continue
        print "Poking instance: %s (%d)" % (instance.task.name, instance.id)
        payload_dict = {'instanceid': instance.id}
        payload = urllib.urlencode(payload_dict)
        d = agent.request(
            'POST',
            urlparse.urljoin(settings.SCHEDULER_TARGET_URL, "poke"),
            Headers({
                "Content-Type": ["application/x-www-form-urlencoded;charset=utf-8"],
                "Content-Length": [str(len(payload))]
            }),
            StringProducer(payload))
        d.addCallback(instance_poke_finished, instanceid=instance.id)
        d.addErrback(instance_poke_errored, instanceid=instance.id)

    # run again in a bit
    reactor.callLater(settings.SCHEDULER_CHECK_INTERVAL, check_timeouts)
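# instance_timeout_finished / instance_timeout_errored (and the poke pair)
# are referenced above but not shown; hedged sketches of their likely shape,
# in the same Python 2 style as the scheduler:
def instance_timeout_finished(response, instanceid):
    print "Timeout POST for instance %d returned %d" % (instanceid, response.code)

def instance_timeout_errored(failure, instanceid):
    print "Timeout POST for instance %d failed: %s" % (instanceid, failure.getErrorMessage())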
class RSSFeedSource(BoostingSource): """ Credit mining source from a RSS feed. """ def __init__(self, session, rss_feed, boost_settings, torrent_insert_cb): BoostingSource.__init__(self, session, rss_feed, boost_settings, torrent_insert_cb) self.parsed_rss = None self.torrent_store = self.session.lm.torrent_store # Not all RSS feeds provide us with the infohash, # so we use a fake infohash based on the URL (generated by sha1) to identify the torrents. # keys : fake infohash, value : real infohash. Type : (length 20 string, binary) self.fake_infohash_id = {} self.title = "" self.description = "" self.total_torrents = 0 self.torrent_db = self.session.open_dbhandler(NTFY_TORRENTS) def _on_success_rss(self, body_rss, rss_feed): """ function called when RSS successfully read """ self.register_task(str(self.source) + "_update", LoopingCall(self._update), 10, interval=self.interval) self.parsed_rss = feedparser.parse(body_rss) self._logger.info("Got RSS feed %s", rss_feed) self.ready = True def _on_error_rss(self, failure, rss_feed): """ function called when RSS failed except from 503 aborting load the source """ failure.trap(CancelledError, Error) self._logger.error("Aborting load on : %s. Reason : %s.", rss_feed, failure.getErrorMessage()) if "503" in failure.getErrorMessage(): self.register_task( str(self.source) + "_load_delay", reactor.callLater(10, self._load, rss_feed)) return if rss_feed in self.boosting_manager.boosting_sources: self.boosting_manager.set_enable_mining(rss_feed, False) def _load(self, rss_feed): defer_feed = getPage(rss_feed) defer_feed.addCallback(self._on_success_rss, rss_feed) defer_feed.addErrback(self._on_error_rss, rss_feed) self.register_task(str(self.source) + "_wait_feed", defer_feed) def _update(self): if len(self.torrents) >= self.max_torrents: return feed_elem = self.parsed_rss['feed'] self.title = feed_elem['title'] self.description = feed_elem['subtitle'] torrent_keys = [ 'name', 'metainfo', 'creation_date', 'length', 'num_files', 'num_seeders', 'num_leechers', 'enabled', 'last_seeding_stats' ] def __cb_body(body_bin, item_torrent_entry): tdef = None metainfo = None # tdef.get_infohash returned binary string by length 20 try: metainfo = lt.bdecode(body_bin) tdef = TorrentDef.load_from_dict(metainfo) self.session.save_collected_torrent(tdef.get_infohash(), body_bin) except ValueError, err: self._logger.error( "Could not parse/save torrent, skipping %s. Reason: %s", item_torrent_entry['link'], err.message + ", metainfo is " + ("not " if metainfo else "") + "None") if tdef and len(self.torrents) < self.max_torrents: # Create a torrent dict. real_infohash = tdef.get_infohash() torrent_values = [ item_torrent_entry['title'], tdef, tdef.get_creation_date(), tdef.get_length(), len(tdef.get_files()), -1, -1, self.enabled, {} ] # store the real infohash to generated infohash self.torrents[real_infohash] = dict( zip(torrent_keys, torrent_values)) self.fake_infohash_id[sha1( item_torrent_entry['id']).digest()] = real_infohash # manually generate an ID and put this into DB self.torrent_db.addOrGetTorrentID(real_infohash) self.torrent_db.addExternalTorrent(tdef) # Notify the BoostingManager and provide the real infohash. if self.torrent_insert_callback: self.torrent_insert_callback(self.source, real_infohash, self.torrents[real_infohash]) elif tdef: self._logger.debug( "Max torrents in source reached. 
Not adding %s", tdef.get_infohash()) def __success_cb(response, item_dict): return readBody(response).addCallback( __cb_body, item_dict).addErrback(self._on_err) regex_unescape_xml = re.compile(r"\&\#(x?[0-9a-fA-F]+);") for item in self.parsed_rss['entries']: f_links = item['links'] for link in f_links: if link['type'] == u'application/x-bittorrent': url = regex_unescape_xml.sub(ent2chr, str(link['href'])) fake_infohash = sha1(url).digest() if fake_infohash not in self.fake_infohash_id.keys(): # create Agent to download torrent file self.fake_infohash_id[fake_infohash] = None agent = Agent(reactor) ses_agent = agent.request( 'GET', # http://stackoverflow.com/a/845595 urllib.quote(url, safe="%/:=&?~#+!$,;'@()*[]"), Headers({'User-Agent': ['Tribler ' + version_id]}), None) ses_agent.addCallback(__success_cb, item).addErrback(self._on_err)