Example #1
@gen.coroutine
def increment_deployment_counter(bundle_id, charmworld_url):
    """Increment the deployment count in Charmworld.

    If the call to Charmworld fails we log the error but don't report it.
    This counter is a 'best effort' attempt but it will not impede our
    deployment of the bundle.

    Arguments are:
          - bundle_id: the ID for the bundle in Charmworld.
          - charmworld_url: the URL for charmworld, including the protocol.
            If None, do nothing.

    Returns True if the counter is successfully incremented else False.
    """
    if charmworld_url is None:
        raise gen.Return(False)

    if not all((isinstance(bundle_id, basestring), isinstance(charmworld_url, basestring))):
        raise gen.Return(False)

    path = "metric/deployments/increment"
    url = u"{}api/3/bundle/{}/{}".format(charmworld_url, urllib.quote(bundle_id), path)
    logging.info("Incrementing bundle deployment count using\n{}.".format(url.encode("utf-8")))
    client = AsyncHTTPClient()
    # We use a GET instead of a POST since there is no request body.
    try:
        resp = yield client.fetch(url)
    except Exception as exc:
        logging.error("Attempt to increment deployment counter failed.")
        logging.error("URL: {}".format(url))
        logging.exception(exc)
        raise gen.Return(False)
    success = resp.code == 200
    raise gen.Return(success)
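Because the result is delivered via gen.Return, callers yield this coroutine from another coroutine (or drive it with the IOLoop). A minimal sketch of a call site, assuming the function above is importable; the bundle ID and URL are placeholders:

from tornado import gen
from tornado.ioloop import IOLoop

@gen.coroutine
def main():
    # placeholder arguments, for illustration only
    ok = yield increment_deployment_counter(
        '~user/wiki/1', 'https://charmworld.example/')
    print(ok)  # True if the counter was incremented

IOLoop.current().run_sync(main)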
Example #2
    @gen.coroutine
    def get(self, path):
        self.set_header('Content-Type', 'image/gif')
        analytics_client_id = self.get_cookie(cookie_name) or str(uuid.uuid4())
        self.set_cookie(cookie_name, analytics_client_id)
        self.write(gif)
        self.finish()

        http_client = AsyncHTTPClient()
        event = gmp.Event(category='RSS', action='view-post', label=path)

        for data, headers in gmp.payloads(
                tracking_id=self.settings['google_analytics_rss_id'],
                client_id=analytics_client_id,
                requestable=event):

            try:
                yield http_client.fetch(
                    gmp.TRACKING_URI,
                    method='POST',
                    headers=headers,
                    body=urllib.urlencode(data))

                logging.info('Tracked RSS view for "%s"' % path)
            except Exception:
                logging.exception('Error tracking RSS view for "%s".' % path)
Example #3
class SeedSearch(Spiderman):
    """Searches YouTube using keywords/query.
    """

    functions = 'search'

    def search(self):

        self.done = False
        self.requests_made = 1

        # callback function parameters
        search_key = self.search_key = str(datetime.now())
        related_search = self.related_search
        client = self.client

        cb = lambda x: related_search(x, 0)

        keywords = [str(k) for k in self.input_object['seed']]
        search_query = '+'.join(keywords).replace(' ', '+')

        global pages
        global max_results

        self.http_client = AsyncHTTPClient()

        for start_index in range(1, pages):
            request_url = "http://gdata.youtube.com/feeds/api/videos?q={0}&orderby=relevance&alt=jsonc&v=2&max-results={1}&start-index={2}".format(
                    search_query,
                    max_results,
                    start_index*25)
            self.http_client.fetch(request_url, callback=cb)
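These fetch() calls are fire-and-forget: cb only runs once an IOLoop is spinning, which the Spiderman base class presumably arranges. A one-line sketch of that assumption:

from tornado.ioloop import IOLoop

IOLoop.current().start()  # cb() fires as each gdata response arrives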
Example #4
 def es(self, url, query={}, body={}):
     http_client = AsyncHTTPClient()         
     if not url.startswith('/'):
         url = '/'+url
     for arg in query:
         if not isinstance(query[arg], list):
             query[arg] = [query[arg]]
     try:
         response = yield http_client.fetch(
             'http://{}{}?{}'.format(
                 config['api']['elasticsearch'],
                 url,
                 utils.url_encode_tornado_arguments(query) \
                     if query else '',
             ),
             method='POST' if body else 'GET',
             body=utils.json_dumps(body) if body else None,
         )
         return utils.json_loads(response.body)
     except HTTPError as e:
         try:
             extra = utils.json_loads(e.response.body)
         except Exception:
             extra = {'error': e.response.body.decode('utf-8')}
         raise exceptions.Elasticsearch_exception(
             e.code,
             extra,
         )
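utils.url_encode_tornado_arguments is a project helper that is not shown here. Since the method normalizes every query value to a list first, a hypothetical stand-in can be sketched with the standard library:

from urllib.parse import urlencode

def url_encode_tornado_arguments(args):
    # Tornado-style arguments map each key to a list of values;
    # doseq=True emits one key=value pair per list element.
    return urlencode(args, doseq=True)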
Example #5
    def test_post_participants_valid_request(self):
        db = self.get_db_client().udaan

        event = dict(
                eventName="dummyEvent",
                currentRound=0,
        )

        body = dict(
                names="Dummy Names",
                mobileNumber="9123456789"
        )

        request_body = json.dumps(body)

        event_id = yield db.events.insert(event)
        str_event_id = str(event_id)
        http_client = AsyncHTTPClient()
        response = yield http_client.fetch("http://localhost:8000/api/event_management/participants", method="POST",
                                           headers=dict(Authorization=str_event_id), body=request_body)
        response_body = json.loads(response.body.decode())
        self.assertEqual(response.code, 200)
        self.assertEqual(response_body["status"], 200)

        participant = yield db.participants.find_one({"_id": ObjectId(response_body["message"])})
        yield db.participants.remove({"_id": participant["_id"]})
        yield db.events.remove({"_id": event_id})
        self.assertEqual(response_body["message"], participant["_id"].__str__())
Example #6
    def test_220_jobs(self):
        client = AsyncHTTPClient()
        data = {
            'dataset_id': 'foo',
            'job_index': 0,
        }
        r = yield client.fetch('http://localhost:%d/jobs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': b'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        job_id = ret['result']

        data2 = {'status':'failed'}
        r = yield client.fetch('http://localhost:%d/datasets/%s/jobs/%s/status'%(self.port,data['dataset_id'],job_id),
                method='PUT', body=json.dumps(data2),
                headers={'Authorization': b'bearer '+self.token})
        self.assertEqual(r.code, 200)

        r = yield client.fetch('http://localhost:%d/datasets/%s/jobs/%s'%(self.port,data['dataset_id'],job_id),
                headers={'Authorization': b'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        self.assertIn('status', ret)
        self.assertEqual(ret['status'], 'failed')
Example #7
class ExpandSearch(Spiderman):
    """Expands youtube video network.
    """

    functions = 'expand'

    def search(self):

        self.done = False
        self.requests_made = 1
        self.network = {}

        # callback function parameters
        search_key = self.search_key = str(datetime.now())
        related_search = self.related_search
        client = self.client

        cb = lambda x: related_search(x, 0)

        video_ids = [str(k) for k in self.input_object['seed']]

        global pages
        global max_results

        self.http_client = AsyncHTTPClient()

        for video_id in video_ids:
            self.http_client.fetch("http://gdata.youtube.com/feeds/api/videos/{}/related?alt=jsonc&v=2".format(video_id),
                              callback=cb)
Example #8
@gen.coroutine
def f():
    print "hi"
    client = AsyncHTTPClient()
    print "hello"
    res = yield client.fetch('http://gfsoso.com')
    print "world"
    print res
Example #9
    def __sweep(self):
        """
        the function called by the cleanup service
        """
        #print 'candid: ' + str(len(self.__remove_candidates))
        self.__log.info('cleanup service - Sweep started')
        for sharing_secret in self.__remove_candidates:
            #cleanup all the listeners
            candidate = self.__remove_candidates[sharing_secret]
            #In case a lot of actions are waiting to be executed
            #and are clogged in the space, don't clean it up; give it
            #a chance to drain until the next sweep period
            if not candidate.is_being_processed():
                self.__log.info('cleanup service - cleaning candidate for %s' % sharing_secret)
                candidate.cleanup()
                #notify the load balancer of the cleanup
                http = AsyncHTTPClient()
                load_balancer = Properties.load_balancer_url
                url = '/'.join([load_balancer, 'SharingFactory',sharing_secret])
                http.fetch(url, method='DELETE', callback=None)
                #yield gen.Task(http.fetch, url, method = 'DELETE')
                #remove if from stored sharing spaces
                del(self.__sharing_spaces[sharing_secret])
            else:
                self.__log.info('cleanup service - skipping candidate for %s: it is being processed' % sharing_secret)


        #now nominate every one
        self.__remove_candidates.clear()
        for sharing_secret in self.__sharing_spaces:
            self.__remove_candidates[sharing_secret] = \
                self.__sharing_spaces[sharing_secret]
        self.__log.info('cleanup service - Sweep finished')
        self.timer = Timer(self.SWEEP_PERIOD, self.__sweep)
        self.timer.start()
Example #10
def run_crawler():
    """Run a crawler iteration"""
    http_client = AsyncHTTPClient()
    # request the OVH availability API asynchronously
    response = yield http_client.fetch(URL)
    response_json = json.loads(response.body.decode('utf-8'))
    if not response_json or not response_json['answer']:
        return
    availability = response_json['answer']['availability']
    for item in availability:
        # look for servers of required types in OVH availability list
        if SERVER_TYPES.get(item['reference']) in config['servers']:
            # make a flat list of zones where servers are available
            available_zones = [e['zone'] for e in item['zones']
                               if e['availability'] != 'unavailable']
            # iterate over all tracked zones and set availability states
            for zone in config['zones']:
                server = SERVER_TYPES[item['reference']]
                state_id = '%s_available_in_%s' % (server, zone)
                # update state for each tracked zone
                text = "Server %s is available in %s" % (server, zone)
                message = {
                    'title': "Server %s available" % server,
                    'text': text,
                    'url': "http://www.kimsufi.com/fr/index.xml"
                }
                update_state(state_id, zone in available_zones, message)
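Because run_crawler yields a fetch, it is presumably decorated with @gen.coroutine in the full module. A minimal sketch of driving one iteration under that assumption:

from tornado.ioloop import IOLoop

IOLoop.current().run_sync(run_crawler)  # runs a single crawl iteration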
Example #11
    def test_api(self):
        print("Running test")
        self.mock_requests = {}
        # invoke service to be tested
        client = AsyncHTTPClient(self.io_loop)

        client.fetch(self.test_data['service_endpoint'], self.stop)
        

        mocks_with_assertions = [x for x in self.test_data['mocks'] if 'body' in x['mock']['request']]
        for mock in mocks_with_assertions:
            self.wait(timeout=30)
            self.assertEqual(flatten_text(self.mock_requests[mock['mock']['name']].decode("utf-8")), flatten_text(mock['mock']['request']['body']))
            #TODO: Assert request headers
        response = self.wait()
        # print(response)

        # perform assertions
        for assertion in self.test_data['assertions']:
            if 'http_code' in assertion:
                self.assertEqual(response.code, assertion['http_code'])
            if 'response' in assertion:
                self.assertEqual(flatten_text(response.body.decode("utf-8")), flatten_text(assertion['response']))
            if 'content-type' in assertion:
                self.assertEqual(response.headers['Content-Type'], assertion['content-type'])
Example #12
 def _search2(self, q, limit):
     data = []
     http_client = AsyncHTTPClient()
     requests = [HTTPRequest(url.format(quote(q)), connect_timeout=1.0, request_timeout=60.0)
                 for url, _ in self.search_interfaces2]
     responses = yield [http_client.fetch(r) for r in requests]
     i = 0
     for r in responses:
         j = json.loads(r.body.decode())
         n = self.search_interfaces2[i][1]
         i += 1
         if j['numFound'] > 0:
             docs = j['docs']
             if n == 'F10搜索':
                 stocks = j['filters'][0]['stock']
                 doclimit = limit['f10']
             else:
                 stocks = j['filters'][1]['stock']
                 if n == '研报搜索':
                     doclimit = limit['research']
                 elif n == '新闻搜索':
                     doclimit = limit['news']
                 else:
                     doclimit = limit['notice']
             data.append({
                 'name': n,
                 'data': {
                     'stock': stocks[:limit['related_stock']],
                     'docs': docs[:doclimit]
                 }
             })
     return data
Example #13
	def access_token_for_id(cls, id, callback):
		"""Returns the access token for an id, acquiring a new one if necessary."""
		token = Cache.get(cls.auth_cache_key_template % id)
		if token:
			return IOLoop.instance().add_callback(lambda: callback(token))

		# If we don't have an access token cached, see if we have a refresh token
		token = TokenIdMapping.lookup_refresh_token(id)
		if token:
			post_body = urllib.urlencode({
				'client_id': Config.get('oauth', 'client-id'),
				'client_secret': Config.get('oauth', 'client-secret'),
				'refresh_token': token,
				'grant_type': 'refresh_token',
			})
			http_client = AsyncHTTPClient()
			return http_client.fetch(
				'https://accounts.google.com/o/oauth2/token',
				lambda response: cls.on_refresh_complete(response, id, callback),
				method='POST',
				body=post_body,
				request_timeout=20.0,
				connect_timeout=15.0,
			)
		else:
			logging.error("Unable to update access token for %s, no refresh token stored.", id)
			return IOLoop.instance().add_callback(lambda: callback(None))
Example #14
def set_motion_detection(camera_id, enabled):
    from tornado.httpclient import HTTPRequest, AsyncHTTPClient
    
    thread_id = camera_id_to_thread_id(camera_id)
    if thread_id is None:
        return logging.error('could not find thread id for camera with id %s' % camera_id)
    
    if not enabled:
        _motion_detected[camera_id] = False
    
    logging.debug('%(what)s motion detection for camera with id %(id)s' % {
            'what': ['disabling', 'enabling'][enabled],
            'id': camera_id})
    
    url = 'http://127.0.0.1:7999/%(id)s/detection/%(enabled)s' % {
            'id': thread_id,
            'enabled': ['pause', 'start'][enabled]}
    
    def on_response(response):
        if response.error:
            logging.error('failed to %(what)s motion detection for camera with id %(id)s: %(msg)s' % {
                    'what': ['disable', 'enable'][enabled],
                    'id': camera_id,
                    'msg': utils.pretty_http_error(response)})
        
        else:
            logging.debug('successfully %(what)s motion detection for camera with id %(id)s' % {
                    'what': ['disabled', 'enabled'][enabled],
                    'id': camera_id})

    request = HTTPRequest(url, connect_timeout=_MOTION_CONTROL_TIMEOUT, request_timeout=_MOTION_CONTROL_TIMEOUT)
    http_client = AsyncHTTPClient()
    http_client.fetch(request, on_response)
Example #15
class TaskPool:
    def __init__(self, maxClients):
        self._ioloop = ioloop.IOLoop()
        self._httpClient = AsyncHTTPClient(self._ioloop, maxClients)
        self._taskNum = 0
        
    def run(self):
        self._check()
        self._ioloop.start()

    def spawn(self, request, callback, **kwargs):
        def wrapped(response):
            self._taskNum -= 1
            try:
                callback(response)
            except Exception:
                print 'spawn error:', traceback.format_exc()
                
        self._taskNum += 1
        self._httpClient.fetch(request, wrapped, **kwargs)

    def _check(self):
        def callback():
            if self._taskNum == 0:
                self._ioloop.stop()

            return self._check()
                
        self._ioloop.add_callback(callback)
Example #16
 def call(self, callback, path, **params):
     """ Fetch a result and validate before calling callback """
     client = AsyncHTTPClient()
     params.setdefault("ywsid", self.key)
     args = urllib.urlencode(params)
     url = "%s%s?%s" % (BASE, path, args)
     return client.fetch(url, callback=self.wrap(callback))
Example #17
def get_motion_detection(camera_id, callback):
    from tornado.httpclient import HTTPRequest, AsyncHTTPClient
    
    thread_id = camera_id_to_thread_id(camera_id)
    if thread_id is None:
        error = 'could not find thread id for camera with id %s' % camera_id
        logging.error(error)
        return callback(error=error)

    url = 'http://127.0.0.1:7999/%(id)s/detection/status' % {'id': thread_id}
    
    def on_response(response):
        if response.error:
            return callback(error=utils.pretty_http_error(response))

        enabled = bool(response.body.lower().count('active'))
        
        logging.debug('motion detection is %(what)s for camera with id %(id)s' % {
                'what': ['disabled', 'enabled'][enabled],
                'id': camera_id})

        callback(enabled)

    request = HTTPRequest(url, connect_timeout=_MOTION_CONTROL_TIMEOUT, request_timeout=_MOTION_CONTROL_TIMEOUT)
    http_client = AsyncHTTPClient()
    http_client.fetch(request, callback=on_response)
Example #18
def async_fetch_future(url):
    http_client = AsyncHTTPClient()
    my_future = Future()
    fetch_future = http_client.fetch(url)
    fetch_future.add_done_callback(lambda f: my_future.set_result(f.result()))

    return my_future
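A sketch of consuming the returned Future without writing a coroutine, via IOLoop.add_future (the URL is a placeholder):

from tornado.ioloop import IOLoop

def on_done(future):
    print(len(future.result().body))
    IOLoop.current().stop()

IOLoop.current().add_future(async_fetch_future('http://example.com/'), on_done)
IOLoop.current().start()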
Example #19
def asynchronous_fetch(url, callback):
    http_client = AsyncHTTPClient()

    def handle_res(res):
        callback(res.body)

    http_client.fetch(url, callback=handle_res)
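For comparison, the same fetch written as a native coroutine, the idiom Tornado has favored since async/await became available:

async def async_fetch(url):
    http_client = AsyncHTTPClient()
    response = await http_client.fetch(url)
    return response.body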
Example #20
def make_request1(alert_type,ticker,msg_data,reg_ids,link):
    json_data = {
        "collapse_key" : alert_type, 
        "data" : {
            "data": msg_data,
            "ticker" : ticker,
            "link" : link,
        }, 
        "registration_ids": reg_ids,
    }


    url = 'https://android.googleapis.com/gcm/send'
    myKey = "" 
    data = json.dumps(json_data)
    headers = {'Content-Type': 'application/json', 'Authorization': 'key=%s' % myKey}
    
    http_client = AsyncHTTPClient()
    yield http_client.fetch(url, method='POST', body=data, headers=headers)




make_request("Thank you Note! ~%s" % users["name"], "Thank you for letting us know!",
             "Thanks for posting %s" % (event["name"]), [androids["reg_id"]],
             "findergpstracking://gpstracking#/app/setup")
Example #21
    def search(self, query, callback):
        def cb(response):
            # TODO: check response for errors

            data = json.load(response.buffer)
            entries = data['feed']['entry']

            results = []
            for entry in entries:
                meta = entry['media$group']

                # construct video URL (with autoplay enabled)
                video = 'https://www.youtube.com/embed/{0}?autoplay=1'.format(
                    meta['yt$videoid']['$t'])

                thumbnail = filter(lambda t: t['yt$name'] == 'default',
                    meta['media$thumbnail'])[0]
                thumbnail = thumbnail['url']

                result = SearchResult(meta['media$title']['$t'], video, self.id,
                    thumbnail)
                results.append(result)

            callback(results)

        # Youtube API documentation:
        # https://developers.google.com/youtube/2.0/developers_guide_protocol
        client = AsyncHTTPClient()
        qs = urlencode({
            'q':           query,
            'max-results': '5',
            'alt':         'json',
            'v':           '2'
        })
        client.fetch('https://gdata.youtube.com/feeds/api/videos/?' + qs, cb)
Example #22
def test_broadcast(s, a, b):
    ss = HTTPScheduler(s)
    ss.listen(0)
    s.services['http'] = ss

    aa = HTTPWorker(a)
    aa.listen(0)
    a.services['http'] = aa
    a.service_ports['http'] = aa.port
    s.worker_info[a.address]['services']['http'] = aa.port

    bb = HTTPWorker(b)
    bb.listen(0)
    b.services['http'] = bb
    b.service_ports['http'] = bb.port
    s.worker_info[b.address]['services']['http'] = bb.port

    client = AsyncHTTPClient()

    a_response = yield client.fetch('http://localhost:%d/info.json' % aa.port)
    b_response = yield client.fetch('http://localhost:%d/info.json' % bb.port)
    s_response = yield client.fetch('http://localhost:%d/broadcast/info.json'
                                    % ss.port)
    assert (json.loads(s_response.body.decode()) ==
            {a.address: json.loads(a_response.body.decode()),
             b.address: json.loads(b_response.body.decode())})

    ss.stop()
    aa.stop()
    bb.stop()
Example #23
def test_with_data(e, s, a, b):
    ss = HTTPScheduler(s)
    ss.listen(0)

    L = e.map(inc, [1, 2, 3])
    L2 = yield e._scatter(['Hello', 'world!'])
    yield _wait(L)

    client = AsyncHTTPClient()
    response = yield client.fetch('http://localhost:%d/memory-load.json' %
                                  ss.port)
    out = json.loads(response.body.decode())

    assert all(isinstance(v, int) for v in out.values())
    assert set(out) == {a.address, b.address}
    assert sum(out.values()) == sum(map(getsizeof,
                                        [1, 2, 3, 'Hello', 'world!']))

    response = yield client.fetch('http://localhost:%s/memory-load-by-key.json'
                                  % ss.port)
    out = json.loads(response.body.decode())
    assert set(out) == {a.address, b.address}
    assert all(isinstance(v, dict) for v in out.values())
    assert all(k in {'inc', 'data'} for d in out.values() for k in d)
    assert all(isinstance(v, int) for d in out.values() for v in d.values())

    assert sum(v for d in out.values() for v in d.values()) == \
            sum(map(getsizeof, [1, 2, 3, 'Hello', 'world!']))

    ss.stop()
Example #24
    def post(self):

        json_str = self.get_argument("json_msg")
        # print "json_str: ",json_str
        value_obj = json.loads(json_str)

        com_val = COMMAND_URL_DICT[value_obj["command"]]
        com_url = com_val[0]
        com_func = com_val[1]
        url = "http://115.28.143.67:" + str(PORT) + com_url
        print "---------------------------------------"
        print "request url: " + url
        print "request json: " + json_str
        print "---------------------------------------"

        if "GET" == com_func:
            request = HTTPRequest(url, "GET")
            http = AsyncHTTPClient()
            response = yield http.fetch(request)
            print "---------------------------------------"
            print "response json: " + response.body
            print "---------------------------------------"
            self.write(response.body)
        elif "POST" == com_func:
            request = HTTPRequest(url, "POST", body=json_str)
            http = AsyncHTTPClient()
            response = yield http.fetch(request)
            print "---------------------------------------"
            print "response json: " + response.body
            print "---------------------------------------"
            self.write(response.body)
        else:
            pass
Example #25
 def post(self, *args, **kwargs):
     # TODO
     # save the messages to local database
     # file = self.request.files['image'][0]
     # file_name = file["filename"]
     # image = file['body']
     text = self.get_argument("text")
     data = {
         "from": "Mailgun Sandbox <*****@*****.**>",
         "to": "<*****@*****.**>",
         "subject": "Hello Udaan",
         "text": text,
     }
     data = urlencode(data)
     client = AsyncHTTPClient()
     headers_object = HTTPHeaders({"X-Mailgun-Variables": dumps({"X-Mailgun-Variables": {"password": "******"}})})
     request_object = HTTPRequest("https://api.mailgun.net/v3/sandbox1713f24a60034b5ab5e7fa0ca2faa9b6.mailgun.org"
                                  "/messages",
                                  method="POST",
                                  headers=headers_object,
                                  body=data,
                                  auth_username="******",
                                  auth_password="******"
                                  )
     print(request_object.headers.get_list("X-Mailgun-Variables"))
     response = yield client.fetch(request_object)
     client.close()
     print(response)
     if response.code == 200:
         msg = "email send successfully"
         self.respond(msg, response.code)
     else:
         msg = "Please try again"
         self.respond(msg, response.code)
Example #26
 def w():
     http_client = AsyncHTTPClient()
     if req_cnt % 2 == 0:
         response = yield http_client.fetch("http://localhost:8889/wait/%s/%s" % (5, req_cnt))
     else:
         response = yield http_client.fetch("http://localhost:8890/wait/%s/%s" % (1, req_cnt))
     print ">>>> response >>>", response, response.body, handler, handler.req_cnt
Example #27
    def post(self):
        json_str = self.get_argument("json_msg")
        url = self.get_argument("url")
        method = self.get_argument("method")

        url = "http://115.28.143.67:" + str(PORT) + url

        print "------request url: " + url + "-----method:" + method

        if method == "POST" or method == "PUT":
            request = HTTPRequest(url, method, body=json_str)
            http = AsyncHTTPClient()
            response = yield http.fetch(request)

            print "------response json: " + response.body

            self.write(response.body)

        if method == "DELETE":
            request = HTTPRequest(url, method)
            http = AsyncHTTPClient()
            response = yield http.fetch(request)

            print "------response json: " + response.body

            self.write(response.body)
Example #28
def test_with_status(e, s, a, b):
    ss = HTTPScheduler(s)
    ss.listen(0)

    client = AsyncHTTPClient()
    response = yield client.fetch('http://localhost:%d/tasks.json' %
                                  ss.port)
    out = json.loads(response.body.decode())
    assert out['total'] == 0
    assert out['processing'] == 0
    assert out['failed'] == 0
    assert out['in-memory'] == 0
    assert out['ready'] == 0
    assert out['waiting'] == 0

    L = e.map(div, range(10), range(10))
    yield _wait(L)

    client = AsyncHTTPClient()
    response = yield client.fetch('http://localhost:%d/tasks.json' %
                                  ss.port)
    out = json.loads(response.body.decode())
    assert out['failed'] == 1
    assert out['in-memory'] == 9
    assert out['ready'] == 0
    assert out['total'] == 10
    assert out['waiting'] == 0

    ss.stop()
Example #29
    def tsend_log(self,
            message,
            severity,
            filename=None,
            url=None,
            status_code=None,
            headers=None,
            parameters=None,
            stacktrace=False):
        from tornado.httpclient import AsyncHTTPClient

        d = self._build_message(
                message,
                severity,
                filename,
                url,
                status_code,
                headers,
                parameters,
                stacktrace)

        # want to use the better client here.
        AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")

        AsyncHTTPClient().fetch(
                self._build_url(),
                lambda resp: None,
                method="POST",
                body=json.dumps(d),
                headers=headers)
Example #30
def test_unexpected_error(error, msg):
    class Handler(object):
        def handle(self, request):
            raise error

    inbound = HTTPInbound()
    inbound.start(Handler())

    client = AsyncHTTPClient()
    req = HTTPRequest(
        url='http://localhost:%s' % inbound.port,
        method='POST',
        headers={
            headers.CALLER: 'caller',
            headers.SERVICE: 'service',
            headers.PROCEDURE: 'procedure',
            headers.ENCODING: 'json',
            headers.TTL: '10000',
        },
        body='',
    )

    with pytest.raises(HTTPError) as e:
        yield client.fetch(req)

    e = e.value
    assert e.code >= 500 and e.code < 600
    assert e.response.body == msg
Example #31
async def test_root_redirect(c, s, a, b):
    http_client = AsyncHTTPClient()
    response = await http_client.fetch("http://localhost:%d/" %
                                       s.http_server.port)
    assert response.code == 200
    assert "/status" in response.effective_url
Example #32
def configure_tornado():
    from tornado.httpclient import AsyncHTTPClient

    AsyncHTTPClient.configure('tornado.curl_httpclient.CurlAsyncHTTPClient', max_clients=16)
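AsyncHTTPClient.configure only affects clients created afterwards (instances are cached per IOLoop), so this must run before the first AsyncHTTPClient() call; the curl backend also requires pycurl to be installed. A short sketch:

from tornado.httpclient import AsyncHTTPClient

configure_tornado()
client = AsyncHTTPClient()  # subsequent instances use the curl backend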
Example #33
 def get(self):
     io_loop = self.request.connection.stream.io_loop
     client = AsyncHTTPClient(io_loop=io_loop)
     response = yield gen.Task(client.fetch, self.get_argument('url'))
     response.rethrow()
     self.finish(b("got response: ") + response.body)
Example #34

def camelCaseify(s):
    """convert snake_case to camelCase

    For the common case where some_value is set from someValue
    so we don't have to specify the name twice.
    """
    return re.sub(r"_([a-z])", lambda m: m.group(1).upper(), s)
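For instance:

# camelCaseify("some_value")        -> "someValue"
# camelCaseify("pod_name_template") -> "podNameTemplate"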


# Configure JupyterHub to use the curl backend for making HTTP requests,
# rather than the pure-python implementations. The default one starts
# being too slow to make a large number of requests to the proxy API
# at the rate required.
AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")

c.JupyterHub.spawner_class = "kubespawner.KubeSpawner"

# Connect to a proxy running in a different pod. Note that *_SERVICE_*
# environment variables are set by Kubernetes for Services
c.ConfigurableHTTPProxy.api_url = (
    f"http://proxy-api:{os.environ['PROXY_API_SERVICE_PORT']}"
)
c.ConfigurableHTTPProxy.should_start = False

# Do not shut down user pods when hub is restarted
c.JupyterHub.cleanup_servers = False

# Check that the proxy has routes appropriately setup
c.JupyterHub.last_activity_interval = 60
Example #35
 def __init__(self, client=None):
     self.client = client or AsyncHTTPClient()
     self.github_api_url = os.environ.get('GITHUB_API_URL', 'https://api.github.com/')
     self.authenticate()
Example #36
 def get_client(self):
     client = AsyncHTTPClient()
     # client = HTTPClient()
     return client
Example #37
def up_xiamen_zyt(handler, partner):
    handler.up_req_time = time.localtime()

    timeStamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    requesttime = timeStamp
    msisdn = handler.mobile
    orderno = handler.order_id
    channelNo = partner['channelNo']
    key = partner["key"]

    productid = None
    k = 'private:telecom800:{carrier}:{price}'.format(carrier=handler.carrier, price=handler.price)
    productid = handler.slave.get(k)

    if productid is None:
        handler.up_result = 5003
        return handler.up_result

    data = 'channelNo={0}&msisdn={1}&orderno={2}&productid={4}&requesttime={3}&key={5}'.format(
            channelNo, msisdn, orderno, requesttime, productid, key)

    data_md5 = to_md5(data)
    bytesString = data_md5.encode(encoding="utf-8")

    sign = base64.b64encode(bytesString).decode('gb2312')

    body = {
        "channelNo": channelNo,
        "msisdn": msisdn,
        "productid": productid,
        "requesttime": requesttime,
        "orderno": orderno,
        "sign": sign
    }

    body = urlencode(body)

    url = partner["url_busi"]

    result = 9999
    up_result = None
    http_client = AsyncHTTPClient()
    try:
        request_log.info("REQU %s", body, extra={'orderid': handler.order_id})
        response = yield http_client.fetch(url, method='POST', body=body, request_timeout=120)

    except HTTPError as http_error:
        request_log.error('CALL UPSTREAM FAIL %s', http_error, extra={'orderid': handler.order_id})
        result = 60000 + http_error.code
        response = None

    except Exception as e:
        request_log.error('CALL UPSTREAM FAIL %s', e, extra={'orderid': handler.order_id})
        response = None
    finally:
        http_client.close()

    handler.up_resp_time = time.localtime()

    if response and response.code == 200:
        response_body = response.body.decode('utf8')
        request_log.info("RESP %s", response_body, extra={'orderid': handler.order_id})
        try:
            response_body = json.loads(response_body)
            up_result = response_body["resultcode"]
            handler.up_result = str(up_result)
            result = RESULT_MAP.get(up_result, 9)


        except Exception as e:
            result = 9999
            handler.up_result = result
            request_log.error('PARSE UPSTREAM %s', e, extra={'orderid': handler.order_id})
    return result
Example #38
    async def execute_http(cls, story, line, chain, command_conf):
        assert isinstance(chain, deque)
        assert isinstance(chain[0], Service)
        hostname = await Containers.get_hostname(story, line, chain[0].name)
        args = command_conf.get('arguments', {})
        body = {}
        query_params = {}
        path_params = {}

        form_fields_count = 0
        request_body_fields_count = 0

        for arg in args:
            value = story.argument_by_name(line, arg)
            location = args[arg].get('in', 'requestBody')
            if location == 'query':
                cls.smart_insert(story, line, command_conf, arg, value,
                                 query_params)
            elif location == 'path':
                cls.smart_insert(story, line, command_conf, arg, value,
                                 path_params)
            elif location == 'requestBody':
                cls.smart_insert(story, line, command_conf, arg, value, body)
                request_body_fields_count += 1
            elif location == 'formBody':
                # Created in StoryEventHandler.
                if isinstance(value, FileFormField):
                    body[arg] = FileFormField(arg, value.body, value.filename,
                                              value.content_type)
                else:
                    body[arg] = FormField(arg, value)
                form_fields_count += 1
            else:
                raise AsyncyError(
                    f'Invalid location for argument "{arg}" '
                    f'specified: {location}',
                    story=story,
                    line=line)

        if form_fields_count > 0 and request_body_fields_count > 0:
            raise AsyncyError(
                f'Mixed locations are not permitted. '
                f'Found {request_body_fields_count} fields, '
                f'of which '
                f'{form_fields_count} were in the form body',
                story=story,
                line=line)

        method = command_conf['http'].get('method', 'post')
        kwargs = {'method': method.upper()}

        content_type = command_conf['http'] \
            .get('contentType', 'application/json')

        if method.lower() == 'post':
            cls._fill_http_req_body(kwargs, content_type, body)
        elif len(body) > 0:
            raise AsyncyError(message=f'Parameters found in the request body, '
                              f'but the method is {method}',
                              story=story,
                              line=line)

        port = command_conf['http'].get('port', 5000)
        path = HttpUtils.add_params_to_url(
            command_conf['http']['path'].format(**path_params), query_params)
        url = f'http://{hostname}:{port}{path}'

        story.logger.debug(f'Invoking service on {url} with payload {kwargs}')

        client = AsyncHTTPClient()
        response = await HttpUtils.fetch_with_retry(3, story.logger, url,
                                                    client, kwargs)

        story.logger.debug(f'HTTP response code is {response.code}')
        if int(response.code / 100) == 2:
            content_type = response.headers.get('Content-Type')
            if content_type and 'application/json' in content_type:
                return ujson.loads(response.body)
            else:
                return cls.parse_output(command_conf, response.body, story,
                                        line, content_type)
        else:
            raise AsyncyError(message=f'Failed to invoke service!',
                              story=story,
                              line=line)
Example #39

def begin():
    global total, cnt
    conn = db.connect_torndb()

    source_companies = conn.query(
        "select * from source_company where source=13050 order by id desc limit %s,1000",
        cnt)
    if len(source_companies) == 0:
        logger.info("Finish.")
        exit()

    for source_company in source_companies:
        cnt += 1
        total += 1
        url = "http://www.lagou.com/gongsi/searchPosition.json?companyId=%s&pageSize=1000" % source_company[
            'sourceId']
        request(url,
                lambda r, source_company=source_company: handle_result(
                    r, source_company))
    conn.close()


if __name__ == "__main__":
    logger.info("Start...")

    AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
    http_client = AsyncHTTPClient(max_clients=30)
    begin()
    tornado.ioloop.IOLoop.instance().start()
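The request() helper is not shown in this excerpt; a hypothetical sketch consistent with how it is called above, scheduling a fetch on the shared client:

def request(url, callback):
    # http_client is the module-level AsyncHTTPClient created in __main__
    http_client.fetch(url, callback=callback)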
Example #40
    async def when(cls, s: StreamingService, story, line: dict):
        service = line[LineConstants.service]
        command = line[LineConstants.command]
        conf = story.app.services[s.name][ServiceConstants.config]
        conf_event = Dict.find(conf, f'actions.{s.command}.events.{command}')

        port = Dict.find(conf_event, f'http.port', 80)
        subscribe_path = Dict.find(conf_event, 'http.subscribe.path')
        subscribe_method = Dict.find(conf_event, 'http.subscribe.method',
                                     'post')

        event_args = Dict.find(conf_event, 'arguments', {})

        data = {}
        for key in event_args:
            data[key] = story.argument_by_name(line, key)

        # HACK for http - send the DNS name of the app.
        if s.name == 'http':
            data['host'] = story.app.app_dns
        # END HACK for http.

        sub_url = f'http://{s.hostname}:{port}{subscribe_path}'

        story.logger.debug(f'Subscription URL - {sub_url}')

        engine = f'{story.app.config.ENGINE_HOST}:' \
                 f'{story.app.config.ENGINE_PORT}'

        query_params = urllib.parse.urlencode({
            'story': story.name,
            'block': line['ln'],
            'app': story.app.app_id
        })

        sub_id = str(uuid.uuid4())

        sub_body = {
            'endpoint': f'http://{engine}/story/event?{query_params}',
            'data': data,
            'event': command,
            'id': sub_id
        }

        body = {
            'sub_id': sub_id,
            'sub_url': sub_url,
            'sub_method': subscribe_method.upper(),
            'sub_body': sub_body,
            'pod_name': s.container_name,
            'app_id': story.app.app_id
        }

        # Why request_timeout is set to 120 seconds:
        # Since this is the Synapse, Synapse does multiple internal retries,
        # so we must set this to a really high value.
        kwargs = {
            'method': subscribe_method.upper(),
            'body': json.dumps(body),
            'headers': {
                'Content-Type': 'application/json; charset=utf-8'
            },
            'request_timeout': 120
        }

        client = AsyncHTTPClient()
        story.logger.debug(f'Subscribing to {service} '
                           f'from {s.command} via Synapse...')

        url = f'http://{story.app.config.ASYNCY_SYNAPSE_HOST}:' \
              f'{story.app.config.ASYNCY_SYNAPSE_PORT}' \
              f'/subscribe'

        # Okay to retry a request to the Synapse a hundred times.
        response = await HttpUtils.fetch_with_retry(100, story.logger, url,
                                                    client, kwargs)
        if int(response.code / 100) == 2:
            story.logger.debug(f'Subscribed!')
            story.app.add_subscription(sub_id, s, command, body)
        else:
            raise AsyncyError(
                message=f'Failed to subscribe to {service} from '
                f'{s.command} in {s.container_name}! '
                f'http err={response.error}; code={response.code}',
                story=story,
                line=line)
Example #41
import pathlib
import logging
from utils import ServerError
from tornado.escape import url_escape, url_unescape
from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError
from tornado.httputil import HTTPHeaders
from typing import (TYPE_CHECKING, Callable, Optional, Tuple, Union, Dict,
                    List, Any)
if TYPE_CHECKING:
    from moonraker import Server
    from confighelper import ConfigHelper
    from io import BufferedWriter
    StrOrPath = Union[str, pathlib.Path]

MAX_BODY_SIZE = 512 * 1024 * 1024
AsyncHTTPClient.configure(None,
                          defaults=dict(user_agent="Moonraker"),
                          max_body_size=MAX_BODY_SIZE)

GITHUB_PREFIX = "https://api.github.com/"


def escape_query_string(qs: str) -> str:
    parts = qs.split("&")
    escaped: List[str] = []
    for p in parts:
        item = p.split("=", 1)
        key = url_escape(item[0])
        if len(item) == 2:
            escaped.append(f"{key}={url_escape(item[1])}")
        else:
            escaped.append(key)
    return "&".join(escaped)
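For example (tornado's url_escape encodes spaces as '+' by default):

# escape_query_string("q=a b&flag") -> "q=a+b&flag"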
Example #42
import pytest
import requests
import sh
from tornado.httpclient import AsyncHTTPClient, HTTPClientError
from tornado.testing import AsyncHTTPTestCase, gen_test
from unittest import TestCase
from re import search
from kafkahelpers import ReconnectingClient

import app
from utils import config
from tests.fixtures.fake_mq import FakeMQ
from tests.fixtures import StopLoopException
from utils.storage import s3 as s3_storage
from mock import patch

client = AsyncHTTPClient()


def cleanup():
    sh.rm(config.TOPIC_CONFIG)


class TestContentRegex(TestCase):
    """
    Test the content MIME type regex described in IPP 1.
    """
    def test_valid_mime_type(self):
        """
        A valid MIME type is correctly recognized.
        """
        mime_types = [
Example #43
def cull_idle(url,
              api_token,
              inactive_limit,
              cull_users=False,
              max_age=0,
              concurrency=10):
    """Shutdown idle single-user servers

    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {'Authorization': 'token %s' % api_token}
    req = HTTPRequest(url=url + '/users', headers=auth_header)
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()

    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    @coroutine
    def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server

        "server" is the entire server model from the API.

        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server['pending'])
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get('ready', bool(server['url'])):
            app_log.warning("Not culling not-ready not-pending server %s: %s",
                            log_name, server)
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server['last_activity']:
            inactive = now - parse_date(server['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        # CUSTOM CULLING TEST CODE HERE
        # Add in additional server tests here.  Return False to mean "don't
        # cull", True means "cull immediately", or, for example, update some
        # other variables like inactive_limit.
        #
        # Here, server['state'] is the result of the get_state method
        # on the spawner.  This does *not* contain the below by
        # default, you may have to modify your spawner to make this
        # work.  The `user` variable is the user model from the API.
        #
        # if server['state']['profile_name'] == 'unlimited'
        #     return False
        # inactive_limit = server['state']['culltime']

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling server %s (age: %s, inactive for %s)",
                    log_name,
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling server %s (age: %s, inactive for %s)",
                log_name,
                format_td(age),
                format_td(inactive),
            )
            return False

        if server_name:
            # culling a named server
            delete_url = url + "/users/%s/servers/%s" % (
                quote(user['name']),
                quote(server['name']),
            )
        else:
            delete_url = url + '/users/%s/server' % quote(user['name'])

        req = HTTPRequest(url=delete_url, method='DELETE', headers=auth_header)
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning("Server %s is slow to stop", log_name)
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user.

        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if 'servers' in user:
            servers = user['servers']
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user['server']:
                servers[''] = {
                    'last_activity': user['last_activity'],
                    'pending': user['pending'],
                    'url': user['server'],
                }
        server_futures = [
            handle_server(user, server_name, server)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug(
                "Not culling user %s with %i servers still alive",
                user['name'],
                still_alive,
            )
            return False

        should_cull = False
        if user.get('created'):
            age = now - parse_date(user['created'])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if user['last_activity']:
            inactive = now - parse_date(user['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user['name'],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling user %s (age: %s, inactive for %s)",
                    user['name'],
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling user %s (created: %s, last active: %s)",
                user['name'],
                format_td(age),
                format_td(inactive),
            )
            return False

        req = HTTPRequest(url=url + '/users/%s' % user['name'],
                          method='DELETE',
                          headers=auth_header)
        yield fetch(req)
        return True

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #44
class HttpClient:
    def __init__(self, config: ConfigHelper) -> None:
        self.server = config.get_server()
        self.client = AsyncHTTPClient()
        self.response_cache: Dict[str, HttpResponse] = {}

        self.gh_rate_limit: Optional[int] = None
        self.gh_limit_remaining: Optional[int] = None
        self.gh_limit_reset_time: Optional[float] = None

    def register_cached_url(self,
                            url: str,
                            etag: Optional[str] = None,
                            last_modified: Optional[str] = None) -> None:
        headers = HTTPHeaders()
        if etag is not None:
            headers["etag"] = etag
        if last_modified is not None:
            headers["last-modified"] = last_modified
        if len(headers) == 0:
            raise self.server.error(
                "Either an Etag or Last Modified Date must be specified")
        empty_resp = HttpResponse(url, 200, b"", headers, None)
        self.response_cache[url] = empty_resp

    def escape_url(self, url: str) -> str:
        # escape the url
        match = re.match(r"(https?://[^/?#]+)([^?#]+)?(\?[^#]+)?(#.+)?", url)
        if match is not None:
            uri, path, qs, fragment = match.groups()
            if path is not None:
                uri += "/".join(
                    [url_escape(p, plus=False) for p in path.split("/")])
            if qs is not None:
                uri += "?" + escape_query_string(qs[1:])
            if fragment is not None:
                uri += "#" + url_escape(fragment[1:], plus=False)
            url = uri
        return url

    async def request(self,
                      method: str,
                      url: str,
                      body: Optional[Union[str, List[Any], Dict[str,
                                                                Any]]] = None,
                      headers: Optional[Dict[str, Any]] = None,
                      connect_timeout: float = 5.,
                      request_timeout: float = 10.,
                      attempts: int = 1,
                      retry_pause_time: float = .1,
                      enable_cache: bool = False,
                      send_etag: bool = True,
                      send_if_modified_since: bool = True) -> HttpResponse:
        cache_key = url.split("?", 1)[0]
        method = method.upper()
        # prepare the body if required
        req_headers: Dict[str, Any] = {}
        if isinstance(body, (list, dict)):
            body = json.dumps(body)
            req_headers["Content-Type"] = "application/json"
        cached: Optional[HttpResponse] = None
        if enable_cache:
            cached = self.response_cache.get(cache_key)
            if cached is not None:
                if cached.etag is not None and send_etag:
                    req_headers["If-None-Match"] = cached.etag
                if cached.last_modified and send_if_modified_since:
                    req_headers["If-Modified-Since"] = cached.last_modified
        if headers is not None:
            headers.update(req_headers)
        elif req_headers:
            headers = req_headers

        timeout = 1 + connect_timeout + request_timeout
        request = HTTPRequest(url,
                              method,
                              headers,
                              body=body,
                              request_timeout=request_timeout,
                              connect_timeout=connect_timeout)
        err: Optional[BaseException] = None
        for i in range(attempts):
            if i:
                await asyncio.sleep(retry_pause_time)
            try:
                fut = self.client.fetch(request, raise_error=False)
                resp = await asyncio.wait_for(fut, timeout)
            except asyncio.CancelledError:
                raise
            except Exception as e:
                err = e
            else:
                err = resp.error
                if resp.code == 304:
                    err = None
                    if cached is None:
                        if enable_cache:
                            logging.info(
                                "Request returned 304, however no cached "
                                "item was found")
                        result = b""
                    else:
                        logging.debug(f"Request returned from cache: {url}")
                        result = cached.content
                elif resp.error is not None and attempts - i != 1:
                    continue
                else:
                    result = resp.body
                ret = HttpResponse(url, resp.code, result, resp.headers, err)
                break
        else:
            ret = HttpResponse(url, 500, b"", HTTPHeaders(), err)
        if enable_cache and ret.is_cachable():
            logging.debug(f"Caching HTTP Response: {url}")
            self.response_cache[cache_key] = ret
        else:
            self.response_cache.pop(cache_key, None)
        return ret

    async def get(self,
                  url: str,
                  headers: Optional[Dict[str, Any]] = None,
                  **kwargs) -> HttpResponse:
        if "enable_cache" not in kwargs:
            kwargs["enable_cache"] = True
        return await self.request("GET", url, None, headers, **kwargs)

    async def post(self,
                   url: str,
                   body: Union[str, List[Any], Dict[str, Any]] = "",
                   headers: Optional[Dict[str, Any]] = None,
                   **kwargs) -> HttpResponse:
        return await self.request("POST", url, body, headers, **kwargs)

    async def delete(self,
                     url: str,
                     headers: Optional[Dict[str, Any]] = None,
                     **kwargs) -> HttpResponse:
        return await self.request("DELETE", url, None, headers, **kwargs)

    async def github_api_request(self,
                                 resource: str,
                                 attempts: int = 1,
                                 retry_pause_time: float = .1) -> HttpResponse:
        url = f"{GITHUB_PREFIX}{resource.strip('/')}"
        if (self.gh_limit_reset_time is not None
                and self.gh_limit_remaining == 0):
            curtime = time.time()
            if curtime < self.gh_limit_reset_time:
                reset_time = time.ctime(self.gh_limit_reset_time)
                raise self.server.error(f"GitHub Rate Limit Reached\n"
                                        f"Request: {url}\n"
                                        f"Limit Reset Time: {reset_time}")
        headers = {"Accept": "application/vnd.github.v3+json"}
        resp = await self.get(url,
                              headers,
                              attempts=attempts,
                              retry_pause_time=retry_pause_time)
        resp_hdrs = resp.headers
        if 'X-Ratelimit-Limit' in resp_hdrs:
            self.gh_rate_limit = int(resp_hdrs['X-Ratelimit-Limit'])
            self.gh_limit_remaining = int(resp_hdrs['X-Ratelimit-Remaining'])
            self.gh_limit_reset_time = float(resp_hdrs['X-Ratelimit-Reset'])
        return resp

    def github_api_stats(self) -> Dict[str, Any]:
        return {
            'github_rate_limit': self.gh_rate_limit,
            'github_requests_remaining': self.gh_limit_remaining,
            'github_limit_reset_time': self.gh_limit_reset_time,
        }

    async def get_file(
        self,
        url: str,
        content_type: str,
        connect_timeout: float = 5.,
        request_timeout: float = 180.,
        attempts: int = 1,
        retry_pause_time: float = .1,
        enable_cache: bool = False,
    ) -> bytes:
        headers = {"Accept": content_type}
        resp = await self.get(url,
                              headers,
                              connect_timeout=connect_timeout,
                              request_timeout=request_timeout,
                              attempts=attempts,
                              retry_pause_time=retry_pause_time,
                              enable_cache=enable_cache)
        resp.raise_for_status()
        return resp.content

    async def download_file(self,
                            url: str,
                            content_type: str,
                            destination_path: Optional[StrOrPath] = None,
                            download_size: int = -1,
                            progress_callback: Optional[Callable[
                                [int, int, int], None]] = None,
                            connect_timeout: float = 5.,
                            request_timeout: float = 180.,
                            attempts: int = 1,
                            retry_pause_time: float = 1.) -> pathlib.Path:
        for i in range(attempts):
            dl = StreamingDownload(self.server, destination_path,
                                   download_size, progress_callback)
            try:
                fut = self.client.fetch(url,
                                        headers={"Accept": content_type},
                                        connect_timeout=connect_timeout,
                                        request_timeout=request_timeout,
                                        streaming_callback=dl.on_chunk_recd,
                                        header_callback=dl.on_headers_recd)
                timeout = connect_timeout + request_timeout + 1.
                resp = await asyncio.wait_for(fut, timeout)
            except asyncio.CancelledError:
                raise
            except Exception:
                if i + 1 == attempts:
                    raise
                await asyncio.sleep(retry_pause_time)
                continue
            finally:
                await dl.close()
            if resp.code < 400:
                return dl.dest_file
        raise self.server.error(f"Retries exceeded for request: {url}")

    def close(self):
        self.client.close()
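
# A minimal usage sketch (an addition, not part of the original example):
# `client` stands for an instance of the wrapper class above; the URLs,
# timeouts, and the progress-callback argument names are placeholder
# assumptions.
async def fetch_and_download(client):
    # GET enables caching by default; a 304 reply is served from the cache
    resp = await client.get("https://example.com/releases.json",
                            attempts=3, retry_pause_time=1.)
    resp.raise_for_status()

    def on_progress(progress, total, received):
        logging.info(f"download: {progress}% ({received}/{total} bytes)")

    # stream a large response to disk, retrying once on failure
    return await client.download_file(
        "https://example.com/firmware.bin",
        content_type="application/octet-stream",
        progress_callback=on_progress,
        attempts=2)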
Exemple #45
0
class App(object):
    """Base class for Frenetic applications. Each handler below can be
       overridden by the application; the defaults simply print the event."""

    def switch_up(self, switch_id, ports):
        """Can be overridden by the application. By default, it simply
           prints the event."""
        print "switch_up(switch_id=%s)" % switch_id

    def switch_down(self, switch_id):
        """Can be overridden by the application. By default, it simply
           prints the event."""
        print "switch_down(switch_id=%s)" % switch_id

    def port_up(self, switch_id, port_id):
        """Can be overridden by the application. By default, it simply
           prints the event."""
        print "port_up(switch_id=%s, port_id=%d)" % (switch_id, port_id)

    def port_down(self, switch_id, port_id):
        """Can be overridden by the application. By default, it simply
           prints the event."""
        print "port_down(switch_id=%s, port_id=%d)" % (switch_id, port_id)

    def packet_in(self, switch_id, port_id, payload):
        """Can be overridden by the application. By default, it simply
           prints the event and drops the packet."""
        print "packet_in(switch_id=%s, port_id=%d)" % (switch_id, port_id)
        self.pkt_out(switch_id, payload, [])

    def connected(self):
        print "established connection to Frenetic controller"

    def packet(self, payload, protocol):
        from ryu.lib.packet import packet
        pkt = packet.Packet(array.array('b', payload.data))
        for p in pkt:
            if p.protocol_name == protocol:
                return p
        return None

    def pkt_out(self,
                switch_id,
                payload,
                actions,
                in_port=None,
                policies=None):
        # Renamed actions to policies in the internal API to make it clearer, but
        # kept actions keyword for backward compatibility.
        _policies = policies if policies is not None else actions
        msg = PacketOut(switch=switch_id,
                        payload=payload,
                        policies=_policies,
                        in_port=in_port)
        pkt_out_url = "http://%s:%s/pkt_out" % (self.frenetic_http_host,
                                                self.frenetic_http_port)
        request = HTTPRequest(pkt_out_url,
                              method='POST',
                              body=json.dumps(msg.to_json()))
        return self.__http_client.fetch(request)

    def config(self, compiler_options):
        config_url = "http://%s:%s/config" % (self.frenetic_http_host,
                                              self.frenetic_http_port)
        request = HTTPRequest(config_url,
                              method='POST',
                              body=json.dumps(compiler_options.to_json()))
        return self.__http_client.fetch(request)

    def run_response(self, ftr, callback):
        response = ftr.result()
        if hasattr(response, 'buffer'):
            data = json.loads(response.buffer.getvalue())
            ps = int(data['packets'])
            bs = int(data['bytes'])
            callback([ps, bs])

    @return_future
    def query_helper(self, ftr, callback):
        f = partial(self.run_response, callback=callback)
        IOLoop.instance().add_future(ftr, f)

    def query(self, label):
        url = "http://%s:%s/query/%s" % (self.frenetic_http_host,
                                         self.frenetic_http_port, label)
        request = HTTPRequest(url, method='GET', request_timeout=0)
        response_future = self.__http_client.fetch(request)
        return self.query_helper(response_future)

    def run_port_stats(self, ftr, callback):
        response = ftr.result()
        if hasattr(response, 'buffer'):
            data = json.loads(response.buffer.getvalue())
            callback(data)

    @return_future
    def port_stats_helper(self, ftr, callback):
        f = partial(self.run_port_stats, callback=callback)
        IOLoop.instance().add_future(ftr, f)

    # Returns a Future where the Result is a dictionary with the
    # following values: port_no, rx_packets, tx_packets, rx_bytes, tx_bytes
    # rx_dropped, tx_dropped, rx_errors, tx_errors, rx_frame_error, rx_over_err,
    # rx_crc_err, collisions. All of these values map to an integer
    def port_stats(self, switch_id, port_id):
        url = "http://%s:%s/port_stats/%s/%s" % (self.frenetic_http_host,
                                                 self.frenetic_http_port,
                                                 switch_id, port_id)
        request = HTTPRequest(url, method='GET', request_timeout=0)
        response_future = self.__http_client.fetch(request)
        return self.port_stats_helper(response_future)

    @gen.coroutine
    def current_switches(self):
        url = "http://%s:%s/current_switches" % (self.frenetic_http_host,
                                                 self.frenetic_http_port)
        req = HTTPRequest(url, method="GET", request_timeout=0)
        resp = yield self.__http_client.fetch(req)
        ret = dict((x["switch_id"], x["ports"]) for x in json.loads(resp.body))
        raise gen.Return(ret)

    def update(self, policy):
        pol_json = json.dumps(policy.to_json())
        url = "http://%s:%s/%s/update_json" % (
            self.frenetic_http_host, self.frenetic_http_port, self.client_id)
        request = HTTPRequest(url, method='POST', body=pol_json)
        return self.__http_client.fetch(request)

    def __init__(self):
        if not hasattr(self, 'client_id'):
            self.client_id = uuid.uuid4().hex
            print "No client_id specified. Using %s" % self.client_id
        if not hasattr(self, 'frenetic_http_host'):
            self.frenetic_http_host = "localhost"
        if not hasattr(self, 'frenetic_http_port'):
            self.frenetic_http_port = "9000"
        self.__http_client = AsyncHTTPClient()
        self.__connect()

    def __connect(self):
        url = "http://%s:%s/version" % (self.frenetic_http_host,
                                        self.frenetic_http_port)
        req = HTTPRequest(url, method='GET', request_timeout=0)
        resp_fut = self.__http_client.fetch(req)
        IOLoop.instance().add_future(resp_fut, self.__handle_connect)

    def __handle_connect(self, response_future):
        try:
            response = response_future.result()
            self.__poll_event()
            self.connected()
        except httpclient.HTTPError as e:
            if e.code == 599:
                print "Frenetic not running, re-trying...."
                one_second = timedelta(seconds=1)
                IOLoop.instance().add_timeout(one_second, self.__connect)
            else:
                raise e

    def start_event_loop(self):
        print "Starting the tornado event loop (does not return)."
        IOLoop.instance().start()

    def __poll_event(self):
        url = "http://%s:%s/%s/event" % (
            self.frenetic_http_host, self.frenetic_http_port, self.client_id)
        req = HTTPRequest(url, method='GET', request_timeout=0)
        resp_fut = self.__http_client.fetch(req)

        IOLoop.instance().add_future(resp_fut, self.__handle_event)

    def __handle_event(self, response):
        try:
            event = json.loads(response.result().body)
            # For some reason, port stats are leaking into the event queue, so
            # just get them out
            if isinstance(event, list) or 'type' not in event:
                typ = "UNKNOWN"
            else:
                typ = event['type']
            if typ == 'switch_up':
                switch_id = event['switch_id']
                ports = event['ports']
                self.switch_up(switch_id, ports)
            elif typ == 'switch_down':
                switch_id = event['switch_id']
                self.switch_down(switch_id)
            elif typ == 'port_up':
                switch_id = event['switch_id']
                port_id = event['port_id']
                self.port_up(switch_id, port_id)
            elif typ == 'port_down':
                switch_id = event['switch_id']
                port_id = event['port_id']
                self.port_down(switch_id, port_id)
            elif typ == 'packet_in':
                pk = PacketIn(event)
                self.packet_in(pk.switch_id, pk.port_id, pk.payload)

            self.__poll_event()

        except httpclient.HTTPError as e:
            if e.code == 599:
                print time.strftime(
                    "%c") + " Frenetic crashed, re-trying in 5 seconds...."
                five_seconds = timedelta(seconds=5)
                # We wait for a connect instead of going through the loop again.
                IOLoop.instance().add_timeout(five_seconds, self.__connect)
            else:
                raise e
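
# A minimal usage sketch (an addition, not part of the original example):
# a subclass overrides only the handlers it cares about and then starts
# the event loop; the class name and client_id are placeholders.
class PrinterApp(App):
    client_id = "printer_app"  # otherwise a random uuid is chosen

    def packet_in(self, switch_id, port_id, payload):
        print "saw a packet on switch %s port %d" % (switch_id, port_id)
        self.pkt_out(switch_id, payload, [])  # drop it, like the default

if __name__ == '__main__':
    PrinterApp().start_event_loop()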
Exemple #46
0
urls = [
    'http://www.google.com', 'http://www.yandex.ru', 'http://www.python.org'
]


def handle_response(response):
    if response.error:
        print("Error:", response.error)
    else:
        url = response.request.url
        data = response.body
        print('{} : {}...({} bytes)'.format(url, data[:10], len(data)))

    ioloop.IOLoop.current().stop()


loop = ioloop.IOLoop.current()
# create the event loop
http_client = AsyncHTTPClient()

for url in urls:
    loop.add_callback(http_client.fetch, url,
                      lambda resp: handle_response(resp))
    # add a callback to the event loop: add_callback(func, args, lambda)

loop.start()
# run the event loop; each fetch returns immediately, so the program can do
# other work while it waits on the network

# Note that the callback checks for an error on its very first line; this is
# essential because a callback cannot usefully raise an exception. Any
# exception raised in a callback escapes the event loop and the program, so
# errors must be passed along as objects (as above) instead of being raised,
# which means that without error-handling code every error is silently buried.
# Another problem is that callbacks are the only way to avoid blocking, so it
# is easy to build very long callback chains that are complex and hard to
# debug.
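
# For contrast, a sketch of the same fetches written as a Tornado coroutine
# (an addition, not part of the original example): exceptions propagate
# normally, so there is no error object to check and no callback chain.
from tornado import gen

@gen.coroutine
def fetch_all():
    client = AsyncHTTPClient()
    for url in urls:
        try:
            resp = yield client.fetch(url)
            print('{} : {}...({} bytes)'.format(url, resp.body[:10],
                                                len(resp.body)))
        except Exception as e:
            print('Error fetching {}: {}'.format(url, e))

# to run it: ioloop.IOLoop.current().run_sync(fetch_all)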
Exemple #47
0
    async def get_baidu_v35(cls, request):
        http = AsyncHTTPClient()
        print('request to baidu')
        response = await http.fetch(request)
        print('request to baidu done!')
        return response.body
Exemple #48
0
    def request_get(self, url, params, callback, user_data=None, headers=None):
        url = url_concat(url, params)
        client = AsyncHTTPClient()
        request = HTTPRequest(url, connect_timeout=10, headers=headers)
        self.log_debug(url)
        client.fetch(request, functools.partial(callback, user_data))
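
# A minimal usage sketch (an addition; the endpoint, query parameters, and
# callback are hypothetical, and `self` is the owning handler instance):
#
#     def on_result(user_data, response):
#         print(user_data, response.code, len(response.body))
#
#     self.request_get('http://example.com/api', {'q': 'tornado'},
#                      on_result, user_data='tag-1')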
Exemple #49
0
def mock_asynchttpclient(request):
    """mock AsyncHTTPClient for recording responses"""
    AsyncHTTPClient.configure(MockAsyncHTTPClient)
    if not os.getenv('GITHUB_ACCESS_TOKEN'):
        load_mock_responses('api.github.com')
        load_mock_responses('zenodo.org')
Exemple #50
0
class AsyncAuthServiceProxy(object):
    def __init__(self,
                 service_url,
                 service_name=None,
                 timeout=HTTP_TIMEOUT,
                 reconnect_timeout=2,
                 reconnect_amount=5,
                 max_clients=10,
                 max_buffer_size=104857600):
        """
        :arg service_url: in format "http://{user}:{password}@{host}:{port}"
        :arg service_name: TBD
        :arg timeout: TBD
        :arg reconnect_timeout: TBD
        :arg reconnect_amount: TBD
        :arg int max_clients: the number of concurrent requests that can
            be in progress. See Tornado's docs for SimpleAsyncHTTPClient.
        :arg int max_buffer_size: the number of bytes that can be read by
            IOStream. It defaults to 100MB. See Tornado's docs for
            SimpleAsyncHTTPClient.
        """

        self.__service_url = service_url
        self.__reconnect_timeout = reconnect_timeout
        self.__reconnect_amount = reconnect_amount or 1
        self.__service_name = service_name
        self.__url = urlparse.urlparse(service_url)
        self.__http_client = AsyncHTTPClient(max_clients=max_clients,
                                             max_buffer_size=max_buffer_size)
        self.__id_count = 0
        (user, passwd) = (self.__url.username, self.__url.password)
        try:
            user = user.encode('utf8')
        except AttributeError:
            pass
        try:
            passwd = passwd.encode('utf8')
        except AttributeError:
            pass
        authpair = user + b':' + passwd
        self.__auth_header = b'Basic ' + base64.b64encode(authpair)

    def __getattr__(self, name):
        if name.startswith('__') and name.endswith('__'):
            # Python internal stuff
            raise AttributeError
        if self.__service_name is not None:
            name = "%s.%s" % (self.__service_name, name)
        return AsyncAuthServiceProxy(self.__service_url, name)

    @gen.coroutine
    def __call__(self, *args):
        self.__id_count += 1

        postdata = json.dumps({
            'version': '1.1',
            'method': self.__service_name,
            'params': args,
            'id': self.__id_count
        })
        headers = {
            'Host': self.__url.hostname,
            'User-Agent': USER_AGENT,
            'Authorization': self.__auth_header,
            'Content-type': 'application/json'
        }

        req = HTTPRequest(url=self.__service_url,
                          method="POST",
                          body=postdata,
                          headers=headers)

        for i in range(self.__reconnect_amount):
            try:
                if i > 0:
                    l.warning("Reconnect try #{0}".format(i + 1))
                response = yield self.__http_client.fetch(req)
                break
            except HTTPError:
                err_msg = 'Failed to connect to {0}:{1}'.format(
                    self.__url.hostname, self.__url.port)
                rtm = self.__reconnect_timeout
                if rtm:
                    err_msg += ". Waiting {0} seconds.".format(rtm)
                l.exception(err_msg)
                if rtm:
                    io_loop = ioloop.IOLoop.current()
                    yield gen.Task(io_loop.add_timeout, timedelta(seconds=rtm))
        else:
            l.error("Reconnect tries exceed.")
            return
        response = json.loads(response.body, parse_float=decimal.Decimal)

        if response['error'] is not None:
            raise JSONRPCException(response['error'])
        elif 'result' not in response:
            raise JSONRPCException({
                'code': -343,
                'message': 'missing JSON-RPC result'
            })
        else:
            raise gen.Return(response['result'])
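
# A minimal usage sketch (an addition; the URL, credentials, and RPC method
# name are placeholders): attribute access on the proxy builds the JSON-RPC
# method name, and calling it performs the authenticated POST.
@gen.coroutine
def show_count():
    proxy = AsyncAuthServiceProxy("http://user:passwd@127.0.0.1:8332")
    count = yield proxy.getblockcount()  # dotted access nests: proxy.a.b()
    print(count)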
Exemple #51
0
@gen.coroutine
def async_webrequest(url):
    client = AsyncHTTPClient()
    response = yield gen.Task(client.fetch, url)
    data = response.body
    raise gen.Return(data)
Exemple #52
0
    @gen.coroutine
    def get_baidu(cls, request, from_name=None):
        http = AsyncHTTPClient()
        print('request to baidu from {}'.format(from_name))
        response = yield http.fetch(request)
        print('request to baidu done!')
        raise gen.Return(response.body)
Exemple #53
0
        if self.request.streaming_callback:
            buffer = BytesIO()
        else:
            buffer = BytesIO(data)  # TODO: don't require one big string?
        response = HTTPResponse(original_request,
                                self.code, reason=getattr(self, 'reason', None),
                                headers=self.headers,
                                request_time=self.io_loop.time() - self.start_time,
                                start_time=self.start_wall_time,
                                buffer=buffer,
                                effective_url=self.request.url)
        self._run_callback(response)
        self._on_end_request()

    def _on_end_request(self):
        self.stream.close()

    def data_received(self, chunk):
        if self._should_follow_redirect():
            # We're going to follow a redirect so just discard the body.
            return
        if self.request.streaming_callback is not None:
            self.request.streaming_callback(chunk)
        else:
            self.chunks.append(chunk)


if __name__ == "__main__":
    AsyncHTTPClient.configure(SimpleAsyncHTTPClient)
    main()
Exemple #54
0
class TokenManager(object):
    def __init__(self,
                 token_endpoint,
                 client_id,
                 client_secret,
                 http_options=None,
                 **kwargs):

        self._token_endpoint = token_endpoint
        self._client_id = client_id
        self._client_secret = client_secret
        self.request_body = kwargs
        self._token = Token()
        self._http_options = http_options if http_options else {}
        self._http_client = AsyncHTTPClient()

    def _has_token(self):
        return self._token.is_valid()

    @gen.coroutine
    def get_token(self):
        if not self._has_token():
            yield self._update_token()
        raise gen.Return(self._token.access_token)

    @gen.coroutine
    def _get_token_data(self):
        token_data = yield self._request_token()
        raise gen.Return(token_data)

    def reset_token(self):
        self._token = Token()

    @gen.coroutine
    def _update_token(self):
        token_data = yield self._get_token_data()
        self._token = Token(token_data.get('access_token', ''),
                            token_data.get('expires_in', 0))

    @gen.coroutine
    def _request_token(self):
        if not self._token_endpoint:
            raise TokenError('Missing token endpoint')

        data = self.request_body
        if not data.get('grant_type'):
            data['grant_type'] = 'client_credentials'

        if data.get('has_body_authentication'):
            data['client_id'] = self._client_id
            data['client_secret'] = self._client_secret

        token_data = yield self._fetch(
            url=self._token_endpoint,
            method="POST",
            auth=(self._client_id, self._client_secret),
            data=data,
        )

        raise gen.Return(token_data)

    @gen.coroutine
    def _fetch(self, url, method="GET", data=None, auth=None):
        if isinstance(data, dict):
            data = urlencode(data)

        request_data = dict(url=url, headers={}, method=method, body=data)

        if auth is not None:
            try:
                passhash = b64encode(':'.join(auth).encode('ascii'))
            except TypeError as e:
                raise TokenError(
                    'Missing credentials (client_id:client_secret)', str(e))

            request_data['headers']['Authorization'] = (
                'Basic %s' % passhash.decode('utf-8'))

        request_data.update(self._http_options)
        request = HTTPRequest(**request_data)

        logger.debug('Request: %s %s', request.method, request.url)
        for header in request.headers:
            logger.debug('Header %s: %s', header, request.headers[header])

        try:
            response = yield self._http_client.fetch(request)
        except HTTPError as e:
            raise TokenHTTPError('Failed to request token', e.response)

        result = json.loads(response.body.decode("utf-8"))
        raise gen.Return(result)
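
# A minimal usage sketch (an addition; the endpoint and credentials are
# placeholders): the first call fetches a token, later calls reuse it
# until it expires.
@gen.coroutine
def authorized_header():
    manager = TokenManager('https://auth.example.com/token',
                           client_id='my-client',
                           client_secret='my-secret')
    token = yield manager.get_token()
    raise gen.Return({'Authorization': 'Bearer %s' % token})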
Exemple #55
0
def main():
    """A simple test runner.

    This test runner is essentially equivalent to `unittest.main` from
    the standard library, but adds support for tornado-style option
    parsing and log formatting.

    The easiest way to run a test is via the command line::

        python -m tornado.testing tornado.test.stack_context_test

    See the standard library unittest module for ways in which tests can
    be specified.

    Projects with many tests may wish to define a test script like
    tornado/test/runtests.py.  This script should define a method all()
    which returns a test suite and then call tornado.testing.main().
    Note that even when a test script is used, the all() test suite may
    be overridden by naming a single test on the command line::

        # Runs all tests
        tornado/test/runtests.py
        # Runs one test
        tornado/test/runtests.py tornado.test.stack_context_test

    """
    from tornado.options import define, options, parse_command_line

    define('autoreload', type=bool, default=False,
           help="DEPRECATED: use tornado.autoreload.main instead")
    define('httpclient', type=str, default=None)
    define('exception_on_interrupt', type=bool, default=True,
           help=("If true (default), ctrl-c raises a KeyboardInterrupt "
                 "exception.  This prints a stack trace but cannot interrupt "
                 "certain operations.  If false, the process is more reliably "
                 "killed, but does not print a stack trace."))
    argv = [sys.argv[0]] + parse_command_line(sys.argv)

    if options.httpclient:
        from tornado.httpclient import AsyncHTTPClient
        AsyncHTTPClient.configure(options.httpclient)

    if not options.exception_on_interrupt:
        signal.signal(signal.SIGINT, signal.SIG_DFL)

    if __name__ == '__main__' and len(argv) == 1:
        print >> sys.stderr, "No tests specified"
        sys.exit(1)
    try:
        # In order to be able to run tests by their fully-qualified name
        # on the command line without importing all tests here,
        # module must be set to None.  Python 3.2's unittest.main ignores
        # defaultTest if no module is given (it tries to do its own
        # test discovery, which is incompatible with auto2to3), so don't
        # set module if we're not asking for a specific test.
        if len(argv) > 1:
            unittest.main(module=None, argv=argv)
        else:
            unittest.main(defaultTest="all", argv=argv)
    except SystemExit, e:
        if e.code == 0:
            logging.info('PASS')
        else:
            logging.error('FAIL')
        if not options.autoreload:
            raise
Exemple #56
0
def up_esai(handler, partner):
    handler.up_req_time = time.localtime()
    tsp = time.strftime("%Y-%m-%d %H:%M:%S", handler.up_req_time)
    # t2 = time.strftime("%Y%m%d%H%M%S", handler.up_req_time)
    price = handler.price

    in_order = 'IP%s%s%s' % (partner['user_number'], handler.order_id[1:15],
                             handler.order_id[-4:])

    # sign: UserNumber + PhoneNumber + Province + City + PhoneClass + PhoneMoney (positive integer) + Time + Sign
    sign = signature(partner['user_number'], in_order, handler.order_id,
                     handler.mobile, 'Auto', 'Auto', 'Auto', price, 'None',
                     tsp, '600', partner['key'])

    body = '&'.join([
        'UserNumber=%s' % partner['user_number'],
        'InOrderNumber=%s' % in_order,
        'OutOrderNumber=%s' % handler.order_id,
        'PhoneNumber=%s' % handler.mobile, 'Province=Auto', 'City=Auto',
        'PhoneClass=Auto',
        'PhoneMoney=%s' % price, 'SellPrice=None',
        'StartTime=%s' % quote(tsp), 'TimeOut=600',
        'RecordKey=%s' % sign[0:16], 'Remark=--'
    ])

    url = partner['url.order']

    # print(handler.order_id + ":" + body)
    request_log.info('CALL_REQ %s', body, extra={'orderid': handler.order_id})

    # call & wait
    http_client = AsyncHTTPClient()
    try:
        response = yield http_client.fetch(url,
                                           method='POST',
                                           body=body,
                                           connect_timeout=60,
                                           request_timeout=60)
    except Exception as e:
        request_log.error('CALL UPSTREAM FAIL %s',
                          e,
                          extra={'orderid': handler.order_id})
        response = None
    finally:
        http_client.close()

    handler.up_resp_time = time.localtime()  # <--------------

    result = 9999
    if response and response.code == 200:
        body = response.body.decode('gbk')
        request_log.info('CALL_RESP %s',
                         body,
                         extra={'orderid': handler.order_id})

        try:
            root = et.fromstring(body)
            r = root.find('result').text

            if r == 'success':
                handler.up_order_id = root.find('inOrderNumber').text
                result = 0
            elif r in [
                    'sameorder', 'ordererr', 'attrerr', 'phoneerr', 'moneyerr',
                    'sellpriceerr', 'dberr'
            ]:
                result = 5003

            handler.up_result = result
        except Exception as e:
            result = 9999
            request_log.error('PARSE UPSTREAM %s',
                              e,
                              extra={'orderid': handler.order_id})

    return result
Exemple #57
0
@coroutine
def cull_idle(url, api_token, timeout, cull_users=False):
    """Shutdown idle single-user servers

    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {'Authorization': 'token %s' % api_token}
    req = HTTPRequest(
        url=url + '/users',
        headers=auth_header,
    )
    now = datetime.datetime.utcnow()
    cull_limit = now - datetime.timedelta(seconds=timeout)
    client = AsyncHTTPClient()
    resp = yield client.fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    @coroutine
    def cull_one(user, last_activity):
        """cull one user"""

        # shutdown server first. Hub doesn't allow deleting users with running servers.
        if user['server']:
            app_log.info("Culling server for %s (inactive since %s)",
                         user['name'], last_activity)
            req = HTTPRequest(
                url=url + '/users/%s/server' % user['name'],
                method='DELETE',
                headers=auth_header,
            )
            resp = yield client.fetch(req)
            if resp.code == 202:
                msg = "Server for {} is slow to stop.".format(user['name'])
                if cull_users:
                    app_log.warning(msg + " Not culling user yet.")
                    # return here so we don't continue to cull the user
                    # which will fail if the server is still trying to shutdown
                    return
                app_log.warning(msg)
        if cull_users:
            app_log.info("Culling user %s (inactive since %s)", user['name'],
                         last_activity)
            req = HTTPRequest(
                url=url + '/users/%s' % user['name'],
                method='DELETE',
                headers=auth_header,
            )
            yield client.fetch(req)

    for user in users:
        if not user['server'] and not cull_users:
            # server not running and not culling users, nothing to do
            continue
        if not user['last_activity']:
            continue
        last_activity = parse_date(user['last_activity'])
        if last_activity < cull_limit:
            # user might be in a transition (e.g. starting or stopping)
            # don't try to cull if this is happening
            if user['pending']:
                app_log.warning("Not culling user %s with pending %s",
                                user['name'], user['pending'])
                continue
            futures.append((user['name'], cull_one(user, last_activity)))
        else:
            app_log.debug("Not culling %s (active since %s)", user['name'],
                          last_activity)

    for (name, f) in futures:
        yield f
        app_log.debug("Finished culling %s", name)
Exemple #58
0
@gen.coroutine
def coroutine_fetch(url):
    http_client = AsyncHTTPClient()
    response = yield http_client.fetch(url)
    raise gen.Return(response.body)
Exemple #59
0
@gen.coroutine
def fetch_corotine(url):
    http_client = AsyncHTTPClient()
    res = yield http_client.fetch(url)
    raise gen.Return(res.body)
Exemple #60
0
class AsyncHTTPTestCase(AsyncTestCase):
    '''A test case that starts up an HTTP server.

    Subclasses must override get_app(), which returns the
    tornado.web.Application (or other HTTPServer callback) to be tested.
    Tests will typically use the provided self.http_client to fetch
    URLs from this server.

    Example::

        class MyHTTPTest(AsyncHTTPTestCase):
            def get_app(self):
                return Application([('/', MyHandler)...])

            def test_homepage(self):
                # The following two lines are equivalent to
                #   response = self.fetch('/')
                # but are shown in full here to demonstrate explicit use
                # of self.stop and self.wait.
                self.http_client.fetch(self.get_url('/'), self.stop)
                response = self.wait()
                # test contents of response
    '''
    def setUp(self):
        super(AsyncHTTPTestCase, self).setUp()
        self.__port = None

        self.http_client = AsyncHTTPClient(io_loop=self.io_loop)
        self._app = self.get_app()
        self.http_server = HTTPServer(self._app, io_loop=self.io_loop,
                                      **self.get_httpserver_options())
        self.http_server.listen(self.get_http_port(), address="127.0.0.1")

    def get_app(self):
        """Should be overridden by subclasses to return a
        tornado.web.Application or other HTTPServer callback.
        """
        raise NotImplementedError()

    def fetch(self, path, **kwargs):
        """Convenience method to synchronously fetch a url.

        The given path will be appended to the local server's host and port.
        Any additional kwargs will be passed directly to
        AsyncHTTPClient.fetch (and so could be used to pass method="POST",
        body="...", etc).
        """
        self.http_client.fetch(self.get_url(path), self.stop, **kwargs)
        return self.wait()

    def get_httpserver_options(self):
        """May be overridden by subclasses to return additional
        keyword arguments for HTTPServer.
        """
        return {}

    def get_http_port(self):
        """Returns the port used by the HTTPServer.

        A new port is chosen for each test.
        """
        if self.__port is None:
            self.__port = get_unused_port()
        return self.__port

    def get_url(self, path):
        """Returns an absolute url for the given path on the test server."""
        return 'http://localhost:%s%s' % (self.get_http_port(), path)

    def tearDown(self):
        self.http_server.stop()
        self.http_client.close()
        super(AsyncHTTPTestCase, self).tearDown()