Code example #1
File: remote.py  Project: garyni/motioneye
def check_timelapse_movie(local_config, group, callback):
    scheme, host, port, username, password, path, camera_id = _remote_params(local_config)
    
    logging.debug('checking timelapse movie status for remote camera %(id)s on %(url)s' % {
            'id': camera_id,
            'url': pretty_camera_url(local_config)})
    
    request = _make_request(scheme, host, port, username, password, path + '/picture/%(id)s/timelapse/%(group)s/?check=true' % {
            'id': camera_id,
            'group': group})
    
    def on_response(response):
        if response.error:
            logging.error('failed to check timelapse movie status for remote camera %(id)s on %(url)s: %(msg)s' % {
                    'id': camera_id,
                    'url': pretty_camera_url(local_config),
                    'msg': utils.pretty_http_error(response)})

            return callback(error=utils.pretty_http_error(response))
        
        try:
            response = json.loads(response.body)

        except Exception as e:
            logging.error('failed to decode json answer from %(url)s: %(msg)s' % {
                    'url': pretty_camera_url(local_config),
                    'msg': unicode(e)})

            return callback(error=unicode(e))
        
        callback(response)

    http_client = AsyncHTTPClient()
    http_client.fetch(request, _callback_wrapper(on_response))
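Note: the example above uses Tornado's classic callback style. A minimal coroutine-style sketch of the same check (Tornado 4.x gen.coroutine syntax, reusing the snippet's own _remote_params/_make_request helpers; error handling elided, so treat this as illustrative rather than motionEye's actual code):

from tornado import gen
from tornado.httpclient import AsyncHTTPClient

@gen.coroutine
def check_timelapse_movie_coro(local_config, group):
    scheme, host, port, username, password, path, camera_id = _remote_params(local_config)
    request = _make_request(scheme, host, port, username, password,
                            path + '/picture/%s/timelapse/%s/?check=true' % (camera_id, group))
    response = yield AsyncHTTPClient().fetch(request)  # raises HTTPError on failure
    raise gen.Return(json.loads(response.body))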
Code example #2
File: spiderman.py  Project: Martikos/spiderman
class SeedSearch(Spiderman):
    """Searches YouTube using keywords/query.
    """

    functions = 'search'

    def search(self):

        self.done = False
        self.requests_made = 1

        # callback function parameters
        search_key = self.search_key = str(datetime.now())
        related_search = self.related_search
        client = self.client

        cb = lambda x: related_search(x, 0)

        keywords = [str(k) for k in self.input_object['seed']]
        search_query = '+'.join(keywords).replace(' ', '+')

        global pages
        global max_results

        self.http_client = AsyncHTTPClient()

        for start_index in range(1, pages):
            request_url = "http://gdata.youtube.com/feeds/api/videos?q={0}&orderby=relevance&alt=jsonc&v=2&max-results={1}&start-index={2}".format(
                    search_query,
                    max_results,
                    start_index*25)
            self.http_client.fetch(request_url, callback=cb)
Code example #3
File: jobs_test.py  Project: WIPACrepo/iceprod
    def test_220_jobs(self):
        client = AsyncHTTPClient()
        data = {
            'dataset_id': 'foo',
            'job_index': 0,
        }
        r = yield client.fetch('http://localhost:%d/jobs'%self.port,
                method='POST', body=json.dumps(data),
                headers={'Authorization': b'bearer '+self.token})
        self.assertEqual(r.code, 201)
        ret = json.loads(r.body)
        job_id = ret['result']

        data2 = {'status':'failed'}
        r = yield client.fetch('http://localhost:%d/datasets/%s/jobs/%s/status'%(self.port,data['dataset_id'],job_id),
                method='PUT', body=json.dumps(data2),
                headers={'Authorization': b'bearer '+self.token})
        self.assertEqual(r.code, 200)

        r = yield client.fetch('http://localhost:%d/datasets/%s/jobs/%s'%(self.port,data['dataset_id'],job_id),
                headers={'Authorization': b'bearer '+self.token})
        self.assertEqual(r.code, 200)
        ret = json.loads(r.body)
        self.assertIn('status', ret)
        self.assertEqual(ret['status'], 'failed')
Code example #4
File: remote.py  Project: garyni/motioneye
def get_timelapse_movie(local_config, key, group, callback):
    scheme, host, port, username, password, path, camera_id = _remote_params(local_config)
    
    logging.debug('downloading timelapse movie for remote camera %(id)s on %(url)s' % {
            'id': camera_id,
            'url': pretty_camera_url(local_config)})
    
    request = _make_request(scheme, host, port, username, password, path + '/picture/%(id)s/timelapse/%(group)s/?key=%(key)s' % {
            'id': camera_id,
            'group': group,
            'key': key},
            timeout=10 * settings.REMOTE_REQUEST_TIMEOUT)

    def on_response(response):
        if response.error:
            logging.error('failed to download timelapse movie for remote camera %(id)s on %(url)s: %(msg)s' % {
                    'id': camera_id,
                    'url': pretty_camera_url(local_config),
                    'msg': utils.pretty_http_error(response)})

            return callback(error=utils.pretty_http_error(response))

        callback({
            'data': response.body,
            'content_type': response.headers.get('Content-Type'),
            'content_disposition': response.headers.get('Content-Disposition')
        })

    http_client = AsyncHTTPClient()
    http_client.fetch(request, _callback_wrapper(on_response))
Code example #5
File: spiderman.py  Project: Martikos/spiderman
class ExpandSearch(Spiderman):
    """Expands youtube video network.
    """

    functions = 'expand'

    def search(self):

        self.done = False
        self.requests_made = 1
        self.network = {}

        # callback function parameters
        search_key = self.search_key = str(datetime.now())
        related_search = self.related_search
        client = self.client

        cb = lambda x: related_search(x, 0)

        video_ids = [str(k) for k in self.input_object['seed']]

        global pages
        global max_results

        self.http_client = AsyncHTTPClient()

        for video_id in video_ids:
            self.http_client.fetch("http://gdata.youtube.com/feeds/api/videos/{}/related?alt=jsonc&v=2".format(video_id),
                              callback=cb)
Code example #6
File: api_tester.py  Project: bentaljaard/apiTester
    def test_api(self):
        print("Running test")
        self.mock_requests = {}
        # invoke service to be tested
        client = AsyncHTTPClient(self.io_loop)

        client.fetch(self.test_data['service_endpoint'], self.stop)
        

        mocks_with_assertions = [x for x in self.test_data['mocks'] if 'body' in x['mock']['request']]
        for mock in mocks_with_assertions:
            self.wait(timeout=30)
            self.assertEqual(flatten_text(self.mock_requests[mock['mock']['name']].decode("utf-8")), flatten_text(mock['mock']['request']['body']))
            #TODO: Assert request headers
        response = self.wait()
        # print(response)

        # perform assertions
        for assertion in self.test_data['assertions']:
            if 'http_code' in assertion:
                self.assertEqual(response.code, assertion['http_code'])
            if 'response' in assertion:
                self.assertEqual(flatten_text(response.body.decode("utf-8")), flatten_text(assertion['response']))
            if 'content-type' in assertion:
                self.assertEqual(response.headers['Content-Type'], assertion['content-type'])
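Note: the stop/wait pairing above is tornado.testing.AsyncTestCase's legacy callback-testing pattern (pre-Tornado 6, where fetch still accepted a callback): fetch is handed self.stop as its callback, and self.wait() runs the IOLoop until that callback fires, returning its argument. A minimal self-contained version of the pattern:

from tornado.httpclient import AsyncHTTPClient
from tornado.testing import AsyncTestCase

class FetchTest(AsyncTestCase):
    def test_fetch(self):
        AsyncHTTPClient().fetch("http://example.com/", self.stop)
        response = self.wait()  # blocks until self.stop(response) is called
        self.assertEqual(200, response.code)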
Code example #7
    def __sweep(self):
        """
        the function called by the cleanup service
        """
        #print 'candid: ' + str(len(self.__remove_candidates))
        self.__log.info('cleanup service - Sweep started')
        for sharing_secret in self.__remove_candidates:
            #cleanup all the listeners
            candidate = self.__remove_candidates[sharing_secret]
            #If a lot of actions are still queued up in the space waiting
            #to be executed, don't clean it up; give it a chance for
            #another sweeping period
            if not candidate.is_being_processed():
                self.__log.info('cleanup service - cleaning candidate for %s' % sharing_secret)
                candidate.cleanup()
                #notify the load balancer of the cleanup
                http = AsyncHTTPClient()
                load_balancer = Properties.load_balancer_url
                url = '/'.join([load_balancer, 'SharingFactory',sharing_secret])
                http.fetch(url, method='DELETE', callback=None)
                #yield gen.Task(http.fetch, url, method = 'DELETE')
                #remove if from stored sharing spaces
                del(self.__sharing_spaces[sharing_secret])
            else:
                self.__log.info('cleanup service - skipping candidate for %s: still being processed' % sharing_secret)


        #now nominate every one
        self.__remove_candidates.clear()
        for sharing_secret in self.__sharing_spaces:
            self.__remove_candidates[sharing_secret] = \
                self.__sharing_spaces[sharing_secret]
        self.__log.info('cleanup service - Sweep finished')
        self.timer = Timer(self.SWEEP_PERIOD, self.__sweep)
        self.timer.start()
Code example #8
File: motionctl.py  Project: Ethocreeper/motioneye
def set_motion_detection(camera_id, enabled):
    from tornado.httpclient import HTTPRequest, AsyncHTTPClient
    
    thread_id = camera_id_to_thread_id(camera_id)
    if thread_id is None:
        return logging.error('could not find thread id for camera with id %s' % camera_id)
    
    if not enabled:
        _motion_detected[camera_id] = False
    
    logging.debug('%(what)s motion detection for camera with id %(id)s' % {
            'what': ['disabling', 'enabling'][enabled],
            'id': camera_id})
    
    url = 'http://127.0.0.1:7999/%(id)s/detection/%(enabled)s' % {
            'id': thread_id,
            'enabled': ['pause', 'start'][enabled]}
    
    def on_response(response):
        if response.error:
            logging.error('failed to %(what)s motion detection for camera with id %(id)s: %(msg)s' % {
                    'what': ['disable', 'enable'][enabled],
                    'id': camera_id,
                    'msg': utils.pretty_http_error(response)})
        
        else:
            logging.debug('successfully %(what)s motion detection for camera with id %(id)s' % {
                    'what': ['disabled', 'enabled'][enabled],
                    'id': camera_id})

    request = HTTPRequest(url, connect_timeout=_MOTION_CONTROL_TIMEOUT, request_timeout=_MOTION_CONTROL_TIMEOUT)
    http_client = AsyncHTTPClient()
    http_client.fetch(request, on_response)
Code example #9
File: taskpool.py  Project: heartshare/imagecrawler
class TaskPool:
    def __init__(self, maxClients):
        self._ioloop = ioloop.IOLoop()
        self._httpClient = AsyncHTTPClient(self._ioloop, maxClients)
        self._taskNum = 0
        
    def run(self):
        self._check()
        self._ioloop.start()

    def spawn(self, request, callback, **kwargs):
        def wrapped(response):
            self._taskNum -= 1
            try:
                callback(response)
            except:
                print 'spawn error:', traceback.format_exc()
                
        self._taskNum += 1
        self._httpClient.fetch(request, wrapped, **kwargs)

    def _check(self):
        def callback():
            if self._taskNum == 0:
                self._ioloop.stop()

            return self._check()
                
        self._ioloop.add_callback(callback)
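Note: _check re-registers itself with add_callback on every IOLoop iteration, which keeps the loop spinning even when idle. A gentler variant (a sketch written against the same old-style Tornado API the snippet already uses, not the project's code) polls the counter on a timer instead:

def _check(self):
    def callback():
        if self._taskNum == 0:
            self._checker.stop()
            self._ioloop.stop()
    # poll every 100 ms instead of once per event-loop iteration
    self._checker = ioloop.PeriodicCallback(callback, 100, io_loop=self._ioloop)
    self._checker.start()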
Code example #10
def asynchronous_fetch(url, callback):
    http_client = AsyncHTTPClient()

    def handle_res(res):
        callback(res.body)

    http_client.fetch(url, callback=handle_res)
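Note: on Tornado 5+ the same helper is usually written as a native coroutine with no callback parameter at all, since fetch returns an awaitable:

async def asynchronous_fetch(url):
    http_client = AsyncHTTPClient()
    response = await http_client.fetch(url)
    return response.body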
Code example #11
File: motionctl.py  Project: Ethocreeper/motioneye
def get_motion_detection(camera_id, callback):
    from tornado.httpclient import HTTPRequest, AsyncHTTPClient
    
    thread_id = camera_id_to_thread_id(camera_id)
    if thread_id is None:
        error = 'could not find thread id for camera with id %s' % camera_id
        logging.error(error)
        return callback(error=error)

    url = 'http://127.0.0.1:7999/%(id)s/detection/status' % {'id': thread_id}
    
    def on_response(response):
        if response.error:
            return callback(error=utils.pretty_http_error(response))

        enabled = bool(response.body.lower().count('active'))
        
        logging.debug('motion detection is %(what)s for camera with id %(id)s' % {
                'what': ['disabled', 'enabled'][enabled],
                'id': camera_id})

        callback(enabled)

    request = HTTPRequest(url, connect_timeout=_MOTION_CONTROL_TIMEOUT, request_timeout=_MOTION_CONTROL_TIMEOUT)
    http_client = AsyncHTTPClient()
    http_client.fetch(request, callback=on_response)
Code example #12
def test_broadcast(s, a, b):
    ss = HTTPScheduler(s)
    ss.listen(0)
    s.services['http'] = ss

    aa = HTTPWorker(a)
    aa.listen(0)
    a.services['http'] = aa
    a.service_ports['http'] = aa.port
    s.worker_info[a.address]['services']['http'] = aa.port

    bb = HTTPWorker(b)
    bb.listen(0)
    b.services['http'] = bb
    b.service_ports['http'] = bb.port
    s.worker_info[b.address]['services']['http'] = bb.port

    client = AsyncHTTPClient()

    a_response = yield client.fetch('http://localhost:%d/info.json' % aa.port)
    b_response = yield client.fetch('http://localhost:%d/info.json' % bb.port)
    s_response = yield client.fetch('http://localhost:%d/broadcast/info.json'
                                    % ss.port)
    assert (json.loads(s_response.body.decode()) ==
            {a.address: json.loads(a_response.body.decode()),
             b.address: json.loads(b_response.body.decode())})

    ss.stop()
    aa.stop()
    bb.stop()
Code example #13
File: __init__.py  Project: krmnn/wall
    def search(self, query, callback):
        def cb(response):
            # TODO: check response for errors

            data = json.load(response.buffer)
            entries = data['feed']['entry']

            results = []
            for entry in entries:
                meta = entry['media$group']

                # construct video URL (with autoplay enabled)
                video = 'https://www.youtube.com/embed/{0}?autoplay=1'.format(
                    meta['yt$videoid']['$t'])

                thumbnail = filter(lambda t: t['yt$name'] == 'default',
                    meta['media$thumbnail'])[0]
                thumbnail = thumbnail['url']

                result = SearchResult(meta['media$title']['$t'], video, self.id,
                    thumbnail)
                results.append(result)

            callback(results)

        # Youtube API documentation:
        # https://developers.google.com/youtube/2.0/developers_guide_protocol
        client = AsyncHTTPClient()
        qs = urlencode({
            'q':           query,
            'max-results': '5',
            'alt':         'json',
            'v':           '2'
        })
        client.fetch('https://gdata.youtube.com/feeds/api/videos/?' + qs, cb)
Code example #14
def test_with_data(e, s, a, b):
    ss = HTTPScheduler(s)
    ss.listen(0)

    L = e.map(inc, [1, 2, 3])
    L2 = yield e._scatter(['Hello', 'world!'])
    yield _wait(L)

    client = AsyncHTTPClient()
    response = yield client.fetch('http://localhost:%d/memory-load.json' %
                                  ss.port)
    out = json.loads(response.body.decode())

    assert all(isinstance(v, int) for v in out.values())
    assert set(out) == {a.address, b.address}
    assert sum(out.values()) == sum(map(getsizeof,
                                        [1, 2, 3, 'Hello', 'world!']))

    response = yield client.fetch('http://localhost:%s/memory-load-by-key.json'
                                  % ss.port)
    out = json.loads(response.body.decode())
    assert set(out) == {a.address, b.address}
    assert all(isinstance(v, dict) for v in out.values())
    assert all(k in {'inc', 'data'} for d in out.values() for k in d)
    assert all(isinstance(v, int) for d in out.values() for v in d.values())

    assert sum(v for d in out.values() for v in d.values()) == \
            sum(map(getsizeof, [1, 2, 3, 'Hello', 'world!']))

    ss.stop()
Code example #15
def test_with_status(e, s, a, b):
    ss = HTTPScheduler(s)
    ss.listen(0)

    client = AsyncHTTPClient()
    response = yield client.fetch('http://localhost:%d/tasks.json' %
                                  ss.port)
    out = json.loads(response.body.decode())
    assert out['total'] == 0
    assert out['processing'] == 0
    assert out['failed'] == 0
    assert out['in-memory'] == 0
    assert out['ready'] == 0
    assert out['waiting'] == 0

    L = e.map(div, range(10), range(10))
    yield _wait(L)

    client = AsyncHTTPClient()
    response = yield client.fetch('http://localhost:%d/tasks.json' %
                                  ss.port)
    out = json.loads(response.body.decode())
    assert out['failed'] == 1
    assert out['in-memory'] == 9
    assert out['ready'] == 0
    assert out['total'] == 10
    assert out['waiting'] == 0

    ss.stop()
Code example #16
File: client.py  Project: thm-tech/forward
    def post(self):
        json_str = self.get_argument("json_msg")
        url = self.get_argument("url")
        method = self.get_argument("method")

        url = "http://115.28.143.67:" + str(PORT) + url

        print "------request url: " + url + "-----method:" + method

        if method == "POST" or method == "PUT":
            request = HTTPRequest(url, method, body=json_str)
            http = AsyncHTTPClient()
            response = yield http.fetch(request)

            print "------response json: " + response.body

            self.write(response.body)

        if method == "DELETE":
            request = HTTPRequest(url, method)
            http = AsyncHTTPClient()
            response = yield http.fetch(request)

            print "------response json: " + response.body

            self.write(response.body)
Code example #17
File: client.py  Project: thm-tech/forward
    def post(self):

        json_str = self.get_argument("json_msg")
        # print "json_str: ",json_str
        value_obj = json.loads(json_str)

        com_val = COMMAND_URL_DICT[value_obj["command"]]
        com_url = com_val[0]
        com_func = com_val[1]
        url = "http://115.28.143.67:" + str(PORT) + com_url
        print "---------------------------------------"
        print "request url: " + url
        print "request json: " + json_str
        print "---------------------------------------"

        if "GET" == com_func:
            request = HTTPRequest(url, "GET")
            http = AsyncHTTPClient()
            response = yield http.fetch(request)
            print "---------------------------------------"
            print "response json: " + response.body
            print "---------------------------------------"
            self.write(response.body)
        elif "POST" == com_func:
            request = HTTPRequest(url, "POST", body=json_str)
            http = AsyncHTTPClient()
            response = yield http.fetch(request)
            print "---------------------------------------"
            print "response json: " + response.body
            print "---------------------------------------"
            self.write(response.body)
        else:
            pass
Code example #18
def cull_idle(url, api_token, timeout):
    """cull idle single-user servers"""
    auth_header = {
            'Authorization': 'token %s' % api_token
        }
    req = HTTPRequest(url=url + '/api/users',
        headers=auth_header,
    )
    now = datetime.datetime.utcnow()
    cull_limit = now - datetime.timedelta(seconds=timeout)
    client = AsyncHTTPClient()
    resp = yield client.fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []
    for user in users:
        last_activity = parse_date(user['last_activity'])
        if user['server'] and last_activity < cull_limit:
            app_log.info("Culling %s (inactive since %s)", user['name'], last_activity)
            req = HTTPRequest(url=url + '/api/users/%s/server' % user['name'],
                method='DELETE',
                headers=auth_header,
            )
            futures.append((user['name'], client.fetch(req)))
        elif user['server'] and last_activity > cull_limit:
            app_log.debug("Not culling %s (active since %s)", user['name'], last_activity)

    for (name, f) in futures:
        yield f
        app_log.debug("Finished culling %s", name)
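Note: cull_idle is a yield-based coroutine (its @coroutine decorator presumably sits just above this excerpt). A typical one-shot invocation from a script:

from tornado.ioloop import IOLoop
IOLoop.current().run_sync(lambda: cull_idle(url, api_token, timeout))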
Code example #19
File: srv.py  Project: Hipo/tornado_smack
    def w():
        http_client = AsyncHTTPClient()
        if req_cnt % 2 == 0:
            response = yield http_client.fetch("http://localhost:8889/wait/%s/%s" % (5, req_cnt))
        else:
            response = yield http_client.fetch("http://localhost:8890/wait/%s/%s" % (1, req_cnt))
        print ">>>> response >>>", response, response.body, handler, handler.req_cnt
Code example #20
File: remote.py  Project: rafi1975/motioneye
def get_media_content(local_config, filename, media_type, callback):
    scheme, host, port, username, password, path, camera_id = _remote_params(local_config)

    logging.debug(
        "downloading file %(filename)s of remote camera %(id)s on %(url)s"
        % {"filename": filename, "id": camera_id, "url": pretty_camera_url(local_config)}
    )

    path += "/%(media_type)s/%(id)s/download/%(filename)s" % {
        "media_type": media_type,
        "id": camera_id,
        "filename": filename,
    }

    # timeout here is 10 times larger than usual - we expect a big delay when fetching the media list
    request = _make_request(scheme, host, port, username, password, path, timeout=10 * settings.REMOTE_REQUEST_TIMEOUT)

    def on_response(response):
        if response.error:
            logging.error(
                "failed to download file %(filename)s of remote camera %(id)s on %(url)s: %(msg)s"
                % {
                    "filename": filename,
                    "id": camera_id,
                    "url": pretty_camera_url(local_config),
                    "msg": utils.pretty_http_error(response),
                }
            )

            return callback(error=utils.pretty_http_error(response))

        return callback(response.body)

    http_client = AsyncHTTPClient()
    http_client.fetch(request, _callback_wrapper(on_response))
Code example #21
File: remote.py  Project: rafi1975/motioneye
def del_media_group(local_config, group, media_type, callback):
    scheme, host, port, username, password, path, camera_id = _remote_params(local_config)

    logging.debug(
        "deleting group %(group)s of remote camera %(id)s on %(url)s"
        % {"group": group, "id": camera_id, "url": pretty_camera_url(local_config)}
    )

    path += "/%(media_type)s/%(id)s/delete_all/%(group)s/" % {"media_type": media_type, "id": camera_id, "group": group}

    request = _make_request(
        scheme, host, port, username, password, path, method="POST", data="{}", timeout=settings.REMOTE_REQUEST_TIMEOUT
    )

    def on_response(response):
        if response.error:
            logging.error(
                "failed to delete group %(group)s of remote camera %(id)s on %(url)s: %(msg)s"
                % {
                    "group": group,
                    "id": camera_id,
                    "url": pretty_camera_url(local_config),
                    "msg": utils.pretty_http_error(response),
                }
            )

            return callback(error=utils.pretty_http_error(response))

        callback()

    http_client = AsyncHTTPClient()
    http_client.fetch(request, _callback_wrapper(on_response))
Code example #22
File: api.py  Project: NigelRook/superliminal
    def post(self):
        data = json_decode(self.request.body)
        logger.debug('Sonarr download: %s', data)
        event_type = data['EventType']
        if event_type in ['Test', 'Rename']:
            return

        http_client = AsyncHTTPClient()
        for episode in data['Episodes']:
            id = episode['Id']
            headers = {'X-Api-Key':env.settings.sonarr_api_key}

            request = HTTPRequest(
                method='GET', headers=headers,
                url='%s/api/Episode/%d' % (env.settings.sonarr_url, id))
            response = yield http_client.fetch(request)
            episode_data = json_decode(response.body)
            logger.debug('Sonarr episode data: %s', episode_data)

            file_id = episode_data['episodeFileId']
            request = HTTPRequest(
                method='GET', headers=headers,
                url='%s/api/EpisodeFile/%d' % (env.settings.sonarr_url, file_id))
            response = yield http_client.fetch(request)
            file_data = json_decode(response.body)
            logger.debug('Sonarr file data: %s', file_data)

            path = file_data['path']
            name = file_data['sceneName']+os.path.splitext(path)[1]
            logger.info("ADD (sonarr): %s -> %s", path, name)
            SuperliminalCore.add_video(path, name)
Code example #23
def test_simple(s, a, b):
    server = HTTPWorker(a)
    server.listen(0)
    client = AsyncHTTPClient()

    response = yield client.fetch('http://localhost:%d/info.json' % server.port)
    response = json.loads(response.body.decode())
    assert response['ncores'] == a.ncores
    assert response['status'] == a.status

    response = yield client.fetch('http://localhost:%d/resources.json' %
            server.port)
    response = json.loads(response.body.decode())

    a.data['x'] = 1

    try:
        import psutil
        assert 0 < response['memory_percent'] < 100
    except ImportError:
        assert response == {}

    endpoints = ['/files.json', '/processing.json', '/nbytes.json',
                 '/nbytes-summary.json']
    for endpoint in endpoints:
        response = yield client.fetch(('http://localhost:%d' % server.port)
                                      + endpoint)
        response = json.loads(response.body.decode())
        assert response

    server.stop()
Code example #24
    def sats_request(self, uri, callback, params=None, token=_ARG_DEFAULT, locale=None, api_version=2, **kwargs):
        if params is None:
            params = {}
        if token is self._ARG_DEFAULT:
            token = self.get_argument('t')
        if locale is None:
            locale = options.sats_api_default_locale

        url = '%s/%s/%s' % (
            options.sats_api_base_url.rstrip('/'),
            locale,
            uri.lstrip('/')
        )

        if token:
            params['atkn'] = token
        if api_version != 1:
            params['apiver'] = api_version

        if params:
            url += '?' + urllib.urlencode(params, True)

        all_kwargs = dict(
            user_agent='satscal.herokuapp.com',
        )
        all_kwargs.update(kwargs)

        logging.debug('Sending request to %s', url)

        client = AsyncHTTPClient()
        client.fetch(url, callback, **all_kwargs)
Code example #25
File: suggestions.py  Project: rdefeo/api
    def post_suggest(self, user_id: str, application_id: str, session_id: str, locale: str, context: dict,
                     callback) -> str:
        self.logger.debug(
            "user_id=%s,application_id=%s,session_id=%s,locale=%s,"
            "context=%s",
            user_id, application_id, session_id, locale, context
        )

        url = "%s?session_id=%s&application_id=%s&locale=%s" % (
            SUGGEST_URL, session_id, application_id, locale
        )
        url += "&user_id=%s" % user_id if user_id is not None else ""

        try:
            request_body = {
                "context": context
            }

            http_client = AsyncHTTPClient()
            http_client.fetch(HTTPRequest(url=url, body=dumps(request_body), method="POST"), callback=callback)
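            # NB: Tornado's docs say no methods may be called on an
            # AsyncHTTPClient after close(), and normal use keeps one shared,
            # long-lived client per process, so closing immediately after
            # scheduling a fetch is suspect.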
            http_client.close()

        except HTTPError:
            self.logger.error("url=%s", url)
            raise
Code example #26
File: chat.py  Project: ryr/hipochat
    def on_message(self, message):
        ts = get_utc_timestamp()
        message_dict = json.loads(message)
        message_dict.update({'timestamp': ts, 'author': self.profile})
        self.redis_client.zadd(self.chat_token, ts, json.dumps(message_dict))

        message_dict.update({'token': self.chat_token})
        pika_client.sample_message(json.dumps(message_dict))

        members = self.redis_client.smembers('%s-%s' % ('members', self.chat_token))
        members.discard(self.profile['token'])

        for other in members:
            # Increase notification count for users other than sender
            self.redis_client.incr('%s-%s-%s' % (REGULAR_MESSAGE_TYPE, self.chat_token, other))

        self.redis_client.set('%s-%s-%s' % (REGULAR_MESSAGE_TYPE, self.chat_token, self.profile['token']), 0)

        if REGULAR_MESSAGE_TYPE in NOTIFIABLE_MESSAGE_TYPES:
            headers = {'Authorization': 'Token %s' % self.profile['token'], 'content-type': 'application/json'}
            [members.discard(socket.profile['token']) for socket in websockets[self.chat_token]]

            data = {
                'chat_token': self.chat_token,
                'receivers': list(members),
                'body': message_dict['body'],
                'type': REGULAR_MESSAGE_TYPE,
                'author': message_dict['author']
            }

            client = AsyncHTTPClient()
            request = HTTPRequest(PUSH_NOTIFICATION_URL, body=json.dumps(data), headers=headers, method='POST')
            client.fetch(request, callback=push_notification_callback)
Code example #27
File: gitlab.py  Project: hydroshare/oauthenticator
    def _check_group_whitelist(self, username, user_id, is_admin, access_token):
        http_client = AsyncHTTPClient()
        headers = _api_headers(access_token)
        if is_admin:
            # For admins, /groups returns *all* groups. As a workaround
            # we check if we are a member of each group in the whitelist
            for group in map(url_escape, self.gitlab_group_whitelist):
                url = "%s/groups/%s/members/%d" % (GITLAB_API, group, user_id)
                req = HTTPRequest(url, method="GET", headers=headers)
                resp = yield http_client.fetch(req, raise_error=False)
                if resp.code == 200:
                    return True  # user _is_ in group
        else:
            # For regular users we get all the groups to which they have access
            # and check if any of these are in the whitelisted groups
            next_page = url_concat("%s/groups" % GITLAB_API,
                                   dict(all_available=True))
            while next_page:
                req = HTTPRequest(next_page, method="GET", headers=headers)
                resp = yield http_client.fetch(req)
                resp_json = json.loads(resp.body.decode('utf8', 'replace'))
                next_page = next_page_from_links(resp)
                user_groups = set(entry["path"] for entry in resp_json)
                # check if any of the organizations seen thus far are in whitelist
                if len(self.gitlab_group_whitelist & user_groups) > 0:
                    return True
            return False
Code example #28
File: motionctl.py  Project: dermotduffy/motioneye
def take_snapshot(camera_id):
    from tornado.httpclient import HTTPRequest, AsyncHTTPClient

    thread_id = camera_id_to_thread_id(camera_id)
    if thread_id is None:
        return logging.error('could not find thread id for camera with id %s' % camera_id)

    logging.debug('taking snapshot for camera with id %(id)s' % {'id': camera_id})

    url = 'http://127.0.0.1:%(port)s/%(id)s/action/snapshot' % {
            'port': settings.MOTION_CONTROL_PORT,
            'id': thread_id}

    def on_response(response):
        if response.error:
            logging.error('failed to take snapshot for camera with id %(id)s: %(msg)s' % {
                    'id': camera_id,
                    'msg': utils.pretty_http_error(response)})

        else:
            logging.debug('successfully took snapshot for camera with id %(id)s' % {'id': camera_id})

    request = HTTPRequest(url, connect_timeout=_MOTION_CONTROL_TIMEOUT, request_timeout=_MOTION_CONTROL_TIMEOUT)
    http_client = AsyncHTTPClient()
    http_client.fetch(request, on_response)
Code example #29
File: remote.py  Project: garyni/motioneye
def del_media_group(local_config, group, media_type, callback):
    scheme, host, port, username, password, path, camera_id = _remote_params(local_config)
    
    logging.debug('deleting group "%(group)s" of remote camera %(id)s on %(url)s' % {
            'group': group or 'ungrouped',
            'id': camera_id,
            'url': pretty_camera_url(local_config)})
    
    path += '/%(media_type)s/%(id)s/delete_all/%(group)s/' % {
            'media_type': media_type,
            'id': camera_id,
            'group': group}

    request = _make_request(scheme, host, port, username, password, path, method='POST', data='{}', timeout=settings.REMOTE_REQUEST_TIMEOUT)

    def on_response(response):
        if response.error:
            logging.error('failed to delete group "%(group)s" of remote camera %(id)s on %(url)s: %(msg)s' % {
                    'group': group or 'ungrouped',
                    'id': camera_id,
                    'url': pretty_camera_url(local_config),
                    'msg': utils.pretty_http_error(response)})
            
            return callback(error=utils.pretty_http_error(response))
        
        callback()

    http_client = AsyncHTTPClient()
    http_client.fetch(request, _callback_wrapper(on_response))
Code example #30
File: remote.py  Project: rafi1975/motioneye
def set_preview(local_config, controls, callback):
    scheme, host, port, username, password, path, camera_id = _remote_params(local_config)

    logging.debug(
        "setting preview for remote camera %(id)s on %(url)s"
        % {"id": camera_id, "url": pretty_camera_url(local_config)}
    )

    data = json.dumps(controls)

    request = _make_request(
        scheme,
        host,
        port,
        username,
        password,
        path + "/config/%(id)s/set_preview/" % {"id": camera_id},
        method="POST",
        data=data,
    )

    def on_response(response):
        if response.error:
            logging.error(
                "failed to set preview for remote camera %(id)s on %(url)s: %(msg)s"
                % {"id": camera_id, "url": pretty_camera_url(local_config), "msg": utils.pretty_http_error(response)}
            )

            return callback(error=utils.pretty_http_error(response))

        callback()

    http_client = AsyncHTTPClient()
    http_client.fetch(request, _callback_wrapper(on_response))
Code example #31
def http_callback_way(url1, url2):
    http_client = AsyncHTTPClient()
    http_client.fetch(url1, lambda res, u=url1: handle_result(res, u))
    print('%s: here, between the two requests' % time.time())
    http_client.fetch(url2, lambda res, u=url2: handle_result(res, u))
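Note: the u=url1 default argument binds the url into the callback so handle_result (undefined in this excerpt) can tell which request finished; the same default-argument trick is what defeats Python's late-binding closures when callbacks are created in a loop:

urls = [url1, url2]
# late binding: every callback would see the loop variable's final value
bad = [lambda res: handle_result(res, url) for url in urls]
# a default argument freezes the current value at definition time
good = [lambda res, u=url: handle_result(res, u) for url in urls]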
Code example #32
    def async(self, url):  # NB: 'async' became a reserved keyword in Python 3.7
        async_client = AsyncHTTPClient()
        async_client.fetch(url, self._handle_response)
Code example #33
    def bad_async_method(self, url):
        async_client = AsyncHTTPClient()
        async_client.fetch(url, self._handle_response)
        return 5
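Note: bad_async_method returns 5 immediately, while the fetch is still in flight, so the HTTP response can only arrive through self._handle_response. A coroutine version (a sketch in Tornado 5+ syntax) can return the real result instead:

async def good_async_method(self, url):
    async_client = AsyncHTTPClient()
    response = await async_client.fetch(url)
    return response  # available to the caller only once the fetch completes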
Code example #34
File: index.py  Project: 981935539/tornado_01
    def get(self):
        """Handle the GET request."""
        client = AsyncHTTPClient()
        response = yield client.fetch("http://hq.sinajs.cn/list=sz000001")
        self.write(response.body.decode('gbk'))
Code example #35
File: mds_direct.py  Project: shaitan/cocaine-tools
class MDSDirect(IPlugin):
    def __init__(self, proxy, config):
        super(MDSDirect, self).__init__(proxy)
        try:
            self.dist_info_endpoint = config["dist_info_endpoint"]
            self.mds_dist_info_endpoint = config["mds_dist_info_endpoint"]
            self.locator_port = config["locator_port"]
            self.filter_mds_stid = config.get("filter_stid", True)
            self.service_connect_timeout = timedelta(
                milliseconds=config.get("service_connect_timeout_ms", 1500))
            self.srw_httpclient = AsyncHTTPClient()
        except KeyError as err:
            raise PluginConfigurationError(self.name(),
                                           "option required %s" % err)

    @staticmethod
    def name():
        return "mds-direct"

    def match(self, request):
        if "X-Srw-Key" in request.headers and "X-Srw-Key-Type" in request.headers and "X-Srw-Namespace" in request.headers:
            key = request.headers["X-Srw-Key"]
            return not self.filter_mds_stid or is_mds_stid(key) or is_mds_key(
                key)
        return False

    @gen.coroutine
    def reelect_app(self, request, app):
        """tries to connect to the same app on differnet host from dist-info"""

        # disconnect app explicitly to break possibly existing connection
        app.disconnect()
        endpoints_size = len(app.locator.endpoints)

        # try up to N+1 times, where N is the number of endpoints in the app's locator.
        for _ in xrange(0, endpoints_size + 1):
            # last chance to take app from common pool
            if len(app.locator.endpoints) == 0:
                request.logger.info(
                    "giving up on connecting to dist-info hosts, falling back to common pool processing"
                )
                app = yield self.proxy.reelect_app(request, app)
                raise gen.Return(app)

            try:
                # always create a new locator to prevent locking, as we connect with a timeout
                # (the lock can still be held for the duration of a TCP timeout)
                locator = Locator(endpoints=app.locator.endpoints)
                request.logger.info("connecting to locator %s",
                                    locator.endpoints[0])

                # first try to connect to locator only on remote host with timeout
                yield gen.with_timeout(self.service_connect_timeout,
                                       locator.connect())
                request.logger.debug("connected to locator %s for %s",
                                     locator.endpoints[0], app.name)
                app = Service(app.name,
                              locator=locator,
                              timeout=RESOLVE_TIMEOUT)

                # try to resolve and connect to application itself
                yield gen.with_timeout(self.service_connect_timeout,
                                       app.connect())
                request.logger.debug("connected to application %s via %s",
                                     app.name, app.endpoints)
            except gen.TimeoutError:
                # on timeout try next endpoint first
                request.logger.warning(
                    "timed out while connecting to application")
                continue
            except ServiceError as err:
                request.logger.warning("got error while resolving app - %s",
                                       err)
                if err.category in LOCATORCATEGORY and err.code == ESERVICENOTAVAILABLE:
                    # if the application is down - also try next endpoint
                    continue
                else:
                    raise err
            finally:
                # drop first endpoint to start next connection from different endpoint
                # we do this because the locator's default connection-attempt logic does not fit here
                app.locator.endpoints = app.locator.endpoints[1:]
            # return connected app
            raise gen.Return(app)
        raise PluginApplicationError(42, 42,
                                     "could not connect to application")

    def is_stid_request(self, request):
        return request.headers["X-Srw-Key-Type"].upper() == "STID"

    @gen.coroutine
    def process(self, request):
        mds_request_headers = httputil.HTTPHeaders()
        if "Authorization" in request.headers:
            mds_request_headers["Authorization"] = request.headers[
                "Authorization"]

        traceid = getattr(request, "traceid", None)
        if traceid is not None:
            mds_request_headers["X-Request-Id"] = traceid

        key = request.headers["X-Srw-Key"]

        name, event = extract_app_and_event(request)
        self.proxy.setup_tracing(request, name)
        timeout = self.proxy.get_timeout(name, event)
        name = self.proxy.resolve_group_to_version(name)
        if self.is_stid_request(request):
            url = "%s/gate/dist-info/%s?primary-only" % (
                self.dist_info_endpoint, key)
            request.logger.debug(
                "fetching endpoints via mulcagate dist-info - %s", url)
            srw_request = HTTPRequest(url,
                                      method="GET",
                                      headers=mds_request_headers,
                                      allow_ipv6=True,
                                      request_timeout=timeout)
        else:
            url = "%s/dist-info-%s/%s" % (self.mds_dist_info_endpoint,
                                          request.headers["X-Srw-Namespace"],
                                          key)
            request.logger.debug("fetching endpoints via mds dist-info - %s",
                                 url)
            srw_request = HTTPRequest(url,
                                      method="GET",
                                      headers=mds_request_headers,
                                      allow_ipv6=True,
                                      request_timeout=timeout)

        endpoints = yield self.fetch_mds_endpoints(request, srw_request)
        locator = Locator(endpoints=endpoints)
        app = Service(name, locator=locator, timeout=RESOLVE_TIMEOUT)
        request.logger.info("connecting to app %s", name)
        app = yield self.reelect_app(request, app)
        # TODO: attempts should be configurable
        yield self.proxy.process(request, name, app, event,
                                 pack_httprequest(request), self.reelect_app,
                                 4, timeout)

    def decode_mulca_dist_info(self, body):
        lines = body.split("\n")
        endpoints = [(line.split()[0], self.locator_port) for line in lines
                     if line]
        shuffle(endpoints)
        return endpoints
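
    # Example of the (assumed) mulca dist-info payload the decoder above
    # expects - each non-empty line starts with a hostname:
    #   "host1.example.net 123\nhost2.example.net 456\n"
    # which becomes [('host1.example.net', locator_port), ...], shuffled.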

    def decode_mds_dist_info(self, body):
        obj = json.loads(body)
        endpoints = [(x['host'], self.locator_port) for x in obj['primary']]
        shuffle(endpoints)
        return endpoints

    @gen.coroutine
    def fetch_mds_endpoints(self, request, srw_request):
        try:
            # NOTE: we can do it in a streaming way
            resp = yield self.srw_httpclient.fetch(srw_request)
            body = resp.buffer.read(None)
            if self.is_stid_request(request):
                raise gen.Return(self.decode_mulca_dist_info(body))
            else:
                raise gen.Return(self.decode_mds_dist_info(body))

        except HTTPError as err:
            if err.code == 404:
                raise PluginNoSuchApplication("404")

            if err.code == 500:
                raise PluginApplicationError(42, 42, "500")

            if err.code == 401:
                fill_response_in(request, err.code,
                                 httplib.responses.get(err.code, httplib.OK),
                                 err.response.body, err.response.headers)
                return

            raise err
Code example #36
def up_yuechen(handler, partner):
    """联动通讯
        oid	商家订单编号	32	不可空	商户系统自己生成的订单号
        cid	商家编号	20	不可空	商户注册的时候产生的一个商户ID
        pr	单位面值	10	不可空	您所充值的单位商品面值
        nb	商品数量	1	不可空	您所需要充值的充值数量(固定为1)
        fm	充值金额	10	不可空	充值金额=商品面值*商品数量
        pn	充值号码	11	不可空	您所需要充值的帐号信息
        ct	充值类型		可以空	充值类型,缺省为快充
        ru	通知地址		不可空	通知地址,根据协议2.4充值结果通知,返回充值结果
        tsp	时间戳	14	不可空	请求时间戳,格式yyyyMMddHHmmss
        info1	扩展参数I	128	可以空
        info1	扩展参数II	128	可以空
        info1	扩展参数III	128	可以空
        sign	签名		不可空	原串拼接规则:
        md5({oid}{cid}{pr}{nb}{fm}{pn}{ru}{tsp}{key})
    """
    handler.up_req_time = time.localtime()
    tsp = time.strftime("%Y%m%d%H%M%S", time.localtime())

    # print(partner['key'])
    # sign md5({oid}{cid}{pr}{nb}{fm}{pn}{ru}{tsp}{key})
    sign = signature(handler.order_id, partner['cid'], handler.price, '1',
                     handler.price, handler.mobile, partner['ru'], tsp,
                     partner['key'])

    # package
    url = '{url}?oid={oid}&cid={cid}&pr={pr}&nb=1&fm={pr}&pn={pn}&ru={ru}&tsp={tsp}&sign={sign}'.format(
        url=partner['url.order'],
        oid=handler.order_id,
        cid=partner['cid'],
        pr=handler.price,
        pn=handler.mobile,
        # ru=urllib.parse.quote(partner['ru']),
        ru=partner['ru'],
        tsp=tsp,
        sign=sign)

    request_log.info('CALL_REQ YUECHEN %s',
                     url,
                     extra={'orderid': handler.order_id})

    # call & wait
    http_client = AsyncHTTPClient()

    try:
        response = yield http_client.fetch(url,
                                           connect_timeout=30,
                                           request_timeout=30)

    except Exception as e:
        request_log.error('CALL UPSTREAM FAIL %s',
                          e,
                          extra={'orderid': handler.order_id})
        response = None
    finally:
        http_client.close()

    result = 9999
    if response and response.code == 200:
        body = response.body.decode('gbk')
        request_log.info('CALL_RESP (%d) %s',
                         response.code,
                         body,
                         extra={'orderid': handler.order_id})

        root = ElementTree.fromstring(body)
        if root.find('result').text == 'true' and root.find(
                'code').text == '100':

            result = int(root.find('code').text)
            handler.up_order_id = root.find('data/sid').text
            handler.result = RESULT_MAP.get(result, result)
        else:
            result = int(root.find('code').text)
            handler.result = RESULT_MAP.get(result, result)

    return handler.result
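Note: the signature helper is not shown in this excerpt; a minimal implementation consistent with the call site and with the md5 rule quoted in the docstring could look like this (an assumption, not the project's verified code):

import hashlib

def signature(*parts):
    # concatenate all parts in order, then md5-hex the result:
    # md5({oid}{cid}{pr}{nb}{fm}{pn}{ru}{tsp}{key})
    return hashlib.md5(''.join(str(p) for p in parts).encode('utf8')).hexdigest()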
Code example #37
    def authenticate(self, handler, data=None):
        code = handler.get_argument("code")
        # TODO: Configure the curl_httpclient for tornado
        http_client = AsyncHTTPClient()

        params = dict(redirect_uri=self.get_callback_url(handler),
                      code=code,
                      grant_type='authorization_code')
        params.update(self.extra_params)

        if self.token_url:
            url = self.token_url
        else:
            raise ValueError(
                "Please set the OAUTH2_TOKEN_URL environment variable")

        b64key = base64.b64encode(
            bytes("{}:{}".format(self.client_id, self.client_secret), "utf8"))

        headers = {
            "Accept": "application/json",
            "User-Agent": "JupyterHub",
            "Authorization": "Basic {}".format(b64key.decode("utf8"))
        }
        req = HTTPRequest(
            url,
            method="POST",
            headers=headers,
            validate_cert=self.tls_verify,
            body=urllib.parse.urlencode(
                params)  # Body is required for a POST...
        )

        resp = yield http_client.fetch(req)

        resp_json = json.loads(resp.body.decode('utf8', 'replace'))

        access_token = resp_json['access_token']
        refresh_token = resp_json.get('refresh_token', None)
        token_type = resp_json['token_type']
        scope = resp_json.get('scope', '')
        if (isinstance(scope, str)):
            scope = scope.split(' ')

        # Determine who the logged in user is
        headers = {
            "Accept": "application/json",
            "User-Agent": "JupyterHub",
            "Authorization": "{} {}".format(token_type, access_token)
        }
        if self.userdata_url:
            url = url_concat(self.userdata_url, self.userdata_params)
        else:
            raise ValueError(
                "Please set the OAUTH2_USERDATA_URL environment variable")

        req = HTTPRequest(
            url,
            method=self.userdata_method,
            headers=headers,
            validate_cert=self.tls_verify,
        )
        resp = yield http_client.fetch(req)
        resp_json = json.loads(resp.body.decode('utf8', 'replace'))

        if not resp_json.get(self.username_key):
            self.log.error("OAuth user contains no key %s: %s",
                           self.username_key, resp_json)
            return

        return {
            'name': resp_json.get(self.username_key),
            'auth_state': {
                'access_token': access_token,
                'refresh_token': refresh_token,
                'oauth_user': resp_json,
                'scope': scope,
            }
        }
Code example #38
def up_xiaowo(handler, partner):
    handler.up_req_time = time.localtime()

    timeStamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    cpid = partner["cpid"]
    usercode = handler.mobile
    secretkey = partner["secretkey"]
    data = cpid + '|' + usercode + '|' + timeStamp + '|' + secretkey
    password = md5(data)

    k = 'private:xiaowo:{carrier}:{price}'.format(carrier=handler.carrier,
                                                  price=handler.price)
    if handler.scope and handler.scope != '0':
        k = k + ':' + handler.scope

    pid = handler.slave.get(k)

    if pid is None:
        handler.up_result = 5003
        return handler.up_result

    body = CODE.format(serviceid='Asynorder',
                       cpid=cpid,
                       usercode=usercode,
                       password=password,
                       pid=pid,
                       eftype='0',
                       reqordernum=handler.order_id,
                       timeStamp=timeStamp,
                       gamecode='')
    url = partner["url_busi"]
    url = url + '?' + body

    result = 9999
    http_client = AsyncHTTPClient()
    try:
        request_log.info("REQU %s", body, extra={'orderid': handler.order_id})
        response = yield http_client.fetch(url,
                                           method='GET',
                                           request_timeout=120)

    except HTTPError as http_error:
        request_log.error('CALL UPSTREAM FAIL %s',
                          http_error,
                          extra={'orderid': handler.order_id})
        result = 60000 + http_error.code
        response = None

    except Exception as e:
        request_log.error('CALL UPSTREAM FAIL %s',
                          e,
                          extra={'orderid': handler.order_id})
        response = None
    finally:
        http_client.close()

    handler.up_resp_time = time.localtime()

    if response and response.code == 200:
        response_body = response.body.decode('utf8')
        request_log.info("RESP %s",
                         response_body,
                         extra={'orderid': handler.order_id})
        try:
            response_body = json.loads(response_body)
            resp_code = response_body["returncode"]
            result = RESULT_MAP.get(resp_code, 0)
            handler.up_result = str(result)

        except Exception as e:
            result = 9999
            handler.up_result = result
            request_log.error('PARSE UPSTREAM %s',
                              e,
                              extra={'orderid': handler.order_id})
    return result
Code example #39
    def get(self, ip, port, rest):
        client = AsyncHTTPClient()
        response = yield client.fetch("http://%s:%s/%s" % (ip, port, rest))
        self.write(response.body)  # TODO: capture more data of response
Code example #40
    def github_api_request(self, api_url, etag=None):
        client = AsyncHTTPClient()
        if self.auth:
            # Add auth params. After logging!
            api_url = url_concat(api_url, self.auth)

        headers = {}
        if etag:
            headers['If-None-Match'] = etag
        req = HTTPRequest(api_url, headers=headers, user_agent="BinderHub")

        try:
            resp = yield client.fetch(req)
        except HTTPError as e:
            if e.code == 304:
                resp = e.response
            elif (
                e.code == 403
                and e.response
                and e.response.headers.get('x-ratelimit-remaining') == '0'
            ):
                rate_limit = e.response.headers['x-ratelimit-limit']
                reset_timestamp = int(e.response.headers['x-ratelimit-reset'])
                reset_seconds = int(reset_timestamp - time.time())
                self.log.error(
                    "GitHub Rate limit ({limit}) exceeded. Reset in {delta}.".format(
                        limit=rate_limit,
                        delta=timedelta(seconds=reset_seconds),
                    )
                )
                # round expiry up to nearest 5 minutes
                minutes_until_reset = 5 * (1 + (reset_seconds // 60 // 5))

                raise ValueError("GitHub rate limit exceeded. Try again in %i minutes."
                    % minutes_until_reset
                )
            # Status 422 is returned by the API when we try and resolve a non
            # existent reference
            elif e.code in (404, 422):
                return None
            else:
                raise

        # record and log github rate limit
        remaining = int(resp.headers['x-ratelimit-remaining'])
        rate_limit = int(resp.headers['x-ratelimit-limit'])
        reset_timestamp = int(resp.headers['x-ratelimit-reset'])

        # record with prometheus
        GITHUB_RATE_LIMIT.set(remaining)

        # log at different levels, depending on remaining fraction
        fraction = remaining / rate_limit
        if fraction < 0.2:
            log = self.log.warning
        elif fraction < 0.5:
            log = self.log.info
        else:
            log = self.log.debug

        # str(timedelta) looks like '00:32'
        delta = timedelta(seconds=int(reset_timestamp - time.time()))
        log("GitHub rate limit remaining {remaining}/{limit}. Reset in {delta}.".format(
            remaining=remaining, limit=rate_limit, delta=delta,
        ))
        return resp
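Note: the etag/304 handling above implements GitHub's conditional requests. A sketch of a caller that caches response bodies by ETag (the cache shape and names here are illustrative, not BinderHub's actual code):

import json
from tornado import gen

_cache = {}

@gen.coroutine
def fetch_with_cache(provider, api_url):
    hit = _cache.get(api_url)
    resp = yield provider.github_api_request(api_url, etag=hit and hit['etag'])
    if resp is None:  # 404/422: reference does not exist
        raise gen.Return(None)
    if resp.code == 304 and hit:  # unchanged since last fetch: reuse cache
        raise gen.Return(hit['body'])
    body = json.loads(resp.body.decode('utf8'))
    _cache[api_url] = {'etag': resp.headers.get('ETag'), 'body': body}
    raise gen.Return(body)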
Code example #41
File: test_server.py  Project: ydo-kimura/httpwatcher
    def exec_watch_server_tests(self, base_path):
        _base_path = base_path.strip('/')
        if _base_path:
            _base_path += "/"
        client = AsyncHTTPClient()
        client.fetch("http://localhost:5555/%s" % _base_path, self.stop)
        response = self.wait()

        self.assertEqual(200, response.code)
        html = html5lib.parse(response.body)
        ns = get_html_namespace(html)
        self.assertEqual(
            "Hello world",
            html_findall(html, ns, "./{ns}head/{ns}title")[0].text.strip())

        script_tags = html_findall(html, ns, "./{ns}body/{ns}script")
        self.assertEqual(2, len(script_tags))
        self.assertEqual("http://localhost:5555/httpwatcher.min.js",
                         script_tags[0].attrib['src'])
        self.assertEqual('httpwatcher("ws://localhost:5555/httpwatcher");',
                         script_tags[1].text.strip())

        # if it's a non-standard base path
        if len(_base_path) > 0:
            # we shouldn't be able to find anything at the root base path
            client.fetch("http://localhost:5555/", self.stop)
            response = self.wait()
            self.assertEqual(404, response.code)

        # test a file from the sub-path
        client.fetch("http://localhost:5555/%ssubfolder/" % _base_path,
                     self.stop)
        response = self.wait()
        self.assertEqual(200, response.code)
        html = html5lib.parse(response.body)
        ns = get_html_namespace(html)
        self.assertEqual(
            "Level 1 Test",
            html_findall(html, ns, "./{ns}head/{ns}title")[0].text.strip())

        # test fetching from the sub-path without a trailing slash
        client.fetch("http://localhost:5555/%ssubfolder" % _base_path,
                     self.stop)
        response = self.wait()
        self.assertEqual(200, response.code)

        # test a file from the sub-sub-path
        client.fetch(
            "http://localhost:5555/%ssubfolder/subsubfolder/" % _base_path,
            self.stop)
        response = self.wait()
        self.assertEqual(200, response.code)
        html = html5lib.parse(response.body)
        ns = get_html_namespace(html)
        self.assertEqual(
            "Level 2 Test",
            html_findall(html, ns, "./{ns}head/{ns}title")[0].text.strip())

        # test fetching from the sub-sub-path without a trailing slash
        client.fetch(
            "http://localhost:5555/%ssubfolder/subsubfolder" % _base_path,
            self.stop)
        response = self.wait()
        self.assertEqual(200, response.code)

        # fetch the httpwatcher.min.js file
        client.fetch("http://localhost:5555/httpwatcher.min.js", self.stop)
        response = self.wait()

        self.assertEqual(200, response.code)
        self.assertEqual(self.expected_httpwatcher_js, response.body)

        # now connect via WebSockets
        websocket_connect("ws://localhost:5555/httpwatcher").add_done_callback(
            lambda future: self.stop(future.result()))
        websocket_client = self.wait()

        # trigger a watcher reload
        write_file(self.temp_path, "README.txt", "Hello world!")

        IOLoop.current().call_later(
            1.0,
            lambda: websocket_client.read_message(
                lambda future: self.stop(future.result())))
        msg = json.loads(self.wait())
        self.assertIn("command", msg)
        self.assertEqual("reload", msg["command"])
コード例 #42
0
class AsyncDynamoDB(AWSAuthConnection):
    """
    The main class for asynchronous connections to DynamoDB.

    The user should maintain one instance of this class (though more than one is ok),
    parametrized with the user's access key and secret key. Make calls with make_request
    or the helper methods, and AsyncDynamoDB will maintain session tokens in the background.


    As in Boto Layer1:
    "This is the lowest-level interface to DynamoDB.  Methods at this
    layer map directly to API requests and parameters to the methods
    are either simple, scalar values or they are the Python equivalent
    of the JSON input as defined in the DynamoDB Developer's Guide.
    All responses are direct decoding of the JSON response bodies to
    Python data structures via the json or simplejson modules."
    """

    DefaultHost = 'dynamodb.us-east-1.amazonaws.com'
    """The default DynamoDB API endpoint to connect to."""

    ServiceName = 'DynamoDB'
    """The name of the Service"""

    Version = '20120810'
    """DynamoDB API version."""

    ThruputError = "ProvisionedThroughputExceededException"
    """The error response returned when provisioned throughput is exceeded"""

    ExpiredSessionError = 'com.amazon.coral.service#ExpiredTokenException'
    """The error response returned when session token has expired"""

    UnrecognizedClientException = 'com.amazon.coral.service#UnrecognizedClientException'
    '''Another error response that is possible with a bad session token'''
    def __init__(self,
                 aws_access_key_id=None,
                 aws_secret_access_key=None,
                 is_secure=True,
                 port=None,
                 proxy=None,
                 proxy_port=None,
                 host=None,
                 debug=0,
                 session_token=None,
                 endpoint=None,
                 authenticate_requests=True,
                 validate_cert=True,
                 max_sts_attempts=3,
                 ioloop=None):
        if not host:
            host = self.DefaultHost
        if endpoint is not None:
            self.url = endpoint
            parse_url = urlparse(self.url)
            self.host = parse_url.hostname
            self.port = parse_url.port
            self.protocol = parse_url.scheme
        else:
            self.protocol = 'https' if is_secure else 'http'
            self.host = host
            self.port = port

            url = '{0}://{1}'.format(self.protocol, self.host)

            if self.port:
                url += ':{}'.format(self.port)

            self.url = url
        self.validate_cert = validate_cert
        self.authenticate_requests = authenticate_requests
        AWSAuthConnection.__init__(self,
                                   self.host,
                                   aws_access_key_id,
                                   aws_secret_access_key,
                                   is_secure,
                                   self.port,
                                   proxy,
                                   proxy_port,
                                   debug=debug,
                                   security_token=session_token,
                                   validate_certs=self.validate_cert)
        self.ioloop = ioloop or IOLoop.instance()
        self.http_client = AsyncHTTPClient(io_loop=self.ioloop)
        self.pending_requests = deque()
        self.sts = AsyncAwsSts(aws_access_key_id, aws_secret_access_key,
                               is_secure, self.port, proxy, proxy_port)
        assert (isinstance(max_sts_attempts, int) and max_sts_attempts >= 0)
        self.max_sts_attempts = max_sts_attempts

    def _init_session_token_cb(self, error=None):
        if error:
            logging.warn("Unable to get session token: %s" % error)

    def _required_auth_capability(self):
        return ['hmac-v4']

    def _update_session_token(self, callback, attempts=0, bypass_lock=False):
        '''
        Begins the logic to get a new session token. Performs checks to ensure
        that only one request goes out at a time and that backoff is respected, so
        it can be called repeatedly with no ill effects. Set bypass_lock to True to
        override this behavior.
        '''
        if self.provider.security_token == PENDING_SESSION_TOKEN_UPDATE and not bypass_lock:
            return
        self.provider.security_token = PENDING_SESSION_TOKEN_UPDATE  # invalidate the current security token
        return self.sts.get_session_token(
            functools.partial(self._update_session_token_cb,
                              callback=callback,
                              attempts=attempts))

    def _update_session_token_cb(self,
                                 creds,
                                 provider='aws',
                                 callback=None,
                                 error=None,
                                 attempts=0):
        '''
        Callback to use with `async_aws_sts`. The 'provider' arg is a bit misleading;
        it is a relic from boto and should probably be left at its default. This will
        take the new Credentials obj from `async_aws_sts.get_session_token()` and use
        it to update self.provider, and then will clear the deque of pending requests.

        A callback is optional. If provided, it must be callable without any arguments,
        but also accept an optional error argument that will be an instance of BotoServerError.
        '''
        def raise_error():
            # get out of locked state
            self.provider.security_token = None
            if callable(callback):
                return callback(error=error)
            else:
                logging.error(error)
                raise error

        if error:
            if isinstance(error, InvalidClientTokenIdError):
                # no need to retry if error is due to bad tokens
                raise_error()
            else:
                if attempts > self.max_sts_attempts:
                    raise_error()
                else:
                    seconds_to_wait = (0.1 * (2**attempts))
                    logging.warning(
                        "Got error[ %s ] getting session token, retrying in %.02f seconds"
                        % (error, seconds_to_wait))
                    self.ioloop.add_timeout(
                        time.time() + seconds_to_wait,
                        functools.partial(self._update_session_token,
                                          attempts=attempts + 1,
                                          callback=callback,
                                          bypass_lock=True))
                    return
        else:
            self.provider = Provider(provider, creds.access_key,
                                     creds.secret_key, creds.session_token)
            # force the correct auth, with the new provider
            self._auth_handler = HmacAuthV4Handler(self.host, None,
                                                   self.provider)
            while self.pending_requests:
                request = self.pending_requests.pop()
                request()
            if callable(callback):
                return callback()

    def make_request(self, action, body='', callback=None, object_hook=None):
        '''
        Make an asynchronous HTTP request to DynamoDB. Callback should operate on
        the decoded json response (with object hook applied, of course). It should also
        accept an error argument, which will be a boto.exception.DynamoDBResponseError.

        If there is not a valid session token, this method will queue the request,
        ensure that a new token is fetched, and replay the request once it arrives.
        '''
        this_request = functools.partial(self.make_request,
                                         action=action,
                                         body=body,
                                         callback=callback,
                                         object_hook=object_hook)
        if self.authenticate_requests and self.provider.security_token in [
                None, PENDING_SESSION_TOKEN_UPDATE
        ]:
            # we will not be able to complete this request because we do not have a valid session token.
            # queue it and try to get a new one. _update_session_token will ensure that only one request
            # for a session token goes out at a time
            self.pending_requests.appendleft(this_request)

            def cb_for_update(error=None):
                # create a callback to handle errors getting session token
                # callback here is assumed to take a json response, and an instance of DynamoDBResponseError
                if error:
                    return callback({},
                                    error=DynamoDBResponseError(
                                        error.status, error.reason,
                                        error.body))
                else:
                    return

            self._update_session_token(cb_for_update)
            return
        headers = {
            'X-Amz-Target': '%s_%s.%s' % (self.ServiceName, self.Version, action),
            'Content-Type': 'application/x-amz-json-1.0',
            'Content-Length': str(len(body))
        }
        request = HTTPRequest(self.url,
                              method='POST',
                              headers=headers,
                              body=body,
                              validate_cert=self.validate_cert)
        request.path = '/'  # Important! set the path variable for signing by boto (<2.7). '/' is the path for all dynamodb requests
        request.auth_path = '/'  # Important! set the auth_path variable for signing by boto(>2.7). '/' is the path for all dynamodb requests
        request.params = {}
        request.port = self.port
        request.protocol = self.protocol
        request.host = self.host
        if self.authenticate_requests:
            self._auth_handler.add_auth(
                request)  # add signature to headers of the request
        self.http_client.fetch(request,
                               functools.partial(
                                   self._finish_make_request,
                                   callback=callback,
                                   orig_request=this_request,
                                   token_used=self.provider.security_token,
                                   object_hook=object_hook))  # bam!

    def _finish_make_request(self,
                             response,
                             callback,
                             orig_request,
                             token_used,
                             object_hook=None):
        '''
        Check for errors and decode the json response (in the tornado response body), then pass on to orig callback.
        This method also contains some of the logic to handle reacquiring session tokens.
        '''
        try:
            json_response = json.loads(response.body, object_hook=object_hook)
        except TypeError:
            json_response = None

        if json_response and response.error:
            # Normal error handling where we have a JSON response from AWS.
            if any(token_error in json_response.get('__type', '')
                   for token_error in (self.ExpiredSessionError,
                                       self.UnrecognizedClientException)):
                if self.provider.security_token == token_used:
                    # the token that we used has expired. wipe it out
                    self.provider.security_token = None
                return orig_request(
                )  # make_request will handle logic to get a new token if needed, and queue until it is fetched
            else:
                # because some errors are benign, include the response when an error is passed
                return callback(json_response,
                                error=DynamoDBResponseError(
                                    response.error.code,
                                    response.error.message, json_response))

        if json_response is None:
            # We didn't get any JSON back, but we also didn't receive an error response. This can't be right.
            return callback(None,
                            error=DynamoDBResponseError(
                                response.code, response.body))
        else:
            return callback(json_response, error=None)

    def get_item(self,
                 table_name,
                 key,
                 callback,
                 attributes_to_get=None,
                 consistent_read=False,
                 object_hook=None):
        '''
        Return a set of attributes for an item that matches
        the supplied key.

        The callback should operate on a dict representing the decoded
        response from DynamoDB (using the object_hook, if supplied)

        :type table_name: str
        :param table_name: The name of the table containing the item.

        :type key: dict
        :param key: A Python version of the Key data structure
            defined by DynamoDB.

        :type attributes_to_get: list
        :param attributes_to_get: A list of attribute names.
            If supplied, only the specified attribute names will
            be returned.  Otherwise, all attributes will be returned.

        :type consistent_read: bool
        :param consistent_read: If True, a consistent read
            request is issued.  Otherwise, an eventually consistent
            request is issued.
        '''
        data = {'TableName': table_name, 'Key': key}
        if attributes_to_get:
            data['AttributesToGet'] = attributes_to_get
        if consistent_read:
            data['ConsistentRead'] = True
        return self.make_request('GetItem',
                                 body=json.dumps(data),
                                 callback=callback,
                                 object_hook=object_hook)

    def batch_get_item(self, request_items, callback):
        """
        Return a set of attributes for a multiple items in
        multiple tables using their primary keys.

        The callback should operate on a dict representing the decoded
        response from DynamoDB (using the object_hook, if supplied)

        :type request_items: dict
        :param request_items: A Python version of the RequestItems
            data structure defined by DynamoDB.
        """
        data = {'RequestItems': request_items}
        json_input = json.dumps(data)
        self.make_request('BatchGetItem', json_input, callback)

    def put_item(self,
                 table_name,
                 item,
                 callback,
                 expected=None,
                 return_values=None,
                 object_hook=None):
        '''
        Create a new item or replace an old item with a new
        item (including all attributes).  If an item already
        exists in the specified table with the same primary
        key, the new item will completely replace the old item.
        You can perform a conditional put by specifying an
        expected rule.

        The callback should operate on a dict representing the decoded
        response from DynamoDB (using the object_hook, if supplied)

        :type table_name: str
        :param table_name: The name of the table in which to put the item.

        :type item: dict
        :param item: A Python version of the Item data structure
            defined by DynamoDB.

        :type expected: dict
        :param expected: A Python version of the Expected
            data structure defined by DynamoDB.

        :type return_values: str
        :param return_values: Controls the return of attribute
            name-value pairs before they were changed.  Possible
            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
            specified and the item is overwritten, the content
            of the old item is returned.
        '''
        data = {'TableName': table_name, 'Item': item}
        if expected:
            data['Expected'] = expected
        if return_values:
            data['ReturnValues'] = return_values
        json_input = json.dumps(data)
        return self.make_request('PutItem',
                                 json_input,
                                 callback=callback,
                                 object_hook=object_hook)

    def query(self,
              table_name,
              hash_key_value,
              callback,
              range_key_conditions=None,
              attributes_to_get=None,
              limit=None,
              consistent_read=False,
              scan_index_forward=True,
              exclusive_start_key=None,
              object_hook=None):
        '''
        Perform a query of DynamoDB.  The request body is assembled
        from the supplied parameters and sent as JSON to DynamoDB.

        The callback should operate on a dict representing the decoded
        response from DynamoDB (using the object_hook, if supplied)

        :type table_name: str
        :param table_name: The name of the table to query.

        :type hash_key_value: dict
        :param hash_key_value: A DynamoDB-style HashKeyValue.

        :type range_key_conditions: dict
        :param range_key_conditions: A Python version of the
            RangeKeyConditions data structure.

        :type attributes_to_get: list
        :param attributes_to_get: A list of attribute names.
            If supplied, only the specified attribute names will
            be returned.  Otherwise, all attributes will be returned.

        :type limit: int
        :param limit: The maximum number of items to return.

        :type consistent_read: bool
        :param consistent_read: If True, a consistent read
            request is issued.  Otherwise, an eventually consistent
            request is issued.

        :type scan_index_forward: bool
        :param scan_index_forward: Specified forward or backward
            traversal of the index.  Default is forward (True).

        :type exclusive_start_key: list or tuple
        :param exclusive_start_key: Primary key of the item from
            which to continue an earlier query.  This would be
            provided as the LastEvaluatedKey in that query.
        '''
        data = {'TableName': table_name, 'HashKeyValue': hash_key_value}
        if range_key_conditions:
            data['RangeKeyCondition'] = range_key_conditions
        if attributes_to_get:
            data['AttributesToGet'] = attributes_to_get
        if limit:
            data['Limit'] = limit
        if consistent_read:
            data['ConsistentRead'] = True
        if scan_index_forward:
            data['ScanIndexForward'] = True
        else:
            data['ScanIndexForward'] = False
        if exclusive_start_key:
            data['ExclusiveStartKey'] = exclusive_start_key
        json_input = json.dumps(data)
        return self.make_request('Query',
                                 body=json_input,
                                 callback=callback,
                                 object_hook=object_hook)
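A minimal usage sketch of the callback style above; the table name, credentials, and key shape are placeholders (the key format shown assumes the 2012-08-10 API version declared in `Version`):

import logging
from tornado.ioloop import IOLoop

def on_item(response, error=None):
    # response is the decoded JSON body; error is a
    # DynamoDBResponseError, or None on success
    if error is not None:
        logging.error("GetItem failed: %s", error)
    else:
        logging.info("Item: %s", response.get('Item'))
    IOLoop.instance().stop()

db = AsyncDynamoDB(aws_access_key_id='AKIA-PLACEHOLDER',
                   aws_secret_access_key='SECRET-PLACEHOLDER')
db.get_item('users', {'user_id': {'S': 'user-1'}}, on_item)
IOLoop.instance().start()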
コード例 #43
0
ファイル: index.py プロジェクト: zhxins/tornadoProject
    @tornado.gen.coroutine  # required for the yield/Return below to work
    def getData(self):
        client = AsyncHTTPClient()
        url = "http://s.budejie.com/topic/tag-topic/64/hot/budejie-android-"
        res = yield client.fetch(url)

        raise tornado.gen.Return(res)  # return the data
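A sketch of how a handler method might consume this coroutine (assuming both live on the same RequestHandler):

    @tornado.gen.coroutine
    def get(self):
        res = yield self.getData()  # res is a tornado HTTPResponse
        self.write(res.body)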
コード例 #44
0
def test_root_redirect(c, s, a, b):
    http_client = AsyncHTTPClient()
    response = yield http_client.fetch('http://localhost:%d/' %
                                       s.services['bokeh'].port)
    assert response.code == 200
    assert "/status" in response.effective_url
コード例 #45
0
class TornadoMixpanelConsumer:
    def __init__(self,
                 events_url=None,
                 people_url=None,
                 import_url=None,
                 request_timeout=None,
                 ioloop=None):
        self._endpoints = {
            'events': events_url or 'https://api.mixpanel.com/track',
            'people': people_url or 'https://api.mixpanel.com/engage',
            'imports': import_url or 'https://api.mixpanel.com/import',
        }
        self._queues = {}
        self._request_timeout = request_timeout

        if ioloop is None:
            ioloop = IOLoop.current()

        self.ioloop = ioloop
        self._api_key = None
        self._httpclient = AsyncHTTPClient()
        self._tasks = []
        for endpoint in self._endpoints:
            self._queues[endpoint] = asyncio.Queue()
            self._tasks.append(asyncio.ensure_future(self.flush(endpoint)))

    def shutdown(self):
        for task in self._tasks:
            task.cancel()

    def send(self, endpoint, json_message, api_key=None):

        if endpoint not in self._endpoints:
            raise Exception(
                'Mixpanel error: No such endpoint "{0}". Valid endpoints are one of {1}'
                .format(endpoint, self._endpoints.keys()))

        if api_key is not None:
            self._api_key = api_key
        self._queues[endpoint].put_nowait(json_message)

    async def flush(self, endpoint, flush_delay_limit=10, max_size=50):

        last_flush = 0  # 0 so that the first event is always sent
        batch = []
        while True:
            batch.append(await self._queues[endpoint].get())
            while (len(batch) < max_size
                   and time.time() - flush_delay_limit < last_flush):
                try:
                    batch.append(
                        (await asyncio.wait_for(self._queues[endpoint].get(),
                                                2)))
                except asyncio.TimeoutError:
                    break
            batch_json = '[{0}]'.format(','.join(batch))
            batch = []
            last_flush = time.time()

            data = {
                'data': base64.b64encode(batch_json.encode('utf8')),
                'verbose': 1,
                'ip': 0,
            }
            if self._api_key:
                data.update({'api_key': self._api_key})
            encoded_data = urllib.parse.urlencode(data).encode('utf8')

            resp = await to_asyncio_future(
                self._httpclient.fetch(self._endpoints[endpoint],
                                       method="POST",
                                       headers={
                                           'Content-Type':
                                           "application/x-www-form-urlencoded"
                                       },
                                       body=encoded_data))

            try:
                response = json_decode(resp.body)
                if response['status'] != 1:
                    log.error('Mixpanel error: {0}'.format(response['error']))
            except ValueError:
                log.exception(
                    'Cannot interpret Mixpanel server response: {0}'.format(
                        resp.body))
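A usage sketch: the official `mixpanel` client accepts a pluggable consumer object exposing `send(endpoint, json_message)`, which this class implements, so (assuming the `mixpanel` package is installed) it can be wired in directly; the token is a placeholder:

import mixpanel

consumer = TornadoMixpanelConsumer()
mp = mixpanel.Mixpanel('YOUR_PROJECT_TOKEN', consumer=consumer)
mp.track('user-1', 'signed_up')  # queued; sent in batches by flush()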
コード例 #46
0
ファイル: http.py プロジェクト: Laisky/kipp
class HTTPSessionClient:
    """HTTPClient with permanent cookies"""
    def __init__(self, *args, **kw):
        self._cookies_dict = {}
        self.httpclient = AsyncHTTPClient(*args, **kw)

    def close(self):
        self.httpclient.close()

    def __exit__(self):
        self.close()

    def get(self, *args, **kw):
        """Request HTTP via GET"""
        kw.update({"method": "GET"})
        return self.fetch(*args, **kw)

    def delete(self, *args, **kw):
        """Request HTTP via DELETE"""
        kw.update({"method": "DELETE"})
        return self.fetch(*args, **kw)

    def post(self, *args, **kw):
        """Request HTTP via POST"""
        kw.update({"method": "POST"})
        return self.fetch(*args, **kw)

    def patch(self, *args, **kw):
        """Request HTTP via PATCH"""
        kw.update({"method": "PATCH"})
        return self.fetch(*args, **kw)

    def head(self, *args, **kw):
        """Request HTTP via HEAD"""
        kw.update({"method": "HEAD"})
        return self.fetch(*args, **kw)

    @coroutine
    def fetch(self, *args, **kw):
        """Generate HTTP Request

        Args:
            request (str): url
            method (str): ``GET`` / ``POST``
            params (dict): url parameters
            data (dict): form
            json (dict): form for json
            cookies (dict):
            headers (dict):
        """
        args = self._parse_url(args, kw)
        self._parse_headers(kw)
        self._parse_body(kw)

        get_logger().debug("HTTPSessionClient fetch for args %s, kw %s", args,
                           kw)
        resp = yield self.httpclient.fetch(*args, **kw)
        resp = HTTPResponse(resp)
        self._load_cookies_fr_resp(resp)
        return_in_coroutine(resp)

    def _parse_url(self, args, kw):
        """Add parameters to url"""
        params = kw.pop("params", {})
        url = kw.pop("request", "") or args[0]
        url = url_concat(url, params)
        return (url, ) + args[1:]

    def _parse_body(self, kw):
        """Convert body into suitable format"""
        body = kw.get("body")
        data = kw.pop("data", {})
        djson = kw.pop("json", {})
        assert not (body and (data or djson)), \
            "you should not specify body together with data/json"
        assert not (data and djson), \
            "you should not specify both data and json"
        if data:
            kw["body"] = str(urlencode(data))
            kw["headers"]["Content-Type"] = "application/x-www-form-urlencoded"

        if djson:
            kw["body"] = json_encode(djson)
            kw["headers"]["Content-Type"] = "application/javascript"

    def _parse_headers(self, kw):
        """Add ``Connection`` & ``Cookie`` into headers"""
        kw["headers"] = kw.get("headers", {})
        kw["headers"]["Connection"] = "keep-alive"
        self._parse_cookies(kw)

    def _parse_cookies(self, kw):
        """Add user custom cookies into headers"""
        user_cookies = kw.pop("cookies", {})
        user_cookies.update(kw["headers"].get("Cookie", {}))
        cookies = self._get_cookies(user_cookies)
        if cookies:
            kw["headers"]["Cookie"] = cookies

    def _get_cookies(self, user_cookies=None):
        """Concatenate legacy cookies with user cookies"""
        # merge so user-supplied cookies override stored session cookies
        # of the same name, instead of discarding the session entirely
        merged = dict(self._cookies_dict)
        if user_cookies:
            merged.update(user_cookies)
        cookies = self._parse_cookies_to_str(merged)

        get_logger().debug("_get_cookies return cookies: %s", cookies)
        return cookies

    def _load_cookies_fr_resp(self, resp):
        """Load cookies from response"""
        for new_cookie in resp.cookies:
            for n, v in parse_cookie(new_cookie).items():
                self._cookies_dict[n] = v

    def _parse_cookies_to_str(self, cookies):
        """Parse dict of cookies into string"""
        return ";".join(["{}={}".format(n, v) for n, v in cookies.items()])
コード例 #47
0
    def authenticate(self, handler, data=None):
        code = handler.get_argument("code")
        # TODO: Configure the curl_httpclient for tornado
        http_client = AsyncHTTPClient()

        params = dict(redirect_uri=self.get_callback_url(handler),
                      code=code,
                      grant_type='authorization_code')

        if self.token_url:
            url = self.token_url
        else:
            raise ValueError(
                "Please set the OAUTH2_TOKEN_URL environment variable")

        b64key = base64.b64encode(
            bytes("{}:{}".format(self.client_id, self.client_secret), "utf8"))

        headers = {
            "Accept": "application/json",
            "User-Agent": "JupyterHub",
            "Authorization": "Basic {}".format(b64key.decode("utf8"))
        }
        req = HTTPRequest(
            url,
            method="POST",
            headers=headers,
            body=urllib.parse.urlencode(
                params)  # Body is required for a POST...
        )

        resp = yield http_client.fetch(req)

        resp_json = json.loads(resp.body.decode('utf8', 'replace'))

        access_token = resp_json['access_token']
        refresh_token = resp_json.get('refresh_token', None)
        token_type = resp_json['token_type']
        scope = (resp_json.get('scope', '')).split(' ')

        # Determine who the logged in user is
        headers = {
            "Accept": "application/json",
            "User-Agent": "JupyterHub",
            "Authorization": "{} {}".format(token_type, access_token)
        }
        if self.userdata_url:
            url = url_concat(self.userdata_url, self.userdata_params)
        else:
            raise ValueError(
                "Please set the OAUTH2_USERDATA_URL environment variable")

        req = HTTPRequest(
            url,
            method=self.userdata_method,
            headers=headers,
        )

        resp = yield http_client.fetch(req)

        resp_json = json.loads(resp.body.decode('utf8', 'replace'))

        username = resp_json.get(self.username_key)
        if not username:
            username = resp_json.get("username")

        if not username:
            self.log.error("OAuth user contains no key %s: %s",
                           self.username_key, resp_json)
            return

        if self.authorized_groups:
            authorized = False

            if username in self.authorized_groups:
                authorized = True

            if not authorized:
                _group_urls = self.group_urls.split(",")
                for g_url in _group_urls:
                    groups_req = HTTPRequest(g_url.strip(),
                                             method="GET",
                                             headers=headers)
                    groups_resp = None
                    try:
                        groups_resp = yield http_client.fetch(groups_req)
                    except HTTPClientError as e:
                        if e.response:
                            self.log.error(
                                "failed to fetch groups for: %s. Reason: %s",
                                g_url, e.response.reason)

                        continue

                    groups_resp_json = json.loads(
                        groups_resp.body.decode('utf8', 'replace'))

                    # Determine whether the user is member of one of the authorized groups
                    user_group_id = [g["id"] for g in groups_resp_json]
                    for group_id in self.authorized_groups.split(","):
                        if group_id in user_group_id:
                            authorized = True
                            break

                    if authorized:
                        break

            if not authorized:
                return

        return {
            'name': username,
            'auth_state': {
                'access_token': access_token,
                'refresh_token': refresh_token,
                'oauth_user': resp_json,
                'scope': scope,
            }
        }
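A configuration sketch for jupyterhub_config.py, mirroring the attributes the authenticator reads above; the class name `MyOAuthenticator` and all URLs/values are hypothetical placeholders:

c.JupyterHub.authenticator_class = MyOAuthenticator  # hypothetical subclass
c.MyOAuthenticator.token_url = 'https://provider.example/oauth/token'
c.MyOAuthenticator.userdata_url = 'https://provider.example/oauth/userinfo'
c.MyOAuthenticator.userdata_method = 'GET'
c.MyOAuthenticator.username_key = 'preferred_username'
c.MyOAuthenticator.authorized_groups = 'admins,researchers'
c.MyOAuthenticator.group_urls = 'https://provider.example/api/v1/groups'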
コード例 #48
0
ファイル: index.py プロジェクト: zhxins/tornadoProject
    def get(self):
        # create the client

        client = AsyncHTTPClient()
        url = "http://s.budejie.com/topic/tag-topic/64/hot/budejie-android-"
        client.fetch(url, self.on_response)
コード例 #49
0
class Crawler:
    def __init__(self, policy):
        """
        :type policy: ScrapingPolicy
        """
        self.policy = policy
        self.requests_in_flight = 0
        self.crawled = set()
        self.backlog = set()
        self.redirects = {}  # maps URLs to their destination
        self.errors = {}  # maps URLs to their (error) HTTP status codes
        AsyncHTTPClient.configure(None, defaults=dict(
            user_agent='Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'))
        self.http_client = AsyncHTTPClient(
            max_clients=policy.max_concurrent_requests)

    def crawl(self):
        """
        Crawls the site and writes the scraped HTML (and images, etc.) to disk
        """
        self.enqueue_urls([self.policy.start_at_url])
        ioloop.IOLoop.instance().start()

        # Only after IOLoop.instance().stop() has been called...
        with open(os.path.join(self.policy.out_directory, 'errors.log'),
                  'w') as error_file:
            error_file.writelines(f"HTTP Status {status_code}: {url}\n"
                                  for url, status_code in self.errors.items())

        with open(os.path.join(self.policy.out_directory, 'redirects.conf'),
                  'w') as redirects_file:
            for original, destination in self.redirects.items():
                parsed = urlparse(original)
                if parsed.query:
                    # TODO: This really needs a test on a live Nginx server!
                    redirects_file.write(f"if ($args ~* \"{parsed.query}\") {{"
                                         f"    rewrite ^{destination}? last;"
                                         "}\n")
                else:
                    redirects_file.write(
                        f"rewrite ^{parsed.path}$ {destination} permanent;\n")

    def handle_response(self, url, response):
        def find_links(html, policy):
            """
            :type html: str
            :type policy: ScrapingPolicy
            :rtype: Iterator[str]
            """
            soup = BeautifulSoup(html, 'lxml')
            for element in soup.find_all():
                if 'srcset' in element.attrs:
                    for source in element.attrs['srcset'].split(','):
                        src = source.strip().rsplit(' ', 1)[0]  # keep the URL, drop the width/density descriptor
                        if policy.shouldCrawlUrl(src):
                            yield policy.canonicalize(src)
                else:
                    for potential_attr in ['href', 'src']:
                        if potential_attr in element.attrs and policy.shouldCrawlUrl(
                                element.attrs[potential_attr]):
                            yield policy.canonicalize(
                                element.attrs[potential_attr])

        if response.error:
            try:
                self.errors[url] = response.error.code
            except AttributeError:
                # not an HTTPError (e.g. a socket error), so there is no status code
                self.errors[url] = str(response.error)
        else:
            if self.response_is_text(response):
                content = response.body.decode('utf-8')
                linked_urls = find_links(content, self.policy)
                self.enqueue_urls(linked_urls)
            else:  # treat as binary
                content = response.body
            self.scrape(url, response.effective_url, content)

        self.requests_in_flight -= 1

        self.run_backlog()

    @staticmethod
    def response_is_text(response):
        """
        :type response: httpclient.HTTPResponse
        :rtype: bool
        """
        if 'content-type' in response.headers:
            content_type = response.headers['content-type']
            # an empty content-type value is treated as text
            return not content_type or content_type.startswith('text/')
        return False

    def enqueue_urls(self, urls):
        """
        :type urls: Iterable[str]
        """
        for url in urls:
            if self.requests_in_flight < self.policy.max_concurrent_requests:
                self._enqueue_internal(url)
            else:
                self.backlog.add(url)

    def _enqueue_internal(self, url):
        if url not in self.crawled:
            self.crawled.add(url)
            self.requests_in_flight += 1
            self.http_client.fetch(url.strip(),
                                   partial(self.handle_response, url),
                                   raise_error=False)

    def run_backlog(self):
        while self.backlog and self.requests_in_flight < self.policy.max_concurrent_requests:
            self._enqueue_internal(self.backlog.pop())

        if not self.backlog and self.requests_in_flight == 0:
            ioloop.IOLoop.instance().stop()  # all done!!

    def scrape(self, initial_url, final_url, content):
        """
        :param initial_url: The URL you requested
        :type initial_url: str
        :param final_url: The URL you wound up at after any redirects (may be the same!)
        :type final_url: str
        :param content: The full content of the URL; may be HTML for us to parse, or just binary data; in either case it gets written to disk
        :type content: str|bytes
        """
        assert content

        if initial_url != final_url:
            self.redirects[initial_url] = final_url

        if self.policy.shouldScrapeUrl(final_url):
            path = self.get_local_path_from_url(final_url)
            try:  # Create the parent directory
                os.makedirs(os.path.abspath(os.path.join(path, os.pardir)))
            except OSError as exception:
                if exception.errno != errno.EEXIST:
                    raise exception
            if isinstance(content, str):
                with open(path, 'w') as f:
                    f.write(self.policy.extractContent(content))
            else:
                with open(path, 'wb') as f:
                    f.write(content)

    def get_local_path_from_url(self, canonical_url):
        """
        :type canonical_url: str
        :return: str
        """
        url_path = urlparse(canonical_url).path  # ignore the domain
        assert url_path.startswith('/')
        if url_path.endswith('/'):
            url_path += 'index.html'
        elif '.' not in url_path.split('/')[-1]:
            # no extension in the final path component
            url_path += '/index.html'
        return os.path.join(self.policy.out_directory,
                            url_path[1:])  # drop the leading slash
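A minimal usage sketch; `ScrapingPolicy` is not shown in this excerpt, so the stub below implements only the attributes and methods the crawler actually calls (the whole interface is an assumption, and the URLs are placeholders):

class SimplePolicy:
    start_at_url = 'https://example.com/'
    out_directory = '/tmp/mirror'
    max_concurrent_requests = 10

    def shouldCrawlUrl(self, url):
        return url.startswith('https://example.com/')

    def canonicalize(self, url):
        return url.split('#')[0]  # drop fragments

    def shouldScrapeUrl(self, url):
        return True

    def extractContent(self, html):
        return html  # no post-processing

Crawler(SimplePolicy()).crawl()  # blocks until the backlog drains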
コード例 #50
0
class KubeSpawner(Spawner):
    """
    Implement a JupyterHub spawner to spawn pods in a Kubernetes Cluster.

    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # By now, all the traitlets have been set, so we can use them to compute
        # other attributes
        self.httpclient = AsyncHTTPClient()
        # FIXME: Support more than just kubeconfig
        self.request = request_maker()
        self.pod_name = self._expand_user_properties(self.pod_name_template)
        self.pvc_name = self._expand_user_properties(self.pvc_name_template)
        if self.hub_connect_ip:
            scheme, netloc, path, params, query, fragment = urlparse(self.hub.api_url)
            netloc = '{ip}:{port}'.format(
                ip=self.hub_connect_ip,
                port=self.hub_connect_port,
            )
            self.accessible_hub_api_url = urlunparse((scheme, netloc, path, params, query, fragment))
        else:
            self.accessible_hub_api_url = self.hub.api_url

    namespace = Unicode(
        config=True,
        help="""
        Kubernetes namespace to spawn user pods in.

        If running inside a kubernetes cluster with service accounts enabled,
        defaults to the current namespace. If not, defaults to 'default'
        """
    )

    def _namespace_default(self):
        """
        Set namespace default to current namespace if running in a k8s cluster

        If not in a k8s cluster with service accounts enabled, default to
        'default'
        """
        ns_path = '/var/run/secrets/kubernetes.io/serviceaccount/namespace'
        if os.path.exists(ns_path):
            with open(ns_path) as f:
                return f.read().strip()
        return 'default'

    pod_name_template = Unicode(
        'jupyter-{username}-{userid}',
        config=True,
        help="""
        Template to use to form the name of user's pods.

        {username} and {userid} are expanded to the escaped, dns-label safe
        username & integer user id respectively.

        This must be unique within the namespace the pods are being spawned
        in, so if you are running multiple jupyterhubs spawning in the
        same namespace, consider setting this to be something more unique.
        """
    )

    pvc_name_template = Unicode(
        'claim-{username}-{userid}',
        config=True,
        help="""
        Template to use to form the name of user's pvc.

        {username} and {userid} are expanded to the escaped, dns-label safe
        username & integer user id respectively.

        This must be unique within the namespace the pvc are being spawned
        in, so if you are running multiple jupyterhubs spawning in the
        same namespace, consider setting this to be something more unique.
        """
    )

    hub_connect_ip = Unicode(
        None,
        config=True,
        allow_none=True,
        help="""
        IP/DNS hostname to be used by pods to reach out to the hub API.

        Defaults to `None`, in which case the `hub_ip` config is used.

        In kubernetes contexts, this is often not the same as `hub_ip`,
        since the hub runs in a pod which is fronted by a service. This IP
        should be something that pods can access to reach the hub process.
        This can also be through the proxy - API access is authenticated
        with a token that is passed only to the hub, so security is fine.

        Usually set to the service IP / DNS name of the service that fronts
        the hub pod (deployment/replicationcontroller/replicaset)

        Used together with `hub_connect_port` configuration.
        """
    )

    hub_connect_port = Integer(
        config=True,
        help="""
        Port to use by pods to reach out to the hub API.

        Defaults to be the same as `hub_port`.

        In kubernetes contexts, this is often not the same as `hub_port`,
        since the hub runs in a pod which is fronted by a service. This
        allows easy port mapping, and some systems take advantage of it.

        This should be set to the `port` attribute of a service that is
        fronting the hub pod.
        """
    )

    def _hub_connect_port_default(self):
        """
        Set default port on which pods connect to hub to be the hub port

        The hub needs to be accessible to the pods at this port. We default
        to the port the hub is listening on. This would be overridden in case
        some amount of port mapping is happening.
        """
        return self.hub.server.port

    singleuser_image_spec = Unicode(
        'jupyter/singleuser:latest',
        config=True,
        help="""
        Docker image spec to use for spawning user's containers.

        Defaults to `jupyter/singleuser:latest`

        Name of the container + a tag, same as would be used with
        a `docker pull` command. If tag is set to `latest`, kubernetes will
        check the registry each time a new user is spawned to see if there
        is a newer image available. If available, new image will be pulled.
        Note that this could cause long delays when spawning, especially
        if the image is large. If you do not specify a tag, whatever version
        of the image is first pulled on the node will be used, thus possibly
        leading to inconsistent images on different nodes. For all these
        reasons, it is recommended to specify a specific immutable tag
        for the imagespec.

        If your image is very large, you might need to increase the timeout
        for starting the single user container from the default. You can
        set this with:

        ```
        c.KubeSpawner.start_timeout = 60 * 5  # Up to 5 minutes
        ```
        """
    )

    singleuser_image_pull_policy = Unicode(
        'IfNotPresent',
        config=True,
        help="""
        The image pull policy of the docker container specified in
        singleuser_image_spec.

        Defaults to `IfNotPresent` which causes the Kubelet to NOT pull the image
        specified in singleuser_image_spec if it already exists, except if the tag
        is :latest. For more information on image pull policy, refer to
        http://kubernetes.io/docs/user-guide/images/

        This configuration is primarily used in development if you are
        actively changing the singleuser_image_spec and would like to pull the image
        whenever a user container is spawned.
        """
    )

    volumes = List(
        [],
        config=True,
        help="""
        List of Kubernetes Volume specifications that will be mounted in the user pod.

        This list will be directly added under `volumes` in the kubernetes pod spec,
        so you should use the same structure. Each item in the list must have the
        following two keys:
          - name
            Name that'll be later used in the `volume_mounts` config to mount this
            volume at a specific path.
          - <name-of-a-supported-volume-type> (such as `hostPath`, `persistentVolumeClaim`,
            etc)
            The key name determines the type of volume to mount, and the value should
            be an object specifying the various options available for that kind of
            volume.

        See http://kubernetes.io/docs/user-guide/volumes/ for more information on the
        various kinds of volumes available and their options. Your kubernetes cluster
        must already be configured to support the volume types you want to use.

        {username} and {userid} are expanded to the escaped, dns-label safe
        username & integer user id respectively, wherever they are used.
        """
    )

    volume_mounts = List(
        [],
        config=True,
        help="""
        List of paths on which to mount volumes in the user notebook's pod.

        This list will be added to the values of the `volumeMounts` key under the user's
        container in the kubernetes pod spec, so you should use the same structure as that.
        Each item in the list should be a dictionary with at least these two keys:
          - mountPath
            The path on the container in which we want to mount the volume.
          - name
            The name of the volume we want to mount, as specified in the `volumes`
            config.

        See http://kubernetes.io/docs/user-guide/volumes/ for more information on how
        the volumeMount item works.

        {username} and {userid} are expanded to the escaped, dns-label safe
        username & integer user id respectively, wherever they are used.
        """
    )

    user_storage_capacity = Unicode(
        None,
        config=True,
        allow_none=True,
        help="""
        The amount of storage space to request from the volume that the pvc will
        mount to. This amount will be the amount of storage space the user has
        to work with on their notebook. If left blank, the kubespawner will not
        create a pvc for the pod.

        This will be added to the `resources: requests: storage:` in the k8s pod spec.

        See http://kubernetes.io/docs/user-guide/persistent-volumes/#persistentvolumeclaims
        for more information on how storage works.

        Quantities can be represented externally as unadorned integers, or as fixed-point
        integers with one of these SI suffixes (E, P, T, G, M, K, m) or their power-of-two
        equivalents (Ei, Pi, Ti, Gi, Mi, Ki). For example, the following represent roughly
        the same value: 128974848, "129e6", "129M", "123Mi".
        (https://github.com/kubernetes/kubernetes/blob/master/docs/design/resources.md)
        """
    )

    user_storage_class = Unicode(
        None,
        config=True,
        allow_none=True,
        help="""
        The storage class that the pvc will use. If left blank, the kubespawner will not
        create a pvc for the pod.

        This will be added to the `annotations: volume.beta.kubernetes.io/storage-class:`
        in the pvc metadata.

        This will determine what type of volume the pvc will request to use. If one exists
        that matches the criteria of the StorageClass, the pvc will mount to that. Otherwise,
        because it has a storage class, k8s will dynamically provision a pv for the pvc to bind to
        and a machine in the cluster for the pv to bind to.

        See http://kubernetes.io/docs/user-guide/persistent-volumes/#storageclasses for
        more information on how StorageClasses work.
        """
    )

    user_storage_access_modes = List(
        ["ReadWriteOnce"],
        config=True,
        help="""
        List of access modes the user has for the pvc.

        The access modes are:
            ReadWriteOnce – the volume can be mounted as read-write by a single node
            ReadOnlyMany – the volume can be mounted read-only by many nodes
            ReadWriteMany – the volume can be mounted as read-write by many nodes

        See http://kubernetes.io/docs/user-guide/persistent-volumes/#access-modes for
        more information on how access modes work.
        """
    )

    def _expand_user_properties(self, template):
        # Make sure username matches the restrictions for DNS labels
        safe_chars = set(string.ascii_lowercase + string.digits)
        safe_username = ''.join([s if s in safe_chars else '-' for s in self.user.name.lower()])
        return template.format(
            userid=self.user.id,
            username=safe_username
        )

    def _expand_all(self, src):
        if isinstance(src, list):
            return [self._expand_all(i) for i in src]
        elif isinstance(src, dict):
            return {k: self._expand_all(v) for k, v in src.items()}
        elif isinstance(src, str):
            return self._expand_user_properties(src)
        else:
            return src

    def get_pod_manifest(self):
        """
        Make a pod manifest that will spawn current user's notebook pod.
        """
        # Add a hack to ensure that no service accounts are mounted in spawned pods
        # This makes sure that we don't accidentally give access to the whole
        # kubernetes API to the users in the spawned pods.
        # See https://github.com/kubernetes/kubernetes/issues/16779#issuecomment-157460294
        hack_volumes = [{
            'name': 'no-api-access-please',
            'emptyDir': {}
        }]
        hack_volume_mounts = [{
            'name': 'no-api-access-please',
            'mountPath': '/var/run/secrets/kubernetes.io/serviceaccount',
            'readOnly': True
        }]
        return make_pod_spec(
            self.pod_name,
            self.singleuser_image_spec,
            self.singleuser_image_pull_policy,
            self.get_env(),
            self._expand_all(self.volumes) + hack_volumes,
            self._expand_all(self.volume_mounts) + hack_volume_mounts,
            self.cpu_limit,
            self.cpu_guarantee,
            self.mem_limit,
            self.mem_guarantee,
        )

    def get_pvc_manifest(self):
        """
        Make a pvc manifest that will spawn current user's pvc.
        """
        return make_pvc_spec(
            self.pvc_name,
            self.user_storage_class,
            self.user_storage_access_modes,
            self.user_storage_capacity
        )


    @gen.coroutine
    def get_pod_info(self, pod_name):
        """
        Fetch info about a specific pod with the given pod name in current namespace

        Return `None` if pod with given name does not exist in current namespace
        """
        try:
            response = yield self.httpclient.fetch(self.request(
                k8s_url(
                    self.namespace,
                    'pods',
                    pod_name,
                )
            ))
        except HTTPError as e:
            if e.code == 404:
                return None
            raise
        data = response.body.decode('utf-8')
        return json.loads(data)

    @gen.coroutine
    def get_pvc_info(self, pvc_name):
        """
        Fetch info about a specific pvc with the given pvc name in current namespace

        Return `None` if pvc with given name does not exist in current namespace
        """
        try:
            response = yield self.httpclient.fetch(self.request(
                k8s_url(
                    self.namespace,
                    'persistentvolumeclaims',
                    pvc_name,
                )
            ))
        except HTTPError as e:
            if e.code == 404:
                return None
            raise
        data = response.body.decode('utf-8')
        return json.loads(data)

    def is_pod_running(self, pod):
        """
        Check if the given pod is running

        pod must be a dictionary representing a Pod kubernetes API object.
        """
        return pod['status']['phase'] == 'Running'

    def get_state(self):
        """
        Save state required to reinstate this user's pod from scratch

        We save the pod_name, even though we could easily compute it,
        because JupyterHub requires you save *some* state! Otherwise
        it assumes your server is dead. This works around that.

        It's also useful for cases when the pod_template changes between
        restarts - this keeps the old pods around.
        """
        state = super().get_state()
        state['pod_name'] = self.pod_name
        return state

    def load_state(self, state):
        """
        Load state from storage required to reinstate this user's pod

        Since this runs after __init__, this will override the generated pod_name
        if there's one we have saved in state. These are the same in most cases,
        but if the pod_template has changed in between restarts, it will no longer
        be the case. This allows us to continue serving from the old pods with
        the old names.
        """
        if 'pod_name' in state:
            self.pod_name = state['pod_name']

    @gen.coroutine
    def poll(self):
        """
        Check if the pod is still running.

        Returns None if it is, and 1 if it isn't. These are the return values
        JupyterHub expects.
        """
        data = yield self.get_pod_info(self.pod_name)
        if data is not None and self.is_pod_running(data):
            return None
        return 1

    @gen.coroutine
    def start(self):
        if self.user_storage_class is not None and self.user_storage_capacity is not None:
            pvc_manifest = self.get_pvc_manifest()
            try:
                yield self.httpclient.fetch(self.request(
                    url=k8s_url(self.namespace, 'persistentvolumeclaims'),
                    body=json.dumps(pvc_manifest),
                    method='POST',
                    headers={'Content-Type': 'application/json'}
                ))
            except HTTPError:
                # most likely a 409 Conflict: the PVC already exists
                self.log.info("PVC " + self.pvc_name + " already exists; not creating a new one.")
        pod_manifest = self.get_pod_manifest()
        yield self.httpclient.fetch(self.request(
            url=k8s_url(self.namespace, 'pods'),
            body=json.dumps(pod_manifest),
            method='POST',
            headers={'Content-Type': 'application/json'}
        ))
        while True:
            data = yield self.get_pod_info(self.pod_name)
            if data is not None and self.is_pod_running(data):
                break
            yield gen.sleep(1)
        self.user.server.ip = data['status']['podIP']
        self.user.server.port = 8888
        self.db.commit()

    @gen.coroutine
    def stop(self, now=False):
        body = {
            'kind': "DeleteOptions",
            'apiVersion': 'v1',
            'gracePeriodSeconds': 0
        }
        yield self.httpclient.fetch(
            self.request(
                url=k8s_url(self.namespace, 'pods', self.pod_name),
                method='DELETE',
                body=json.dumps(body),
                headers={'Content-Type': 'application/json'},
                # Tornado's client thinks DELETE requests shouldn't have a body
                # which is a bogus restriction
                allow_nonstandard_methods=True,
            )
        )
        if not now:
            # If now is true, just return immediately, do not wait for
            # shut down to complete
            while True:
                data = yield self.get_pod_info(self.pod_name)
                if data is None:
                    break
                yield gen.sleep(1)

    def _env_keep_default(self):
        return []

    def get_env(self):
        env = super(KubeSpawner, self).get_env()
        env.update({
            'JPY_USER': self.user.name,
            'JPY_COOKIE_NAME': self.user.server.cookie_name,
            'JPY_BASE_URL': self.user.server.base_url,
            'JPY_HUB_PREFIX': self.hub.server.base_url,
            'JPY_HUB_API_URL': self.accessible_hub_api_url
        })
        return env
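The methods above implement JupyterHub's Spawner contract: start() creates the pod (and optionally a PVC), poll() reports liveness, stop() deletes the pod, and get_state()/load_state() persist the pod name across hub restarts. A minimal wiring sketch, assuming the class is importable as kubespawner.KubeSpawner and that namespace is exposed as a configurable trait (both are assumptions):

# jupyterhub_config.py - illustrative configuration, names assumed
c.JupyterHub.spawner_class = 'kubespawner.KubeSpawner'
c.KubeSpawner.namespace = 'jupyterhub'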
Code example #51
def asynchronous_fetch(urls):
    # Kick off all fetches concurrently; returns a dict of url -> Future.
    http_client = AsyncHTTPClient()
    return {url: http_client.fetch(url) for url in urls}
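Since the function returns a dict of Futures rather than responses, the caller must resolve them; a minimal usage sketch (the coroutine and URLs are hypothetical, and yielding a dict of Futures is Tornado gen behavior):

from tornado import gen
from tornado.ioloop import IOLoop

@gen.coroutine
def fetch_all():
    # Yielding a dict of Futures waits for all of them concurrently.
    responses = yield asynchronous_fetch(['http://example.com/a',
                                          'http://example.com/b'])
    for url, response in responses.items():
        print(url, response.code)

IOLoop.current().run_sync(fetch_all)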
Code example #52
@gen.coroutine
def get(self):
    self.write('here')
    http_client = AsyncHTTPClient()
    response = yield http_client.fetch("http://google.com")
    self.do_read(response)
Code example #53
File: hello.py Project: huahuijay/class3
@gen.coroutine
def get(self):
    http = AsyncHTTPClient()
    response = yield http.fetch('http://192.168.1.189:8001/third/')
    print(response)
    self.write('slow')
Code example #54
@gen.coroutine
def checkresults(db, api):
    ini = time()
    info('=========================================================================')
    info(' Starting CV Request processing - {}'.format(datetime.now().isoformat()))
    info('=========================================================================')
    try:
        time_limit = int(environ.get('CVREQ_TIMELIMIT', 7200))
    except Exception as e:
        info(e)
        time_limit = 7200
    # Check results execute GET in the CV Server URL to acquire results for cv requests
    AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
    http_client = AsyncHTTPClient()
    cvreqs = db.cvrequests.find()
    # retrieve data from the cursor
    lcvreqids = [x['iid'] for x in cvreqs]
    rmcv = db.cvresults.remove({'cvrequest_iid': {'$nin': lcvreqids}},
                               multi=True)
    info('    Clear cvresults without cvrequests: {}'.format(rmcv))
    # Get ids with status != finished or error
    cvreqs = db.cvrequests.find({'status': {'$nin': ['finished', 'error']}})
    # retrieve data from the cursor
    cvreqs = [x for x in cvreqs]
    info('    CV Requests not finished or in error - count: ' + str(len(cvreqs)))
    # Connection preset
    params = {
        'headers': HTTPHeaders({
            "content-type": "application/json",
            "ApiKey": api['CV_APIKEY']
        }),
        'url': api['CVSERVER_URL'] + '/linc/v1/classify',
        'method': 'POST',
        'body': '',
        'request_timeout': 5,
        'validate_cert': False
    }
    # Check if cvresults exists
    for cvreq in cvreqs:
        info("========================================================================")
        info(" ### Checking CV Request: " + str(cvreq['iid']) + " ###")
        info("  ## Image set submitted: " + str(cvreq['image_set_iid']) + " ##")
        cvres = db.cvresults.find_one({'cvrequest_iid': cvreq['iid']})
        # Flag the request as an error once it exceeds time_limit
        if cvres:
            info('  >> Created at: {}'.format(cvres['created_at']))
            info('  >>        now: {}'.format(datetime.now()))
            if (datetime.now() - cvres['created_at']).total_seconds() > time_limit:
                info("!!! The CV Request exceeded the time limit of {} s".format(time_limit))
                info("!!! Marking it with error status")
                db.cvrequests.update({'iid': cvreq['iid']}, {
                    '$set': {
                        'status': 'error',
                        'updated_at': datetime.now()
                    }
                })
                cvrem_restart = db.cvresults.remove(
                    {'cvrequest_iid': cvreq['iid']})
                cvres = None
                info("========================================================================")
                continue
        if not cvres:
            # Create the CVResults
            iid = db.counters.find_and_modify(
                query={'_id': 'cvresults'},
                update={'$inc': {'next': 1}},
                new=True,
                upsert=True)
            newcvres = dict()
            newcvres['cvrequest_iid'] = cvreq['iid']
            newcvres['iid'] = iid['next']
            newcvres['match_probability'] = '{}'
            dt = datetime.now()
            newcvres['created_at'] = dt
            newcvres['updated_at'] = dt
            ncvresobjid = db.cvresults.insert(newcvres)
            info('CV results created id: ' + str(ncvresobjid))
            cvres = db.cvresults.find_one({'cvrequest_iid': cvreq['iid']})
        # Cvres exists, so try to get data
        info('  ## CV Results id.....: {}  ## '.format(cvres['iid']))
        req_body = loads(cvreq['request_body'])
        resp_cvr = loads(cvres['match_probability'])
        if len(resp_cvr) == 0:
            resp_cvr['cv'] = list()
            resp_cvr['whisker'] = list()
        # Check for cv results
        # cv_topk_classifier_accuracy
        # whisker_topk_classifier_accuracy
        if not req_body.get('classifiers', False):
            info(' >>> CV Request invalid - id: {}'.format(cvreq['iid']))
            info(' >>> No classifiers found.')
        else:
            # Check CV
            finished = {'cv': 0, 'whisker': 0}
            for clf in ['cv', 'whisker']:
                if req_body['classifiers'].get(clf, False):
                    info('    Processing calls for the classifier {}'.format(
                        clf.upper()))
                    add = len(resp_cvr[clf]) == 0
                    if add:
                        # Submit requests
                        for n, clf_call in enumerate(req_body[clf + '_calls']):
                            dparams = params.copy()
                            dparams['body'] = dumps(clf_call)
                            request = HTTPRequest(**dparams)
                            try:
                                response = yield http_client.fetch(request)
                            except HTTPError as e:
                                info(e)
                                response = e.response
                            except Exception as e:
                                info(e)
                                response = None
                            if response and response.code in [200, 201]:
                                info('          Call {} #{} - success'.format(
                                    clf.upper(), n))
                                resp_cvr[clf].append(
                                    loads(response.body.decode('utf-8')))
                            else:
                                info('          Call {} #{} - fail'.format(
                                    clf.upper(), n))
                                resp_cvr[clf].append('FAILURE')
                    else:
                        # Check results
                        for n, clf_call in enumerate(req_body[clf + '_calls']):
                            info(resp_cvr[clf][n])
                            # {'id': '432f7612-8b7d-4132-baae-f93f094abb7f', 'status': 'PENDING', 'errors': []}
                            if isinstance(resp_cvr[clf][n], dict) and \
                                    resp_cvr[clf][n].get('status', None) == 'finished':
                                info('          Request CV #{} finished'.format(n))
                                finished[clf] += 1
                            else:
                                info('       Check results for CV #{}'.format(
                                    n))
                                dparams = params.copy()
                                del dparams['body']
                                dparams['method'] = 'GET'
                                url = api['CVSERVER_URL'] + '/linc/v1/results/' + resp_cvr[clf][n]['id']
                                info('       {}'.format(url))
                                dparams['url'] = url
                                request = HTTPRequest(**dparams)
                                try:
                                    response = yield http_client.fetch(request)
                                except HTTPError as e:
                                    info(e)
                                    response = e.response
                                except Exception as e:
                                    info(e)
                                    response = None
                                if response and response.code in [200, 201]:
                                    info('          Call #{} - success'.format(
                                        n))
                                    resp_data = loads(
                                        response.body.decode('utf-8'))
                                    info('          Status: {}'.format(
                                        resp_data['status']))
                                    if resp_data['status'] == 'finished':
                                        info('       Resp data: {}'.format(
                                            resp_data))
                                        resp_cvr[clf][n] = resp_data.copy()
                                    elif resp_data['status'] == 'error':
                                        info(
                                            '      Forcing it to be FINISHED (Workaround)'
                                        )
                                        finished[clf] += 1
                                else:
                                    info('          Call #{} - fail'.format(n))
            dt = datetime.now()
            if (finished['cv'] == len(req_body['cv_calls'])
                    and finished['whisker'] == len(req_body['whisker_calls'])):
                info(' Loading capabilities...')
                dparams = params.copy()
                del dparams['body']
                dparams['method'] = 'GET'
                dparams['url'] = api['CVSERVER_URL'] + '/linc/v1/capabilities'
                request = HTTPRequest(**dparams)
                try:
                    response = yield http_client.fetch(request)
                except HTTPError as e:
                    info(e)
                    response = e.response
                except Exception as e:
                    info(e)
                    response = None
                if response and response.code in [200, 201]:
                    info(' ### CV Request finished ###')
                    db.cvrequests.update(
                        {'iid': cvreq['iid']},
                        {'$set': {
                            'status': 'finished',
                            'updated_at': dt
                        }})
                    resp_cvr['capabilities'] = loads(response.body.decode('utf-8'))
                    resp_cvr['execution'] = dt.timestamp() - cvres['created_at'].timestamp()
                else:
                    info(' Fail to retrieve capabilities info...')
            db.cvresults.update({'cvrequest_iid': cvreq['iid']}, {
                '$set': {
                    'match_probability': dumps(resp_cvr),
                    'updated_at': dt
                }
            })
            api['cache'].delete('imgset-' + str(cvreq['image_set_iid']))
            info('   Cache delete for image set id: {}'.format(
                cvreq['image_set_iid']))
    info('=========================================================================')
    info(' CV Request processing finished - Execution time: {0:.2f} s'.format(time() - ini))
    info('=========================================================================')
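checkresults is written as one polling pass over all pending CV requests, so it is presumably re-run on a timer; a hedged scheduling sketch (the 60-second interval and the pre-built db/api objects are assumptions):

from tornado.ioloop import IOLoop, PeriodicCallback

# Re-run the polling coroutine every 60 seconds (interval is an assumption;
# the returned Future is deliberately fire-and-forget here).
PeriodicCallback(lambda: checkresults(db, api), 60 * 1000).start()
IOLoop.current().start()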
Code example #55
    @gen.coroutine
    def authenticate(self, handler, data=None):
        """We set up auth_state based on additional CILogon info if we
        receive it.
        """
        code = handler.get_argument("code")
        # TODO: Configure the curl_httpclient for tornado
        http_client = AsyncHTTPClient()

        # Exchange the OAuth code for a CILogon Access Token
        # See: http://www.cilogon.org/oidc
        headers = {
            "Accept": "application/json",
            "User-Agent": "JupyterHub",
        }

        params = dict(
            client_id=self.client_id,
            client_secret=self.client_secret,
            redirect_uri=self.oauth_callback_url,
            code=code,
            grant_type='authorization_code',
        )

        url = url_concat("https://%s/oauth2/token" % CILOGON_HOST, params)

        req = HTTPRequest(url, headers=headers, method="POST", body='')

        resp = yield http_client.fetch(req)
        token_response = json.loads(resp.body.decode('utf8', 'replace'))
        access_token = token_response['access_token']
        self.log.info("Access token acquired.")
        # Determine who the logged in user is
        params = dict(access_token=access_token)
        req = HTTPRequest(url_concat(
            "https://%s/oauth2/userinfo" % CILOGON_HOST, params),
                          headers=headers)
        resp = yield http_client.fetch(req)
        resp_json = json.loads(resp.body.decode('utf8', 'replace'))

        claimlist = [self.username_claim]
        if self.additional_username_claims:
            claimlist.extend(self.additional_username_claims)

        for claim in claimlist:
            username = resp_json.get(claim)
            if username:
                break
        if not username:
            if len(claimlist) < 2:
                self.log.error("Username claim %s not found in response: %s",
                               self.username_claim, sorted(resp_json.keys()))
            else:
                self.log.error("No username claim from %r in response: %s",
                               claimlist, sorted(resp_json.keys()))
            raise web.HTTPError(500, "Failed to get username from CILogon")

        if self.idp_whitelist:
            gotten_name, gotten_idp = username.split('@')
            if gotten_idp not in self.idp_whitelist:
                self.log.error(
                    "Attempted login from non-whitelisted domain %s", gotten_idp)
                raise web.HTTPError(
                    500, "Attempted login from a non-whitelisted domain")
            if len(self.idp_whitelist) == 1 and self.strip_idp_domain:
                username = gotten_name
        userdict = {"name": username}
        # Now we set up auth_state
        userdict["auth_state"] = auth_state = {}
        # Save the token response and full CILogon reply in auth state
        # These can be used for user provisioning
        #  in the Lab/Notebook environment.
        auth_state['token_response'] = token_response
        # store the whole user model in auth_state.cilogon_user
        # keep access_token as well, in case anyone was relying on it
        auth_state['access_token'] = access_token
        auth_state['cilogon_user'] = resp_json
        return userdict
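Because the token response is stored in auth_state, downstream code can consume it later in the login flow; a hedged sketch using JupyterHub's pre_spawn_start hook (the hook and user.get_auth_state() are standard JupyterHub APIs, but the subclass name and environment variable are made up):

class CILogonAuthStateAuthenticator(CILogonOAuthenticator):  # hypothetical subclass
    @gen.coroutine
    def pre_spawn_start(self, user, spawner):
        auth_state = yield user.get_auth_state()
        if not auth_state:
            # auth_state persistence is not enabled; nothing to pass along
            return
        # Expose the CILogon access token to the single-user server (illustrative)
        spawner.environment['CILOGON_ACCESS_TOKEN'] = auth_state['access_token']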
Code example #56
@gen.coroutine
def get(self):
    cli = AsyncHTTPClient()
    uri = self.wechat_uuid_uri.format(timestamp=self.get_timestamp())
    # Fetch the uuid page, then hand the response to the callback explicitly.
    response = yield cli.fetch(uri)
    self.callback(response)
Code example #57
class TrackerRequest(Task):
    def __init__(self,
                 name,
                 tracker_url,
                 tracker_command,
                 may_be_canceled=False):
        Task.__init__(self, name)
        self.http_client = AsyncHTTPClient()
        self.tracker_url = tracker_url
        self.tracker_command = tracker_command
        self.retry_delay = 30
        self._set_may_be_canceled = may_be_canceled

    def enqueue(self, item):
        self.start_item(item)
        item.log_output("Starting %s for %s\n" % (self, item.description()))
        self.send_request(item)

    def send_request(self, item):
        if item.canceled:
            return

        if self._set_may_be_canceled:
            item.may_be_canceled = False
        self.http_client.fetch(
            HTTPRequest("%s/%s" % (self.tracker_url, self.tracker_command),
                        method="POST",
                        headers={"Content-Type": "application/json"},
                        user_agent=("ArchiveTeam Warrior/%s %s %s" %
                                    (seesaw.__version__, seesaw.runner_type,
                                     seesaw.warrior_build)).strip(),
                        body=json.dumps(self.data(item))),
            functools.partial(self.handle_response, item))

    def data(self, item):
        return {}

    def handle_response(self, item, response):
        if response.code == 200:
            self.process_body(response.body, item)
        else:
            if response.code == 420 or response.code == 429:
                r = "Tracker rate limiting is active. We don't want to overload the site we're archiving, so we've limited the number of downloads per minute. Please wait... "
            elif response.code == 404:
                r = "No item received. "
            elif response.code == 455:
                r = "Project code is out of date and needs to be upgraded. "
            elif response.code == 599:
                r = "No HTTP response received from tracker. "
            else:
                r = "Tracker returned status code %d. \n" % (response.code)
            self.schedule_retry(item, r)

    def schedule_retry(self, item, message=""):
        if self._set_may_be_canceled:
            item.may_be_canceled = True
        item.log_output("%sRetrying after %d seconds...\n" %
                        (message, self.retry_delay))
        IOLoop.instance().add_timeout(
            datetime.timedelta(seconds=self.retry_delay),
            functools.partial(self.send_request, item))
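Concrete trackers are expected to subclass TrackerRequest and override data() and process_body(); a minimal hedged sketch (the class name, endpoint, and payload fields are illustrative, and complete_item is assumed to come from the Task base class):

class GetItemFromTracker(TrackerRequest):  # hypothetical subclass
    def __init__(self, tracker_url):
        TrackerRequest.__init__(self, "GetItemFromTracker", tracker_url,
                                "request", may_be_canceled=True)

    def data(self, item):
        # Payload POSTed to <tracker_url>/request; fields are assumptions
        return {"downloader": "example", "api_version": "2"}

    def process_body(self, body, item):
        # On HTTP 200, record the assigned item and mark the task complete
        item["item_name"] = body.strip()
        self.complete_item(item)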
Code example #58
File: hello.py Project: huahuijay/class3
@web.asynchronous
def get(self):
    http = AsyncHTTPClient()
    # on_response is expected to call self.finish() when done
    http.fetch('http://192.168.1.189:8001/third/', callback=self.on_response)
    self.write('slow')
Code example #59
    @gen.coroutine
    def post(self):
        retjson = {'code': 200, 'content': ''}
        cardnum = self.get_argument('cardnum', default=None)
        password = self.get_argument('password', default=None)
        if not (cardnum and password):
            retjson['code'] = 400
            retjson['content'] = 'params lack'
        else:
            # read from cache
            try:
                status = self.db.query(PeDetailCache).filter(
                    PeDetailCache.cardnum == cardnum).one()
                # Serve from cache before 8am, or while the entry is fresh and non-empty
                is_fresh = status.date > int(time()) - 10000 and status.text != '*'
                if int(strftime('%H', localtime(time()))) < 8 or is_fresh:
                    if status.text == '*':
                        retjson['content'] = []
                        self.write(
                            json.dumps(retjson, ensure_ascii=False, indent=2))
                        self.finish()
                        return
                    self.write(base64.b64decode(status.text))
                    self.finish()
                    return
            except NoResultFound:
                status = PeDetailCache(cardnum=cardnum,
                                       text='*',
                                       date=int(time()))
                self.db.add(status)
                try:
                    self.db.commit()
                except Exception:
                    self.db.rollback()

            try:
                client = AsyncHTTPClient()
                data = {
                    "Login.Token1": cardnum,
                    "Login.Token2": password,
                    'goto': "http://mynew.seu.edu.cn/loginSuccess.portal",
                    'gotoOnFail': "http://mynew.seu.edu.cn/loginFailure.portal"
                }
                data1 = {
                    'IDToken0': '',
                    'IDToken1': cardnum,
                    'IDToken2': password,
                    'IDButton': 'Submit',
                    'goto': 'http://zccx.seu.edu.cn/',
                    'gx_charset': 'gb2312'
                }

                cookie1 = ''
                request = HTTPRequest(loginurl1,
                                      method='POST',
                                      body=urllib.urlencode(data1),
                                      follow_redirects=False)
                initcookie = ''
                try:
                    response = yield client.fetch(request)
                except HTTPError as e:
                    initcookie = e.response.headers['Set-Cookie']
                init_cookie1 = initcookie.split(';')[4].split(',')[1]  # +initcookie.split(';')[0]
                header = {
                    'Host': 'zccx.seu.edu.cn',
                    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',
                    'Referer': 'http://zccx.seu.edu.cn/',
                    'Connection': 'Keep-alive',
                    'Accept-Encoding': 'gzip, deflate',
                    'Accept-Language': 'zh-CN,zh;q=0.8',
                    'Cookie': init_cookie1 + ';' + cookie1 + ';' + ';amblcookie=02'
                }
                request = HTTPRequest(runurl, method='GET', headers=header)

                response = yield client.fetch(request)
                cookie1 = response.headers['Set-Cookie']
                header['Cookie'] = init_cookie1 + ';' + cookie1 + ';' + ';amblcookie=02'
                getpeurl = "http://zccx.seu.edu.cn/SportWeb/gym/gymExercise/gymExercise_query_result_2.jsp?xh=%s" % cardnum
                request = HTTPRequest(getpeurl,
                                      headers=header,
                                      request_timeout=8)
                response = yield client.fetch(request)
                spider = RunningParser()
                spider.getRunningTable(response.body)
                retjson['content'] = spider.table
            except Exception, e:
                retjson['code'] = 500
                retjson['content'] = str(e)
Code example #60
@gen.coroutine
def get(self):
    client = AsyncHTTPClient()
    resp = yield client.fetch("http://int.dpool.sina.com.cn/iplookup/iplookup.php?format=json&ip=14.130.112.24")
    data = json.loads(resp.body.decode('utf-8'))
    self.write(data.get("city", ""))