Example #1
    def __init__(self, token, api_url, http_client=None):
        super(ProxyTarget, self).__init__(token)
        self.api_url = api_url
        self.http_client = http_client or AsyncHTTPClient()
Example #2
@gen.coroutine
def zufang_spider(url):
    response = yield AsyncHTTPClient().fetch(url, raise_error=False)
    raise gen.Return(response)
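
Since the decorated function returns a Future, it has to be driven by an IOLoop; a minimal sketch of calling it from synchronous code (the URL is only a placeholder):

from tornado.ioloop import IOLoop

# run_sync starts the loop, waits for the coroutine's Future, then stops the loop
response = IOLoop.current().run_sync(lambda: zufang_spider("http://example.com"))
print(response.code)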
Example #3
    @gen.coroutine
    def get(self, kind, path):
        # builder vs. viewer
        enable_editing = (kind == 'builder')

        # Builder options
        builder_kwargs = {}
        for a in [
                'starting_reaction', 'model_name', 'map_name', 'map_json',
                'reaction_no_data_color', 'reaction_no_data_size',
                'metabolite_no_data_color', 'metabolite_no_data_size',
                'hide_secondary_nodes'
        ]:
            args = self.get_arguments(a)
            if len(args) == 1:
                builder_kwargs[a] = (True if args[0].lower() == 'true' else (
                    False if args[0].lower() == 'false' else args[0]))
        # array args
        for a in [
                'quick_jump', 'metabolite_size_range',
                'metabolite_color_range', 'reaction_size_range',
                'reaction_color_range', 'gene_styles'
        ]:
            args = self.get_arguments(a + '[]')
            if len(args) > 0:
                builder_kwargs[a] = args

        # js source
        args = self.get_arguments('js_source')
        js_source = args[0] if len(args) == 1 else 'web'

        # if the server is running locally, then the embedded css must be loaded
        # asynchronously using the same server thread.
        if js_source in ['dev', 'local']:
            global PORT
            url = get_url('builder_embed_css',
                          source='local',
                          local_host='http://localhost:%d' % PORT)
            # gen.Task is deprecated; raise_error=False preserves the explicit
            # status check below
            response = yield AsyncHTTPClient().fetch(url, raise_error=False)
            if response.code != 200 or response.body is None:
                raise Exception('Could not load embedded_css from %s' % url)

            builder_kwargs['embedded_css'] = (
                response.body.decode('utf-8').replace('\n', ' '))

        # example data
        def load_data_file(rel_path):
            """Load a JSON file with relative path."""
            try:
                with open(join(root_directory, rel_path), 'r') as f:
                    return json.load(f)
            except (IOError, ValueError):
                logging.warning('Could not load example_data file: %s' % rel_path)

        if len(self.get_arguments('example_data')) > 0:
            r_filepath = 'escher/example_data/reaction_data_iJO1366.json'
            builder_kwargs['reaction_data'] = load_data_file(r_filepath)
            m_filepath = 'escher/example_data/metabolite_data_iJO1366.json'
            builder_kwargs['metabolite_data'] = load_data_file(m_filepath)

        # make the builder
        builder = Builder(safe=True, **builder_kwargs)

        # display options
        display_kwargs = {
            'minified_js': True,
            'scroll_behavior': 'pan',
            'menu': 'all'
        }

        # keyword
        for a in [
                'menu', 'scroll_behavior', 'minified_js',
                'auto_set_data_domain', 'never_ask_before_quit'
        ]:
            args = self.get_arguments(a)
            if len(args) == 1:
                display_kwargs[a] = (True if args[0].lower() == 'true' else (
                    False if args[0].lower() == 'false' else args[0]))

        html = builder._get_html(js_source=js_source,
                                 enable_editing=enable_editing,
                                 enable_keys=True,
                                 html_wrapper=True,
                                 fill_screen=True,
                                 height='100%',
                                 **display_kwargs)

        self.set_header("Content-Type", "text/html")
        self.serve(html)
Example #4
    def __init__(self, pcsd_ruby_socket, debug=False):
        self.__debug = debug
        AsyncHTTPClient.configure(
            "tornado.curl_httpclient.CurlAsyncHTTPClient")
        self.__client = AsyncHTTPClient()
        self.__pcsd_ruby_socket = pcsd_ruby_socket
Example #5
    def http_client(self):
        return AsyncHTTPClient(
            force_instance=True, defaults=dict(validate_cert=self.tls_verify)
        )
Example #6
    @gen.coroutine
    def get(self):
        http_client = AsyncHTTPClient()
        response = yield http_client.fetch("http://www.sinacloud.com")
        self.set_header('content-type', 'text/plain')
        self.write('Hello, World! ' + str(response.body[:100]))
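
On Tornado 5+ the same handler is usually written as a native coroutine; a sketch, with the handler class name made up for illustration:

from tornado.httpclient import AsyncHTTPClient
from tornado.web import RequestHandler

class HelloHandler(RequestHandler):  # hypothetical name for the enclosing class
    async def get(self):
        http_client = AsyncHTTPClient()
        response = await http_client.fetch("http://www.sinacloud.com")
        self.set_header('content-type', 'text/plain')
        self.write('Hello, World! ' + str(response.body[:100]))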
Example #7
    async def get(self, provider_prefix, _unescaped_spec):
        prefix = '/v2/' + provider_prefix
        spec = self.get_spec_from_request(prefix)
        spec = spec.rstrip("/")
        try:
            self.get_provider(provider_prefix, spec=spec)
        except HTTPError:
            raise
        except Exception as e:
            app_log.error(
                "Failed to construct provider for %s/%s",
                provider_prefix,
                spec,
            )
            # FIXME: 400 assumes it's the user's fault (?)
            # maybe we should catch a special InvalidSpecError here
            raise HTTPError(400, str(e))

        provider_spec = f'{provider_prefix}/{spec}'
        social_desc = f"{SPEC_NAMES[provider_prefix]}: {spec}"
        nbviewer_url = None
        if provider_prefix == "gh":
            # we can only produce an nbviewer URL for github right now
            nbviewer_url = 'https://nbviewer.jupyter.org/github'
            org, repo_name, ref = spec.split('/', 2)
            # NOTE: tornado unquotes query arguments too -> notebooks%2Findex.ipynb becomes notebooks/index.ipynb
            filepath = self.get_argument('filepath', '').lstrip('/')

            # Check if we have a JupyterLab + file path, if so then use it for the filepath
            urlpath = self.get_argument('urlpath', '').lstrip('/')
            if urlpath.startswith("lab") and "/tree/" in urlpath:
                filepath = urlpath.split('tree/', 1)[-1]

            blob_or_tree = 'blob' if filepath else 'tree'
            nbviewer_url = f'{nbviewer_url}/{org}/{repo_name}/{blob_or_tree}/{ref}/{filepath}'

            # Check if the nbviewer URL is valid and would display something
            # useful to the reader, if not we don't show it
            client = AsyncHTTPClient()
            # quote any unicode characters in the URL
            proto, rest = nbviewer_url.split("://")
            rest = urllib.parse.quote(rest)

            request = HTTPRequest(
                proto + "://" + rest,
                method="HEAD",
                user_agent="BinderHub",
            )
            response = await client.fetch(request, raise_error=False)
            if response.code >= 400:
                nbviewer_url = None

        self.render_template(
            "loading.html",
            base_url=self.settings['base_url'],
            badge_base_url=self.get_badge_base_url(),
            provider_spec=provider_spec,
            social_desc=social_desc,
            nbviewer_url=nbviewer_url,
            # urlpath=self.get_argument('urlpath', None),
            submit=True,
            google_analytics_code=self.settings['google_analytics_code'],
            google_analytics_domain=self.settings['google_analytics_domain'],
            extra_footer_scripts=self.settings['extra_footer_scripts'],
        )
Example #8
    def get_app_http_client(self):
        AsyncHTTPClient.configure(
            'tornado.curl_httpclient.CurlAsyncHTTPClient')
        return AsyncHTTPClient(force_instance=True)
Example #9
    @gen.coroutine
    def post(self, sessionid):
        rclient = self.settings['rclient']
        gen_log.info('sessionid ' + str(sessionid))
        if not sessionid:
            self.render("sorry.html",
                        reason="Invalid Session. This link is not valid")
            return
        session = rclient.get(sessionid)
        if not session:
            self.render("sorry.html",
                        reason="Invalid Session. This link is not valid")
            return

        session = pickle.loads(session)
        if not session:
            rclient.delete(sessionid)
            self.render("sorry.html",
                        reason="Invalid Session. This link is not valid")
            return

        inbounddb = self.settings['inbounddb']

        user = yield inbounddb.users.find_one({'actual': session['actual']})
        sup_user = yield inbounddb.suspended_users.find_one(
            {'actual': session['actual']})
        if not user and not sup_user:
            rclient.delete(sessionid)
            self.render("sorry.html",
                        reason="Invalid Attempt. This link is not valid")
            return

        otp = self.get_argument('otp', 'junk')
        http_client = AsyncHTTPClient()
        response = yield http_client.fetch(
            "https://cognalys.com/api/v1/otp/confirm/?app_id=" +
            self.settings['coganlys_app_id'] + "&access_token=" +
            self.settings['cognalys_acc_token'] + "&keymatch=" +
            session['keymatch'] + "&otp=" + session['otpstart'] + otp,
            raise_error=False)
        if response.code != 200:
            self.render("sorry.html",
                        reason="Invalid OTP. Verification Failed")
            rclient.delete(sessionid)
            return
        resdata = json.loads(response.body.decode())
        gen_log.info('cognalys verify response data ' + str(resdata))
        if resdata['status'] != 'success':
            self.render("sorry.html",
                        reason="Invalid OTP. Verification Failed")
            rclient.delete(sessionid)
            return

        reason = "Thank You for verifing phone number.\n"
        if session.get('user_data', None):
            ud = pickle.loads(session['user_data']['sud'])
            ud['suspended'] = 'False'
            ud['verify_count'] = 0
            ud['phone_verified'] = 'True'
            yield inbounddb.users.insert(ud)
            if session['activation'] == 'True':
                reason += 'Your account has been activated\n'
                yield inbounddb.suspended_users.remove(
                    {'actual': ud['actual']})
        else:
            yield inbounddb.users.update(
                {'actual': session['actual']},
                {'$set': {'phone_verified': 'True'}})
            yield inbounddb.users.update(
                {'actual': session['actual']},
                {'$set': {'verify_count': 0}})

        self.render("success.html", reason=reason)
        rclient.delete(sessionid)
        return
Example #10
            if app["domain"].strip() == "":
                continue
            domain = app["domain"].strip()

            dt = datetime.date.today()
            today = datetime.datetime(dt.year, dt.month, dt.day)
            r = alexa_collection.find_one({"domain": domain, "date": today})
            if r is not None:
                continue

            total += 1
            url = "http://www.alexa.com/siteinfo/%s" % app["domain"]
            request(url, lambda r, app=app: handle_alexa_com_result(r, app))

            total += 1
            url = "http://www.alexa.cn/index.php?url=%s" % app["domain"]
            request(url, lambda r, app=app: handle_alexa_cn_result(r, app))

            flag = True

        conn.close()


if __name__ == "__main__":
    logger.info("Start...")
    AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient")
    http_client = AsyncHTTPClient(max_clients=50)
    instance = tornado.ioloop.IOLoop.instance()
    begin()
    instance.start()
Example #11
    def get_app_http_client(self):
        AsyncHTTPClient.configure(
            'tornado.simple_httpclient.SimpleAsyncHTTPClient')
        return AsyncHTTPClient(force_instance=True)
Example #12
@gen.coroutine
def fetch_html(url):
    http_cli = AsyncHTTPClient()
    response = yield http_cli.fetch(url)
    return response
Example #13
    def httpclient_instance(self):
        return AsyncHTTPClient()
Example #14
    @gen.coroutine
    def _create_user_name(self, user_uuid=None, data_uuid=None, ip=None):
        logging.info("create anonymous user_uuid: %s, ip: %s" %
                     (user_uuid, ip))
        if user_uuid is None or ip is None or data_uuid is None:
            return

        url = "http://123.57.154.168:8099/IP2GEO/"
        http_headers = {"Content-Type": "application/json"}

        http_body = {
            "ip": ip,
            "language": _get_config().get("server").get("language").get("locale"),
            "team_uuid": _get_config().get("team").get("app_uuid"),
            "team_name": _get_config().get("team").get("name")
        }

        http_request = HTTPRequest(url,
                                   method='POST',
                                   headers=http_headers,
                                   validate_cert=False,
                                   body=json.dumps(http_body))

        http_client = AsyncHTTPClient()
        response = yield http_client.fetch(http_request)

        logging.info("geoservice return: %s" % response.body)
        _body = json.loads(response.body)

        if _body is None or _body.get("error_code") != 0:
            logging.error("can't get user name by ip: %s" % ip)
            return

        _country = _body.get("country")
        _state = _body.get("state")
        _city = _body.get("city")
        _location_user = []

        if _country is not None and len(_country) != 0:
            _location_user.append(_country)

        if _state is not None and len(_state) != 0:
            _location_user.append(_state)

        if _city is not None and len(_city) != 0:
            _location_user.append(_city)

        if len(_location_user) == 0:
            return

        _user_name = ".".join(_location_user)
        _row = DeviceUser(uuid=user_uuid,
                          user_name=_user_name,
                          user_fullname=_user_name)
        _row.update_redis_keys(self.application.redis)
        _row.async_update(self.application.redis)

        _row = AppUserData(uuid=data_uuid, user_fullname=_user_name)
        _row.update_redis_keys(self.application.redis)
        _row.async_update(self.application.redis)

        return
Example #15
    def __init__(self, application, request, **kwargs):
        super().__init__(application, request, **kwargs)
        self.client = AsyncHTTPClient()
Example #16
    async def post(cls,
                   url,
                   params=None,
                   body=None,
                   headers=None,
                   encode_type='utf-8',
                   decode_type='utf-8',
                   parse_json=True,
                   timeout=30):
        """ HTTP POST 请求
        @param url 请求url
        @param params 请求的uri qurey参数
        @param body 请求的body参数
        @param headers 请求的header参数
        @param encode_type 请求body编码格式,默认使用utf-8编码
        @param decode_type 返回body解码格式,默认使用utf-8解码
        @param parse_json 是否解析返回body为json格式,默认为True
        @param timeout 请求超时时间,默认30秒
        @return data 返回的http body
        """

        if params:
            url = url_concat(url, params)

        if body:
            if not encode_type:
                pass
            elif encode_type == 'utf-8':
                body = json.dumps(body)
            else:
                body = urlencode(body, encoding=encode_type)
        http_client = AsyncHTTPClient()
        response = await http_client.fetch(url,
                                           method='POST',
                                           body=body,
                                           headers=headers,
                                           request_timeout=timeout)

        if response.code not in (200, 201, 202, 203, 204, 205, 206):
            logger.error('url:',
                         url,
                         'post data:',
                         body,
                         'response code:',
                         response.code,
                         'response body:',
                         response.body,
                         caller=cls)
            msg = 'Request to url failed: {url}'.format(url=url)
            raise exceptions.CustomException(msg=msg)

        if response.body:
            data = response.body

            if decode_type:
                data = data.decode(decode_type)

            if parse_json:
                return json.loads(data)
            else:
                return data
        else:
            return None
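
A minimal usage sketch for this helper; the enclosing class name (HttpUtil) and the URL are assumptions, not part of the original:

import asyncio

async def main():
    # POST a JSON body and get the parsed JSON response back
    data = await HttpUtil.post(  # hypothetical name of the enclosing class
        "https://httpbin.org/post",
        body={"hello": "world"},
        headers={"Content-Type": "application/json"},
    )
    print(data)

asyncio.run(main())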
Example #17
async def get_lms_access_token(token_endpoint: str,
                               private_key_path: str,
                               client_id: str,
                               scope=None) -> dict:
    """
    Gets an access token from the LMS Token endpoint by using the private key (pem format) and client id

    Args:
        token_endpoint: The url that will be used to make the request
        private_key_path: specify where the pem is
        client_id: For LTI 1.3 the Client ID that was obtained with the tool setup

    Returns:
        A json with the token value
    """
    token_params = {
        "iss": client_id,
        "sub": client_id,
        "aud": token_endpoint,
        "iat": int(time.time()) - 5,
        "exp": int(time.time()) + 60,
        "jti": str(uuid.uuid4()),
    }
    logger.debug("Getting lms access token with parameters %s" % token_params)
    # get the pem-encoded content
    private_key = get_pem_text_from_file(private_key_path)

    headers = get_headers_to_jwt_encode(private_key)

    token = jwt.encode(token_params,
                       private_key,
                       algorithm="RS256",
                       headers=headers)
    logger.debug("Obtaining token %s" % token)
    scope = scope or " ".join([
        "https://purl.imsglobal.org/spec/lti-ags/scope/score",
        "https://purl.imsglobal.org/spec/lti-ags/scope/lineitem",
        "https://purl.imsglobal.org/spec/lti-ags/scope/result.readonly",
        "https://purl.imsglobal.org/spec/lti-ags/scope/lineitem.readonly",
    ])
    logger.debug("Scope is %s" % scope)
    params = {
        "grant_type": "client_credentials",
        "client_assertion_type":
        "urn:ietf:params:oauth:client-assertion-type:jwt-bearer",
        "client_assertion": token.decode(),
        "scope": scope,
    }
    logger.debug("OAuth parameters are %s" % params)
    client = AsyncHTTPClient()
    body = urllib.parse.urlencode(params)
    try:
        resp = await client.fetch(token_endpoint,
                                  method="POST",
                                  body=body,
                                  headers=None)
    except HTTPClientError as e:
        logger.info(
            f"Error by obtaining a token with lms. Detail: {e.response.body if e.response else e.message}"
        )
        raise
    logger.debug("Token response body is %s" % json.loads(resp.body))
    return json.loads(resp.body)
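
A sketch of driving this coroutine; the endpoint, key path, and client id below are placeholders:

import asyncio

async def main():
    token_response = await get_lms_access_token(
        token_endpoint="https://lms.example.com/oauth/token",  # placeholder
        private_key_path="/path/to/private.pem",               # placeholder
        client_id="my-client-id",                              # placeholder
    )
    print(token_response.get("access_token"))

asyncio.run(main())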
Example #18
@gen.coroutine
def async_http(url):
    '''Fetch a URL asynchronously'''
    httpclient = AsyncHTTPClient()
    result = yield httpclient.fetch(url)
    raise gen.Return(result.body)
Example #19
    @gen.coroutine
    def get(self):
        self.log.info('AUTH.')
        user = yield self.get_current_user()
        token = self.get_argument('gcube-token')
        if user and token:
            self.log.info('Clearing login cookie, new user?')
            self.clear_login_cookie()
            # make sure we don't make a mess here
            self.redirect(url_concat(
                self.authenticator.login_url(self.hub.base_url),
                {'gcube-token': token}),
                          permanent=False)
            return
        if not token:
            self.log.error('No gcube token. Out!')
            raise web.HTTPError(403)
        http_client = AsyncHTTPClient()
        # discover user info
        user_url = url_concat(
            url_path_join(D4SCIENCE_SOCIAL_URL, D4SCIENCE_PROFILE),
            {'gcube-token': token})
        req = HTTPRequest(user_url, method='GET')
        try:
            resp = yield http_client.fetch(req)
        except HTTPError as e:
            # whatever, get out
            self.log.warning('Something happened with gcube service: %s', e)
            raise web.HTTPError(403)
        resp_json = json.loads(resp.body.decode('utf8', 'replace'))
        username = resp_json.get('result', {}).get('username', '')
        if not username:
            self.log.error('Unable to get the user from gcube?')
            raise web.HTTPError(403)

        # discover WPS
        self.log.info('Discover wps')
        wps_endpoint = ''
        discovery_url = url_concat(D4SCIENCE_DM_REGISTRY_URL,
                                   {'gcube-token': token})
        req = HTTPRequest(discovery_url, method='GET')
        try:
            self.log.info('fetch')
            resp = yield http_client.fetch(req)
        except HTTPError as e:
            # whatever, get out
            self.log.warning('Something happened with gcube service: %s', e)
            raise web.HTTPError(403)
        root = ElementTree.fromstring(resp.body.decode('utf8', 'replace'))
        self.log.info('root %s', root)
        for child in root.findall('Resource/Profile/AccessPoint/'
                                  'Interface/Endpoint'):
            entry_name = child.attrib["EntryName"]
            self.log.info('entry_name %s', entry_name)
            if entry_name != "GetCapabilities":
                wps_endpoint = child.text
                self.log.info('WPS endpoint: %s', wps_endpoint)
                break

        self.log.info('D4Science user is %s', username)
        self.log.info('WPS %s', wps_endpoint)
        data = {
            'gcube-token': token,
            'gcube-user': username,
            'wps-endpoint': wps_endpoint
        }
        data.update(resp_json['result'])
        user = yield self.login_user(data)
        if user:
            self._jupyterhub_user = user
            self.redirect(self.get_next_url(user), permanent=False)
Example #20
@gen.coroutine
def async_http2(url1, url2):
    '''Fetch two URLs asynchronously'''
    httpclient = AsyncHTTPClient()
    r1, r2 = yield [httpclient.fetch(url1), httpclient.fetch(url2)]
    raise gen.Return(r1.body + r2.body)
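
The native-coroutine equivalent runs both fetches concurrently with gen.multi; a sketch:

from tornado import gen
from tornado.httpclient import AsyncHTTPClient

async def async_http2_native(url1, url2):
    '''Fetch two URLs concurrently as a native coroutine'''
    httpclient = AsyncHTTPClient()
    # gen.multi waits for both futures in parallel
    r1, r2 = await gen.multi([httpclient.fetch(url1), httpclient.fetch(url2)])
    return r1.body + r2.body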
Example #21
    async def get(self, provider_prefix, _unescaped_spec):
        prefix = "/v2/" + provider_prefix
        spec = self.get_spec_from_request(prefix)
        spec = spec.rstrip("/")
        try:
            self.get_provider(provider_prefix, spec=spec)
        except HTTPError:
            raise
        except Exception as e:
            app_log.error(
                "Failed to construct provider for %s/%s",
                provider_prefix,
                spec,
            )
            # FIXME: 400 assumes it's the user's fault (?)
            # maybe we should catch a special InvalidSpecError here
            raise HTTPError(400, str(e))

        provider_spec = f"{provider_prefix}/{spec}"
        social_desc = f"{SPEC_NAMES[provider_prefix]}: {spec}"
        nbviewer_url = None
        if provider_prefix == "gh":
            # We can only produce an nbviewer URL for github right now
            nbviewer_url = "https://nbviewer.jupyter.org/github"
            org, repo_name, ref = spec.split("/", 2)
            # NOTE: tornado unquotes query arguments too -> notebooks%2Findex.ipynb becomes notebooks/index.ipynb
            filepath = self.get_argument("labpath", "").lstrip("/")
            if not filepath:
                filepath = self.get_argument("filepath", "").lstrip("/")

            # Check the urlpath parameter for a file path, if so use it for the filepath
            urlpath = self.get_argument("urlpath", "").lstrip("/")
            if urlpath and "/tree/" in urlpath:
                filepath = urlpath.split("tree/", 1)[-1]

            blob_or_tree = "blob" if filepath else "tree"
            nbviewer_url = (
                f"{nbviewer_url}/{org}/{repo_name}/{blob_or_tree}/{ref}/{filepath}"
            )

            # Check if the nbviewer URL is valid and would display something
            # useful to the reader, if not we don't show it
            client = AsyncHTTPClient()
            # quote any unicode characters in the URL
            proto, rest = nbviewer_url.split("://")
            rest = urllib.parse.quote(rest)

            request = HTTPRequest(
                proto + "://" + rest,
                method="HEAD",
                user_agent="BinderHub",
            )
            response = await client.fetch(request, raise_error=False)
            if response.code >= 400:
                nbviewer_url = None

        build_token = jwt.encode(
            {
                "exp": int(time.time()) + self.settings["build_token_expires_seconds"],
                "aud": provider_spec,
                "origin": self.token_origin(),
            },
            key=self.settings["build_token_secret"],
            algorithm="HS256",
        )
        self.render_template(
            "loading.html",
            base_url=self.settings["base_url"],
            badge_base_url=self.get_badge_base_url(),
            build_token=build_token,
            provider_spec=provider_spec,
            social_desc=social_desc,
            nbviewer_url=nbviewer_url,
            # urlpath=self.get_argument('urlpath', None),
            submit=True,
            google_analytics_code=self.settings["google_analytics_code"],
            google_analytics_domain=self.settings["google_analytics_domain"],
            extra_footer_scripts=self.settings["extra_footer_scripts"],
        )
Example #22
    @gen.coroutine
    def get_testerhome(self):
        http_client = AsyncHTTPClient()
        response = yield http_client.fetch("https://testerhome.com/")
        raise gen.Return(response.body)
Example #23
@coroutine
def cull_idle(url, api_token, inactive_limit, cull_users=False, max_age=0, concurrency=10):
    """Shutdown idle single-user servers

    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {
        'Authorization': 'token %s' % api_token,
    }
    req = HTTPRequest(
        url=url + '/users',
        headers=auth_header,
    )
    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)
        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()
    else:
        fetch = client.fetch

    resp = yield fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))
    futures = []

    @coroutine
    def handle_server(user, server_name, server):
        """Handle (maybe) culling a single server

        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning(
                "Not culling server %s with pending %s",
                log_name, server['pending'])
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get('ready', bool(server['url'])):
            app_log.warning(
                "Not culling not-ready not-pending server %s: %s",
                log_name, server)
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server['last_activity']:
            inactive = now - parse_date(server['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        should_cull = (inactive is not None and
                       inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info(
                "Culling server %s (inactive for %s)",
                log_name, format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling server %s (age: %s, inactive for %s)",
                    log_name, format_td(age), format_td(inactive))
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling server %s (age: %s, inactive for %s)",
                log_name, format_td(age), format_td(inactive))
            return False

        if server_name:
            # culling a named server
            delete_url = url + "/users/%s/servers/%s" % (
                quote(user['name']), quote(server['name'])
            )
        else:
            delete_url = url + '/users/%s/server' % quote(user['name'])

        req = HTTPRequest(
            url=delete_url, method='DELETE', headers=auth_header,
        )
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning(
                "Server %s is slow to stop",
                log_name,
            )
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user.

        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if 'servers' in user:
            servers = user['servers']
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user['server']:
                servers[''] = {
                    'last_activity': user['last_activity'],
                    'pending': user['pending'],
                    'url': user['server'],
                }
        server_futures = [
            handle_server(user, server_name, server)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug(
                "Not culling user %s with %i servers still alive",
                user['name'], still_alive)
            return False

        should_cull = False
        if user.get('created'):
            age = now - parse_date(user['created'])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if user['last_activity']:
            inactive = now - parse_date(user['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = (inactive is not None and
                       inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info(
                "Culling user %s (inactive for %s)",
                user['name'], inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling user %s (age: %s, inactive for %s)",
                    user['name'], format_td(age), format_td(inactive))
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling user %s (created: %s, last active: %s)",
                user['name'], format_td(age), format_td(inactive))
            return False

        req = HTTPRequest(
            url=url + '/users/%s' % user['name'],
            method='DELETE',
            headers=auth_header,
        )
        yield fetch(req)
        return True

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #24
    def get_http_client(self):
        # the io_loop argument only exists on Tornado < 5
        return AsyncHTTPClient(io_loop=self.io_loop)
Example #25
import os
import sys
import threading

from urllib.parse import urlencode, quote

from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.web import RequestHandler, Application
from tornado.ioloop import IOLoop

from apscheduler.schedulers.tornado import TornadoScheduler

parent_path = os.path.dirname(sys.path[0])
if parent_path not in sys.path:
    sys.path.append(parent_path)
from fileBasedConfiguration import ApplicationProperties
from constants import RequestMapping

http_client = AsyncHTTPClient()
http_header = {
    # "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.111 Safari/537.36"
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) VAR/1.0.0.1"
}

# (ip, port, expire, used_times)
global_ip_pool = [([""] * 4) for i in range(1)]
global_ip_dic = {}
ip_dic_lock = threading.Lock()


async def refreship():
    maxtimes = int(
        ApplicationProperties.configure(
Example #26
@coroutine
def cull_idle(
    url,
    api_token,
    inactive_limit,
    cull_users=False,
    remove_named_servers=False,
    max_age=0,
    concurrency=10,
    ssl_enabled=False,
    internal_certs_location="",
):
    """Shutdown idle single-user servers

    If cull_users, inactive *users* will be deleted as well.
    """
    if ssl_enabled:
        ssl_context = make_ssl_context(
            f"{internal_certs_location}/hub-internal/hub-internal.key",
            f"{internal_certs_location}/hub-internal/hub-internal.crt",
            f"{internal_certs_location}/hub-ca/hub-ca.crt",
        )

        app_log.debug("ssl_enabled is Enabled: %s", ssl_enabled)
        app_log.debug("internal_certs_location is %s", internal_certs_location)
        AsyncHTTPClient.configure(None, defaults={"ssl_options": ssl_context})

    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()

    else:
        fetch = client.fetch

    # Starting with jupyterhub 1.3.0 the users can be filtered in the server
    # using the `state` filter parameter. "ready" means all users who have any
    # ready servers (running, not pending).
    auth_header = {"Authorization": "token %s" % api_token}
    resp = yield fetch(HTTPRequest(url=url + "/info", headers=auth_header))
    info = json.loads(resp.body.decode("utf8", "replace"))
    state_filter = V(info["version"]) >= STATE_FILTER_MIN_VERSION

    req = HTTPRequest(
        url=url + "/users%s" % ("?state=ready" if state_filter else ""),
        headers=auth_header,
    )
    now = datetime.now(timezone.utc)

    resp = yield fetch(req)
    users = json.loads(resp.body.decode("utf8", "replace"))
    app_log.debug("Got %d users%s", len(users),
                  (" with ready servers" if state_filter else ""))
    futures = []

    @coroutine
    def handle_server(user, server_name, server, max_age, inactive_limit):
        """Handle (maybe) culling a single server

        "server" is the entire server model from the API.

        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user["name"]
        if server_name:
            log_name = "%s/%s" % (user["name"], server_name)
        if server.get("pending"):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server["pending"])
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get("ready", bool(server["url"])):
            app_log.warning("Not culling not-ready not-pending server %s: %s",
                            log_name, server)
            return False

        if server.get("started"):
            age = now - parse_date(server["started"])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server["last_activity"]:
            inactive = now - parse_date(server["last_activity"])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        # CUSTOM CULLING TEST CODE HERE
        # Add in additional server tests here.  Return False to mean "don't
        # cull", True means "cull immediately", or, for example, update some
        # other variables like inactive_limit.
        #
        # Here, server['state'] is the result of the get_state method
        # on the spawner.  This does *not* contain the below by
        # default, you may have to modify your spawner to make this
        # work.  The `user` variable is the user model from the API.
        #
        # if server['state']['profile_name'] == 'unlimited'
        #     return False
        # inactive_limit = server['state']['culltime']

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling server %s (age: %s, inactive for %s)",
                    log_name,
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling server %s (age: %s, inactive for %s)",
                log_name,
                format_td(age),
                format_td(inactive),
            )
            return False

        body = None
        if server_name:
            # culling a named server
            # A named server can be stopped and kept available to the user
            # for starting again or stopped and removed. To remove the named
            # server we have to pass an additional option in the body of our
            # DELETE request.
            delete_url = url + "/users/%s/servers/%s" % (
                quote(user["name"]),
                quote(server["name"]),
            )
            if remove_named_servers:
                body = json.dumps({"remove": True})
        else:
            delete_url = url + "/users/%s/server" % quote(user["name"])

        req = HTTPRequest(
            url=delete_url,
            method="DELETE",
            headers=auth_header,
            body=body,
            allow_nonstandard_methods=True,
        )
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning("Server %s is slow to stop", log_name)
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user.

        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if "servers" in user:
            servers = user["servers"]
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user["server"]:
                servers[""] = {
                    "last_activity": user["last_activity"],
                    "pending": user["pending"],
                    "url": user["server"],
                }
        server_futures = [
            handle_server(user, server_name, server, max_age, inactive_limit)
            for server_name, server in servers.items()
        ]
        results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug(
                "Not culling user %s with %i servers still alive",
                user["name"],
                still_alive,
            )
            return False

        should_cull = False
        if user.get("created"):
            age = now - parse_date(user["created"])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if user["last_activity"]:
            inactive = now - parse_date(user["last_activity"])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user["name"],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info(
                    "Culling user %s (age: %s, inactive for %s)",
                    user["name"],
                    format_td(age),
                    format_td(inactive),
                )
                should_cull = True

        if not should_cull:
            app_log.debug(
                "Not culling user %s (created: %s, last active: %s)",
                user["name"],
                format_td(age),
                format_td(inactive),
            )
            return False

        req = HTTPRequest(url=url + "/users/%s" % user["name"],
                          method="DELETE",
                          headers=auth_header)
        yield fetch(req)
        return True

    for user in users:
        futures.append((user["name"], handle_user(user)))

    # If we filtered users by state=ready then we did not get back any which
    # are inactive, so if we're also culling users get the set of users which
    # are inactive and see if they should be culled as well.
    if state_filter and cull_users:
        req = HTTPRequest(url=url + "/users?state=inactive",
                          headers=auth_header)
        resp = yield fetch(req)
        users = json.loads(resp.body.decode("utf8", "replace"))
        app_log.debug("Got %d users with inactive servers", len(users))
        for user in users:
            futures.append((user["name"], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #27
    def get_http_client(self):
        return AsyncHTTPClient(force_instance=True,
                               defaults=dict(validate_cert=False))
Example #28
    @gen.coroutine
    def get(self):
        io_loop = self.request.connection.stream.io_loop
        # the io_loop argument only exists on Tornado < 5
        client = AsyncHTTPClient(io_loop=io_loop)
        response = yield gen.Task(client.fetch, self.get_argument('url'))
        response.rethrow()
        # b() is the old tornado.util bytes helper from the Python 2 era
        self.finish(b("got response: ") + response.body)
Example #29
    def get_http_client(self):
        return AsyncHTTPClient()
Example #30
@gen.coroutine
def up_yflow(handler, partner):
    handler.up_req_time = time.localtime()

    confId = partner["confId"]
    callback = partner["callback"]
    apikey = partner["apikey"]
    mobile = handler.mobile
    seqNo = handler.order_id
    t = int(time.time())

    data = apikey + str(confId) + mobile + str(t)
    sign = to_md5(data)

    k = 'private:yflow:{carrier}:{price}'.format(carrier=handler.carrier,
                                                 price=handler.price)
    flow = handler.slave.get(k)

    # check for a missing key before converting; int(None) would raise
    if flow is None:
        handler.up_result = 5003
        return handler.up_result

    flow = int(flow)

    body = {
        "confId": confId,
        "mobile": mobile,
        "flow": flow,
        "callback": callback,
        "t": t,
        "sign": sign,
        "seqNo": seqNo,
    }
    body = urlencode(body)

    url = partner["url_busi"]
    url = url + '?' + body

    result = 9999
    http_client = AsyncHTTPClient()
    try:
        request_log.info("REQU %s", body, extra={'orderid': handler.order_id})
        response = yield http_client.fetch(url,
                                           method='GET',
                                           request_timeout=120)

    except HTTPError as http_error:
        request_log.error('CALL UPSTREAM FAIL %s',
                          http_error,
                          extra={'orderid': handler.order_id})
        result = 60000 + http_error.code
        response = None

    except Exception as e:
        request_log.error('CALL UPSTREAM FAIL %s',
                          e,
                          extra={'orderid': handler.order_id})
        response = None
    finally:
        http_client.close()

    handler.up_resp_time = time.localtime()

    if response and response.code == 200:
        response_body = response.body.decode('utf8')
        request_log.info("RESP %s",
                         response_body,
                         extra={'orderid': handler.order_id})
        try:
            response_body = json.loads(response_body)
            retCode = response_body["retCode"]

            result = RESULT_MAP.get(retCode, 9)
            handler.up_result = str(result)

        except Exception as e:
            result = 9999
            handler.up_result = result
            request_log.error('PARSE UPSTREAM %s',
                              e,
                              extra={'orderid': handler.order_id})
    return result