Example #1
 async def _stop_cluster(self, cluster_name):
     url = "%s/gateway/api/clusters/%s" % (self.address, cluster_name)
     req = HTTPRequest(url=url, method="DELETE")
     await self._fetch(req)
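Note: `_stop_cluster` relies on a `_fetch` helper defined elsewhere on the class. As a rough standalone sketch, assuming `_fetch` simply delegates to `AsyncHTTPClient.fetch`:

from tornado.httpclient import AsyncHTTPClient, HTTPRequest

async def stop_cluster(address, cluster_name):
    # Hypothetical standalone variant of the method above:
    # DELETE the cluster resource and let fetch() raise on errors.
    url = "%s/gateway/api/clusters/%s" % (address, cluster_name)
    await AsyncHTTPClient().fetch(HTTPRequest(url=url, method="DELETE"))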
Example #2
 def test_request_set(self):
     proxy = _RequestProxy(
         HTTPRequest("http://example.com/", user_agent="foo"), dict()
     )
     self.assertEqual(proxy.user_agent, "foo")
Example #3
 def test_both_set(self):
     proxy = _RequestProxy(
         HTTPRequest("http://example.com/", proxy_host="foo"), dict(proxy_host="bar")
     )
     self.assertEqual(proxy.proxy_host, "foo")
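Note: these tests exercise `_RequestProxy`, a private Tornado helper that answers attribute lookups from the request first and from a defaults dict second. A minimal sketch of that fall-through (assuming the private name stays importable):

from tornado.httpclient import HTTPRequest, _RequestProxy  # Tornado-internal

proxy = _RequestProxy(HTTPRequest("http://example.com/", user_agent="foo"),
                      dict(user_agent="bar", connect_timeout=5))
assert proxy.user_agent == "foo"    # set on the request, so the request wins
assert proxy.connect_timeout == 5   # unset on the request, falls back to the defaults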
Example #4
    def authenticate(self, handler, data=None):
        """We set up auth_state based on additional CILogon info if we
        receive it.
        """
        code = handler.get_argument("code")
        # TODO: Configure the curl_httpclient for tornado
        http_client = AsyncHTTPClient()

        # Exchange the OAuth code for a CILogon Access Token
        # See: http://www.cilogon.org/oidc
        headers = {
            "Accept": "application/json",
            "User-Agent": "JupyterHub",
        }

        params = dict(
            client_id=self.client_id,
            client_secret=self.client_secret,
            redirect_uri=self.oauth_callback_url,
            code=code,
            grant_type='authorization_code',
        )

        url = url_concat("https://%s/oauth2/token" % CILOGON_HOST, params)

        req = HTTPRequest(url, headers=headers, method="POST", body='')

        resp = yield http_client.fetch(req)
        token_response = json.loads(resp.body.decode('utf8', 'replace'))
        access_token = token_response['access_token']
        self.log.info("Access token acquired.")
        # Determine who the logged in user is
        params = dict(access_token=access_token)
        req = HTTPRequest(url_concat(
            "https://%s/oauth2/userinfo" % CILOGON_HOST, params),
                          headers=headers)
        resp = yield http_client.fetch(req)
        resp_json = json.loads(resp.body.decode('utf8', 'replace'))

        username = resp_json.get(self.username_claim)
        if not username:
            self.log.error("Username claim %s not found in the response: %s",
                           self.username_claim, sorted(resp_json.keys()))
            raise web.HTTPError(500, "Failed to get username from CILogon")

        if self.idp_whitelist:
            gotten_name, gotten_idp = username.split('@')
            if gotten_idp not in self.idp_whitelist:
                self.log.error(
                    "Login attempt from non-whitelisted domain %s",
                    gotten_idp)
                raise web.HTTPError(
                    500, "Login attempt from non-whitelisted domain")
            if len(self.idp_whitelist) == 1 and self.strip_idp_domain:
                username = gotten_name
        userdict = {"name": username}
        # Now we set up auth_state
        userdict["auth_state"] = auth_state = {}
        # Save the token response and full CILogon reply in auth state
        # These can be used for user provisioning
        #  in the Lab/Notebook environment.
        auth_state['token_response'] = token_response
        # store the whole user model in auth_state.cilogon_user
        # keep access_token as well, in case anyone was relying on it
        auth_state['access_token'] = access_token
        auth_state['cilogon_user'] = resp_json
        return userdict
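Note: both fetches above follow a common Tornado OAuth pattern: parameters travel in the query string via `url_concat`, and the POST still passes `body=''` because Tornado rejects a body-less POST unless `allow_nonstandard_methods` is set. Extracted as a sketch:

from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from tornado.httputil import url_concat

async def post_with_query_params(token_url, params, headers):
    # body='' keeps Tornado happy when all parameters are in the URL.
    req = HTTPRequest(url_concat(token_url, params),
                      method="POST", headers=headers, body='')
    return await AsyncHTTPClient().fetch(req)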
Example #5
def get_http_request(url, method='GET', body=None):
    return HTTPRequest(url, method=method, body=body)
Example #6
def get(url, params):
    req = HTTPRequest(url=url + '?' + params, method="GET")
    return req
Example #7
 async def make_request(url, method='GET', body=None, timeout=None):
     request = HTTPRequest(url, method, body=body, request_timeout=timeout)
     response = await Task(http_client.fetch, request)
     if response.error:
         raise response.error
     return response
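Note: `Task(http_client.fetch, request)` and the `response.error` check date from Tornado's callback era. On Tornado 5+/6, where `fetch` raises `HTTPError` itself, an equivalent helper is shorter (a sketch, with the client created inline):

from tornado.httpclient import AsyncHTTPClient, HTTPRequest

async def make_request(url, method='GET', body=None, timeout=None):
    # fetch() raises tornado.httpclient.HTTPError on non-2xx responses,
    # so there is no response.error left to inspect.
    request = HTTPRequest(url, method=method, body=body, request_timeout=timeout)
    return await AsyncHTTPClient().fetch(request)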
Example #8
 def test_both_set(self):
     proxy = _RequestProxy(
         HTTPRequest('http://example.com/', proxy_host='foo'),
         dict(proxy_host='bar'))
     self.assertEqual(proxy.proxy_host, 'foo')
Example #9
 def test_bad_attribute(self):
     proxy = _RequestProxy(HTTPRequest('http://example.com/'), dict())
     with self.assertRaises(AttributeError):
         proxy.foo
Example #10
 def test_request_set(self):
     proxy = _RequestProxy(
         HTTPRequest('http://example.com/', user_agent='foo'), dict())
     self.assertEqual(proxy.user_agent, 'foo')
Example #11
 def test_default_set(self):
     proxy = _RequestProxy(HTTPRequest('http://example.com/'),
                           dict(network_interface='foo'))
     self.assertEqual(proxy.network_interface, 'foo')
Example #12
async def mock_gateway_request(url, **kwargs):
    method = kwargs.get('method') or 'GET'

    request = HTTPRequest(url=url, **kwargs)

    endpoint = str(url)

    # Fetch all kernelspecs
    if endpoint.endswith('/api/kernelspecs') and method == 'GET':
        response_buf = StringIO(str_to_unicode(json.dumps(kernelspecs)))
        response = await ensure_async(HTTPResponse(request, 200, buffer=response_buf))
        return response

    # Fetch named kernelspec
    if endpoint.rfind('/api/kernelspecs/') >= 0 and method == 'GET':
        requested_kernelspec = endpoint.rpartition('/')[2]
        kspecs = kernelspecs.get('kernelspecs')
        if requested_kernelspec in kspecs:
            response_buf = StringIO(str_to_unicode(json.dumps(kspecs.get(requested_kernelspec))))
            response = await ensure_async(HTTPResponse(request, 200, buffer=response_buf))
            return response
        else:
            raise HTTPError(404, message='Kernelspec does not exist: %s' % requested_kernelspec)

    # Create kernel
    if endpoint.endswith('/api/kernels') and method == 'POST':
        json_body = json.loads(kwargs['body'])
        name = json_body.get('name')
        env = json_body.get('env')
        kspec_name = env.get('KERNEL_KSPEC_NAME')
        assert name == kspec_name   # Ensure that KERNEL_ env values get propagated
        model = generate_model(name)
        running_kernels[model.get('id')] = model  # Register model as a running kernel
        response_buf = StringIO(str_to_unicode(json.dumps(model)))
        response = await ensure_async(HTTPResponse(request, 201, buffer=response_buf))
        return response

    # Fetch list of running kernels
    if endpoint.endswith('/api/kernels') and method == 'GET':
        kernels = []
        for kernel_id in running_kernels.keys():
            model = running_kernels.get(kernel_id)
            kernels.append(model)
        response_buf = StringIO(str_to_unicode(json.dumps(kernels)))
        response = await ensure_async(HTTPResponse(request, 200, buffer=response_buf))
        return response

    # Interrupt or restart existing kernel
    if endpoint.rfind('/api/kernels/') >= 0 and method == 'POST':
        requested_kernel_id, sep, action = endpoint.rpartition('/api/kernels/')[2].rpartition('/')

        if action == 'interrupt':
            if requested_kernel_id in running_kernels:
                response = await ensure_async(HTTPResponse(request, 204))
                return response
            else:
                raise HTTPError(404, message='Kernel does not exist: %s' % requested_kernel_id)
        elif action == 'restart':
            if requested_kernel_id in running_kernels:
                response_buf = StringIO(str_to_unicode(json.dumps(running_kernels.get(requested_kernel_id))))
                response = await ensure_async(HTTPResponse(request, 204, buffer=response_buf))
                return response
            else:
                raise HTTPError(404, message='Kernel does not exist: %s' % requested_kernel_id)
        else:
            raise HTTPError(404, message='Bad action detected: %s' % action)

    # Shutdown existing kernel
    if endpoint.rfind('/api/kernels/') >= 0 and method == 'DELETE':
        requested_kernel_id = endpoint.rpartition('/')[2]
        running_kernels.pop(requested_kernel_id)  # Simulate shutdown by removing kernel from running set
        response = await ensure_async(HTTPResponse(request, 204))
        return response

    # Fetch existing kernel
    if endpoint.rfind('/api/kernels/') >= 0 and method == 'GET':
        requested_kernel_id = endpoint.rpartition('/')[2]
        if requested_kernel_id in running_kernels:
            response_buf = StringIO(str_to_unicode(json.dumps(running_kernels.get(requested_kernel_id))))
            response = await ensure_async(HTTPResponse(request, 200, buffer=response_buf))
            return response
        else:
            raise HTTPError(404, message='Kernel does not exist: %s' % requested_kernel_id)
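Note: each branch above hand-builds a canned `HTTPResponse`. A possible helper to cut the repetition (an assumption, not part of the original; `io.BytesIO` is used because `HTTPResponse.body` reads raw bytes from `buffer`):

import json
from io import BytesIO
from tornado.httpclient import HTTPRequest, HTTPResponse

def canned_json_response(request, code, payload):
    # HTTPResponse.body comes from buffer.getvalue(), so encoded JSON
    # in a BytesIO is enough for a test double.
    return HTTPResponse(request, code,
                        buffer=BytesIO(json.dumps(payload).encode('utf-8')))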
Example #13
    async def _fetch_access_token(self, code, redirect_uri, client_id,
                                  client_secret):
        """
        Fetches the access token.

        Arguments
        ----------
        code:
          The response code from the server
        redirect_uri:
          The redirect URI
        client_id:
          The client ID
        client_secret:
          The client secret
        """
        if not client_secret:
            raise ValueError('The client secret is undefined.')

        log.debug("%s making access token request.", type(self).__name__)

        http = self.get_auth_http_client()

        params = {
            'code': code,
            'redirect_uri': redirect_uri,
            'client_id': client_id,
            'client_secret': client_secret,
            **self._EXTRA_TOKEN_PARAMS
        }

        url = url_concat(self._OAUTH_ACCESS_TOKEN_URL, params)

        # Request the access token.
        req = HTTPRequest(url,
                          method="POST",
                          headers=self._API_BASE_HEADERS,
                          body='')
        try:
            response = await http.fetch(req)
        except HTTPError as e:
            return self._on_error(e.response)

        body = decode_response_body(response)

        if not body:
            return

        if 'access_token' not in body:
            return self._on_error(response, body)

        log.debug("%s granted access_token.", type(self).__name__)

        headers = dict(
            self._API_BASE_HEADERS, **{
                "Authorization": "Bearer {}".format(body['access_token']),
            })

        user_response = await http.fetch(self._OAUTH_USER_URL,
                                         method="GET",
                                         headers=headers)

        user = decode_response_body(user_response)

        if not user:
            return

        log.debug("%s received user information.", type(self).__name__)

        return self._on_auth(user, body['access_token'])
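Note: the user-info call shows that `AsyncHTTPClient.fetch` also accepts a bare URL plus `HTTPRequest` keyword arguments, so the explicit `HTTPRequest` object is optional. The same call isolated as a sketch:

from tornado.httpclient import AsyncHTTPClient

async def fetch_user(user_url, base_headers, access_token):
    # fetch(url, **kwargs) builds the HTTPRequest internally.
    headers = dict(base_headers, Authorization="Bearer {}".format(access_token))
    return await AsyncHTTPClient().fetch(user_url, method="GET", headers=headers)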
Example #14
def test_extra_traefik_config():
    extra_static_config_dir = os.path.join(CONFIG_DIR, "traefik_config.d")
    os.makedirs(extra_static_config_dir, exist_ok=True)

    dynamic_config_dir = os.path.join(STATE_DIR, "rules")
    os.makedirs(dynamic_config_dir, exist_ok=True)

    extra_static_config = {
        "entryPoints": {
            "no_auth_api": {
                "address": "127.0.0.1:9999"
            }
        },
        "api": {
            "dashboard": True,
            "entrypoint": "no_auth_api"
        },
    }

    extra_dynamic_config = {
        "frontends": {
            "test": {
                "backend": "test",
                "routes": {
                    "rule1": {
                        "rule": "PathPrefixStrip: /the/hub/runs/here/too"
                    }
                },
            }
        },
        "backends": {
            # redirect to hub
            "test": {
                "servers": {
                    "server1": {
                        "url": "http://127.0.0.1:15001"
                    }
                }
            }
        },
    }

    success = False
    for i in range(5):
        time.sleep(i)
        try:
            with pytest.raises(HTTPClientError,
                               match="HTTP 401: Unauthorized"):
                # The default dashboard entrypoint requires authentication, so it should fail
                req = HTTPRequest("http://127.0.0.1:8099/dashboard/",
                                  method="GET")
                HTTPClient().fetch(req)
            success = True
            break
        except Exception:
            pass

    assert success

    # write the extra static config
    with open(os.path.join(extra_static_config_dir, "extra.toml"),
              "w+") as extra_config_file:
        toml.dump(extra_static_config, extra_config_file)

    # write the extra dynamic config
    with open(os.path.join(dynamic_config_dir, "extra_rules.toml"),
              "w+") as extra_config_file:
        toml.dump(extra_dynamic_config, extra_config_file)

    # load the extra config
    reload_component("proxy")

    # the new dashboard entrypoint shouldn't require authentication anymore
    resp = send_request(url="http://127.0.0.1:9999/dashboard/", max_sleep=5)
    assert resp.code == 200

    # test extra dynamic config
    resp = send_request(url="http://127.0.0.1/the/hub/runs/here/too",
                        max_sleep=5)
    assert resp.code == 200
    assert "http://127.0.0.1/hub/login" in resp.effective_url

    # cleanup
    os.remove(os.path.join(extra_static_config_dir, "extra.toml"))
    os.remove(os.path.join(dynamic_config_dir, "extra_rules.toml"))
    open(os.path.join(STATE_DIR, "traefik.toml"), "w").close()
Example #15
    def post(self):
        username = self.get_argument('username', default=None)
        pwd = self.get_argument('password', default=None)
        status = None

        retjson = {'code': 200, 'content': ''}
        if not (username and pwd):
            retjson['code'] = 400
            retjson['content'] = 'missing params'
        else:
            # read from cache
            try:
                status = self.db.query(GpaCache).filter(
                    GpaCache.cardnum == username).one()
                if status.date > int(time()) - 129600 and status.text != '*':
                    self.write(base64.b64decode(status.text))
                    self.finish()
                    return
            except NoResultFound:
                status = GpaCache(cardnum=username, text='*', date=int(time()))
                self.db.add(status)
            try:
                self.db.commit()
            except Exception:
                self.db.rollback()

            client = AsyncHTTPClient()
            request = HTTPRequest(VERCODE_URL, request_timeout=TIME_OUT)
            response = yield tornado.gen.Task(client.fetch, request)
            if not response.headers:
                retjson['code'] = 408
                retjson['content'] = 'time out'
            else:
                cookie = response.headers['Set-Cookie'].split(
                    ';'
                )[0]  #+";"+response.headers['Set-Cookie'].split(';')[1].split(',')[1]
                img = Image.open(io.BytesIO(response.body))
                vercode = self.recognize(img)
                params = urllib.urlencode({
                    'userName': username,
                    'password': pwd,
                    'vercode': vercode
                })
                request = HTTPRequest(LOGIN_URL,
                                      body=params,
                                      method='POST',
                                      headers={'Cookie': cookie},
                                      request_timeout=TIME_OUT)
                response = yield tornado.gen.Task(client.fetch, request)
                if not response.headers:
                    retjson['code'] = 408
                    retjson['content'] = 'time out'
                else:
                    if 'vercode' in response.body:
                        retjson['code'] = 401
                        retjson['content'] = 'wrong card number or password'
                    else:
                        request = HTTPRequest(INFO_URL,
                                              request_timeout=TIME_OUT,
                                              headers={'Cookie': cookie})
                        response = yield tornado.gen.Task(
                            client.fetch, request)
                        if not response.headers:
                            retjson['code'] = 408
                            retjson['content'] = 'time out'
                        else:
                            retjson['content'] = self.parser(response.body)
        ret = json.dumps(retjson, ensure_ascii=False, indent=2)
        self.write(ret)
        self.finish()
        # refresh cache
        if retjson['code'] == 200:
            status.date = int(time())
            status.text = base64.b64encode(ret)
            self.db.add(status)
            try:
                self.db.commit()
            except Exception:
                self.db.rollback()
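Note: this handler targets Python 2 and the old `tornado.gen.Task` callback bridge, and it detects timeouts by checking for empty `response.headers`. On current Tornado, a sketch of the same probe is simply:

from tornado.httpclient import AsyncHTTPClient, HTTPError, HTTPRequest

async def fetch_or_none(url, timeout):
    try:
        return await AsyncHTTPClient().fetch(HTTPRequest(url, request_timeout=timeout))
    except HTTPError:
        return None  # e.g. code 599 on timeout; the caller can answer 408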
Example #16
 def test_defaults_none(self):
     proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
     self.assertIs(proxy.auth_username, None)
Example #17
    def github_api_request(self, api_url, etag=None):
        client = AsyncHTTPClient()
        if self.auth:
            # Add auth params. After logging!
            api_url = url_concat(api_url, self.auth)

        headers = {}
        if etag:
            headers['If-None-Match'] = etag
        req = HTTPRequest(api_url, headers=headers, user_agent="BinderHub")

        try:
            resp = yield client.fetch(req)
        except HTTPError as e:
            if e.code == 304:
                resp = e.response
            elif (e.code == 403 and e.response
                  and e.response.headers.get('x-ratelimit-remaining') == '0'):
                rate_limit = e.response.headers['x-ratelimit-limit']
                reset_timestamp = int(e.response.headers['x-ratelimit-reset'])
                reset_seconds = int(reset_timestamp - time.time())
                self.log.error(
                    "GitHub Rate limit ({limit}) exceeded. Reset in {delta}.".
                    format(
                        limit=rate_limit,
                        delta=timedelta(seconds=reset_seconds),
                    ))
                # round expiry up to nearest 5 minutes
                minutes_until_reset = 5 * (1 + (reset_seconds // 60 // 5))

                raise ValueError(
                    "GitHub rate limit exceeded. Try again in %i minutes." %
                    minutes_until_reset)
            # Status 422 is returned by the API when we try and resolve a non
            # existent reference
            elif e.code in (404, 422):
                return None
            else:
                raise

        # record and log github rate limit
        remaining = int(resp.headers['x-ratelimit-remaining'])
        rate_limit = int(resp.headers['x-ratelimit-limit'])
        reset_timestamp = int(resp.headers['x-ratelimit-reset'])

        # record with prometheus
        GITHUB_RATE_LIMIT.set(remaining)

        # log at different levels, depending on remaining fraction
        fraction = remaining / rate_limit
        if fraction < 0.2:
            log = self.log.warning
        elif fraction < 0.5:
            log = self.log.info
        else:
            log = self.log.debug

        # str(timedelta) looks like '0:00:32'
        delta = timedelta(seconds=int(reset_timestamp - time.time()))
        log("GitHub rate limit remaining {remaining}/{limit}. Reset in {delta}."
            .format(
                remaining=remaining,
                limit=rate_limit,
                delta=delta,
            ))
        return resp
Example #18
async def test_desired_state_versions_paging(
    server, client, order: str,
    environments_with_versions: Tuple[Dict[str, uuid.UUID],
                                      List[datetime.datetime]]):
    """Test querying desired state versions with paging, using different sorting parameters."""
    environments, timestamps = environments_with_versions
    env = environments["multiple_versions"]
    order_by_column = "version"

    result = await client.list_desired_state_versions(
        env,
        filter={
            "date": [f"gt:{timestamps[1].astimezone(datetime.timezone.utc)}"]
        },
    )
    assert result.code == 200
    assert len(result.result["data"]) == 7
    all_versions_in_expected_order = sorted(result.result["data"],
                                            key=itemgetter(order_by_column),
                                            reverse=order == "DESC")
    all_versions_in_expected_order = version_numbers(
        all_versions_in_expected_order)

    result = await client.list_desired_state_versions(
        env,
        limit=2,
        sort=f"{order_by_column}.{order}",
        filter={
            "date": [f"gt:{timestamps[1].astimezone(datetime.timezone.utc)}"]
        },
    )
    assert result.code == 200
    assert len(result.result["data"]) == 2
    assert version_numbers(
        result.result["data"]) == all_versions_in_expected_order[:2]

    assert result.result["metadata"] == {
        "total": 7,
        "before": 0,
        "after": 5,
        "page_size": 2
    }
    assert result.result["links"].get("next") is not None
    assert result.result["links"].get("prev") is None

    port = get_bind_port()
    base_url = "http://localhost:%s" % (port, )
    http_client = AsyncHTTPClient()

    # Test link for next page
    url = f"""{base_url}{result.result["links"]["next"]}"""
    assert "limit=2" in url
    assert "filter.date=" in url
    request = HTTPRequest(
        url=url,
        headers={"X-Inmanta-tid": str(env)},
    )
    response = await http_client.fetch(request, raise_error=False)
    assert response.code == 200
    response = json.loads(response.body.decode("utf-8"))
    assert version_numbers(
        response["data"]) == all_versions_in_expected_order[2:4]
    assert response["links"].get("prev") is not None
    assert response["links"].get("next") is not None
    assert response["metadata"] == {
        "total": 7,
        "before": 2,
        "after": 3,
        "page_size": 2
    }

    # Test link for next page
    url = f"""{base_url}{response["links"]["next"]}"""
    # The filters should be present for the links as well
    assert "limit=2" in url
    assert "filter.date=" in url
    request = HTTPRequest(
        url=url,
        headers={"X-Inmanta-tid": str(env)},
    )
    response = await http_client.fetch(request, raise_error=False)
    assert response.code == 200
    response = json.loads(response.body.decode("utf-8"))
    next_page_versions = version_numbers(response["data"])
    assert next_page_versions == all_versions_in_expected_order[4:6]
    assert response["links"].get("prev") is not None
    assert response["links"].get("next") is not None
    assert response["metadata"] == {
        "total": 7,
        "before": 4,
        "after": 1,
        "page_size": 2
    }

    # Test link for previous page
    url = f"""{base_url}{response["links"]["prev"]}"""
    assert "limit=2" in url
    assert "filter.date=" in url
    request = HTTPRequest(
        url=url,
        headers={"X-Inmanta-tid": str(env)},
    )
    response = await http_client.fetch(request, raise_error=False)
    assert response.code == 200
    response = json.loads(response.body.decode("utf-8"))
    prev_page_versions = version_numbers(response["data"])
    assert prev_page_versions == all_versions_in_expected_order[2:4]
    assert response["links"].get("prev") is not None
    assert response["links"].get("next") is not None
    assert response["metadata"] == {
        "total": 7,
        "before": 2,
        "after": 3,
        "page_size": 2
    }

    result = await client.list_desired_state_versions(
        env,
        limit=100,
        sort=f"{order_by_column}.{order}",
        filter={
            "date": [f"gt:{timestamps[1].astimezone(datetime.timezone.utc)}"]
        },
    )
    assert result.code == 200
    assert len(result.result["data"]) == 7
    assert version_numbers(
        result.result["data"]) == all_versions_in_expected_order

    assert result.result["metadata"] == {
        "total": 7,
        "before": 0,
        "after": 0,
        "page_size": 100
    }
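Note: the test walks `links["next"]` and `links["prev"]` by hand. A generic walk of such paging links might look like this (hypothetical helper, reusing the test's header convention):

import json
from tornado.httpclient import AsyncHTTPClient, HTTPRequest

async def fetch_all_pages(base_url, first_link, headers):
    # Follow "next" links until the server stops returning one.
    items, link, client = [], first_link, AsyncHTTPClient()
    while link is not None:
        resp = await client.fetch(HTTPRequest(url=base_url + link, headers=headers))
        page = json.loads(resp.body.decode('utf-8'))
        items.extend(page['data'])
        link = page['links'].get('next')
    return items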
Example #19
    def author_first_submit(self):
        # The document is not part of an existing submission.
        journal_id = self.get_argument('journal_id')
        self.submission = Submission()
        self.submission.submitter = self.user
        self.submission.journal_id = journal_id
        self.submission.save()
        self.revision = SubmissionRevision()
        self.revision.submission = self.submission
        self.revision.version = "1.0.0"
        version = "1.0.0"
        # Connect a new document to the submission.
        title = self.get_argument('title')
        abstract = self.get_argument('abstract')
        contents = self.get_argument('contents')
        bibliography = self.get_argument('bibliography')
        image_ids = list(
            filter(None,
                   self.get_argument('image_ids').split(',')))
        document = Document()
        journal = Journal.objects.get(id=journal_id)
        document.owner = journal.editor
        document.title = title
        document.contents = contents
        document.bibliography = bibliography
        document.save()
        for id in image_ids:
            image = Image.objects.filter(id=id).first()
            if image is None:
                image = Image()
                image.pk = id
                image.uploader = journal.editor
                with open(
                        os.path.join(settings.PROJECT_PATH,
                                     "base/static/img/error.png"),
                        'rb') as f:
                    image.image.save('error.png', File(f))
                image.save()
            DocumentImage.objects.create(document=document,
                                         image=image,
                                         title='')
        self.revision.document = document
        self.revision.save()

        fidus_url = '{protocol}://{host}'.format(
            protocol=self.request.protocol, host=self.request.host)

        post_data = {
            'username': self.user.username.encode('utf8'),
            'title': title.encode('utf8'),
            'abstract': abstract.encode('utf8'),
            'first_name': self.get_argument('firstname').encode('utf8'),
            'last_name': self.get_argument('lastname').encode('utf8'),
            'email': self.user.email.encode('utf8'),
            'affiliation': self.get_argument('affiliation').encode('utf8'),
            'author_url': self.get_argument('author_url').encode('utf8'),
            'journal_id': journal.ojs_jid,
            'fidus_url': fidus_url,
            'fidus_id': self.submission.id,
            'version': version
        }

        body = urlencode(post_data)
        key = journal.ojs_key
        base_url = journal.ojs_url
        url = base_url + self.plugin_path + 'authorSubmit'
        http = AsyncHTTPClient()
        http.fetch(HTTPRequest(url_concat(url, {'key': key}), 'POST', None,
                               body),
                   callback=self.on_author_first_submit_response)
Example #20
    def handle_server(user, server_name, server, server_status):
        """Handle (maybe) culling a single server

        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server['pending'])
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get('ready', bool(server['url'])):
            app_log.warning("Not culling not-ready not-pending server %s: %s",
                            log_name, server)
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server_status['last_activity']:
            inactive = now - parse_date(server_status['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info("Culling server %s (age: %s, inactive for %s)",
                             log_name, format_td(age), format_td(inactive))
                should_cull = True

        if not should_cull:
            app_log.debug("Not culling server %s (age: %s, inactive for %s)",
                          log_name, format_td(age), format_td(inactive))
            return False

        if server_name:
            # culling a named server
            delete_url = url + "/users/%s/servers/%s" % (quote(
                user['name']), quote(server['name']))
        else:
            delete_url = url + '/users/%s/server' % quote(user['name'])

        req = HTTPRequest(
            url=delete_url,
            method='DELETE',
            headers=auth_header,
        )
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning(
                "Server %s is slow to stop",
                log_name,
            )
            # return False to prevent culling user with pending shutdowns
            return False
        return True
Example #21
def main():
    # give other services a moment to come up in this example
    sleep(1)

    parse_command_line()

    base_url = os.getenv('BASE_GATEWAY_HTTP_URL', 'http://localhost:8888')
    base_ws_url = os.getenv('BASE_GATEWAY_WS_URL', 'ws://localhost:8888')

    print(base_url)
    print(base_ws_url)

    client = AsyncHTTPClient()
    kernel_id = options.kernel_id
    if not kernel_id:
        response = yield client.fetch(
            '{}/api/kernels'.format(base_url),
            method='POST',
            #            auth_username='******',
            #            auth_password='******',
            body=json_encode({'name': options.lang}))
        kernel = json_decode(response.body)
        kernel_id = kernel['id']
        print(
            '''Created kernel {0}. Connect other clients with the following command:
            docker-compose run client --kernel-id={0}
            '''.format(kernel_id))

    ws_req = HTTPRequest(
        url='{}/api/kernels/{}/channels'.format(base_ws_url,
                                                url_escape(kernel_id))  #,
        #        auth_username='******',
        #        auth_password='******'
    )
    ws = yield websocket_connect(ws_req)
    print('Connected to kernel websocket')
    for x in range(0, options.times):
        print('Sending message {} of {}'.format(x + 1, options.times))
        msg_id = uuid4().hex
        # Send an execute request
        ws.write_message(
            json_encode({
                'header': {
                    'username': '',
                    'version': '5.0',
                    'session': '',
                    'msg_id': msg_id,
                    'msg_type': 'execute_request'
                },
                'parent_header': {},
                'channel': 'shell',
                'content': {
                    'code': options.code,
                    'silent': False,
                    'store_history': False,
                    'user_expressions': {},
                    'allow_stdin': False
                },
                'metadata': {},
                'buffers': {}
            }))

        # Look for stream output for the print in the execute
        while True:
            msg = yield ws.read_message()
            msg = json_decode(msg)
            msg_type = msg['msg_type']
            print('Received message type:', msg_type)
            if msg_type == 'error':
                print('ERROR')
                print(msg)
                break
            parent_msg_id = msg['parent_header']['msg_id']
            if msg_type == 'stream' and parent_msg_id == msg_id:
                print('  Content:', msg['content']['text'])
                break

    ws.close()
Example #22
    def handle_user(user):
        """Handle one user.

        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if 'servers' in user:
            servers = user['servers']
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user['server']:
                servers[''] = {
                    'last_activity': user['last_activity'],
                    'pending': user['pending'],
                    'url': user['server'],
                }

        results = []  # stays empty if the user has no route in the proxy
        resp_proxy = yield fetch(req_proxy)
        proxy_routes = json.loads(resp_proxy.body.decode('utf8', 'replace'))
        # gets actual users server url to get its status
        if '/user/%s/' % (user['name']) in proxy_routes:
            user_server_url = proxy_routes['/user/%s/' %
                                           (user['name'])]['target']
            req_user_server_status = HTTPRequest(
                url=user_server_url + '/user/%s/api/status' % (user['name']),
                headers=auth_header,
            )

            resp_server_status = yield fetch(req_user_server_status)
            server_status = json.loads(
                resp_server_status.body.decode('utf8', 'replace'))

            server_futures = [
                handle_server(user, server_name, server, server_status)
                for server_name, server in servers.items()
            ]
            results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug("Not culling user %s with %i servers still alive",
                          user['name'], still_alive)
            return False

        should_cull = False
        if user.get('created'):
            age = now - parse_date(user['created'])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if user['last_activity']:
            inactive = now - parse_date(user['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user['name'],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info("Culling user %s (age: %s, inactive for %s)",
                             user['name'], format_td(age), format_td(inactive))
                should_cull = True

        if not should_cull:
            app_log.debug("Not culling user %s (created: %s, last active: %s)",
                          user['name'], format_td(age), format_td(inactive))
            return False

        req = HTTPRequest(
            url=url + '/users/%s' % user['name'],
            method='DELETE',
            headers=auth_header,
        )
        yield fetch(req)
        return True
Example #23
 def _mk_connection(self):
     request = HTTPRequest(
         'ws://localhost:{}/connect?sub=test_channel'.format(self.port),
         headers={'Authorization': 'Test'})
     return websocket.websocket_connect(request)
Example #24
def cull_idle(url,
              api_token,
              inactive_limit,
              cull_users=False,
              max_age=0,
              concurrency=10):
    """Shutdown idle single-user servers

    If cull_users, inactive *users* will be deleted as well.
    """
    auth_header = {
        'Authorization': 'token %s' % api_token,
    }
    req_users = HTTPRequest(
        url=url + '/users',
        headers=auth_header,
    )

    req_proxy = HTTPRequest(
        url=url + '/proxy',
        headers=auth_header,
    )

    now = datetime.now(timezone.utc)
    client = AsyncHTTPClient()

    if concurrency:
        semaphore = Semaphore(concurrency)

        @coroutine
        def fetch(req):
            """client.fetch wrapped in a semaphore to limit concurrency"""
            yield semaphore.acquire()
            try:
                return (yield client.fetch(req))
            finally:
                yield semaphore.release()
    else:
        fetch = client.fetch

    resp_users = yield fetch(req_users)
    users = json.loads(resp_users.body.decode('utf8', 'replace'))

    futures = []

    @coroutine
    def handle_server(user, server_name, server, server_status):
        """Handle (maybe) culling a single server

        Returns True if server is now stopped (user removable),
        False otherwise.
        """
        log_name = user['name']
        if server_name:
            log_name = '%s/%s' % (user['name'], server_name)
        if server.get('pending'):
            app_log.warning("Not culling server %s with pending %s", log_name,
                            server['pending'])
            return False

        # jupyterhub < 0.9 defined 'server.url' once the server was ready
        # as an *implicit* signal that the server was ready.
        # 0.9 adds a dedicated, explicit 'ready' field.
        # By current (0.9) definitions, servers that have no pending
        # events and are not ready shouldn't be in the model,
        # but let's check just to be safe.

        if not server.get('ready', bool(server['url'])):
            app_log.warning("Not culling not-ready not-pending server %s: %s",
                            log_name, server)
            return False

        if server.get('started'):
            age = now - parse_date(server['started'])
        else:
            # started may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if server_status['last_activity']:
            inactive = now - parse_date(server_status['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'started' field which is never None
            # for running servers
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling server %s (inactive for %s)", log_name,
                         format_td(inactive))

        if max_age and not should_cull:
            # only check started if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info("Culling server %s (age: %s, inactive for %s)",
                             log_name, format_td(age), format_td(inactive))
                should_cull = True

        if not should_cull:
            app_log.debug("Not culling server %s (age: %s, inactive for %s)",
                          log_name, format_td(age), format_td(inactive))
            return False

        if server_name:
            # culling a named server
            delete_url = url + "/users/%s/servers/%s" % (quote(
                user['name']), quote(server['name']))
        else:
            delete_url = url + '/users/%s/server' % quote(user['name'])

        req = HTTPRequest(
            url=delete_url,
            method='DELETE',
            headers=auth_header,
        )
        resp = yield fetch(req)
        if resp.code == 202:
            app_log.warning(
                "Server %s is slow to stop",
                log_name,
            )
            # return False to prevent culling user with pending shutdowns
            return False
        return True

    @coroutine
    def handle_user(user):
        """Handle one user.

        Create a list of their servers, and async exec them.  Wait for
        that to be done, and if all servers are stopped, possibly cull
        the user.
        """
        # shutdown servers first.
        # Hub doesn't allow deleting users with running servers.
        # jupyterhub 0.9 always provides a 'servers' model.
        # 0.8 only does this when named servers are enabled.
        if 'servers' in user:
            servers = user['servers']
        else:
            # jupyterhub < 0.9 without named servers enabled.
            # create servers dict with one entry for the default server
            # from the user model.
            # only if the server is running.
            servers = {}
            if user['server']:
                servers[''] = {
                    'last_activity': user['last_activity'],
                    'pending': user['pending'],
                    'url': user['server'],
                }

        results = []  # stays empty if the user has no route in the proxy
        resp_proxy = yield fetch(req_proxy)
        proxy_routes = json.loads(resp_proxy.body.decode('utf8', 'replace'))
        # gets actual users server url to get its status
        if '/user/%s/' % (user['name']) in proxy_routes:
            user_server_url = proxy_routes['/user/%s/' %
                                           (user['name'])]['target']
            req_user_server_status = HTTPRequest(
                url=user_server_url + '/user/%s/api/status' % (user['name']),
                headers=auth_header,
            )

            resp_server_status = yield fetch(req_user_server_status)
            server_status = json.loads(
                resp_server_status.body.decode('utf8', 'replace'))

            server_futures = [
                handle_server(user, server_name, server, server_status)
                for server_name, server in servers.items()
            ]
            results = yield multi(server_futures)
        if not cull_users:
            return
        # some servers are still running, cannot cull users
        still_alive = len(results) - sum(results)
        if still_alive:
            app_log.debug("Not culling user %s with %i servers still alive",
                          user['name'], still_alive)
            return False

        should_cull = False
        if user.get('created'):
            age = now - parse_date(user['created'])
        else:
            # created may be undefined on jupyterhub < 0.9
            age = None

        # check last activity
        # last_activity can be None in 0.9
        if user['last_activity']:
            inactive = now - parse_date(user['last_activity'])
        else:
            # no activity yet, use start date
            # last_activity may be None with jupyterhub 0.9,
            # which introduces the 'created' field which is never None
            inactive = age

        should_cull = (inactive is not None
                       and inactive.total_seconds() >= inactive_limit)
        if should_cull:
            app_log.info("Culling user %s (inactive for %s)", user['name'],
                         inactive)

        if max_age and not should_cull:
            # only check created if max_age is specified
            # so that we can still be compatible with jupyterhub 0.8
            # which doesn't define the 'started' field
            if age is not None and age.total_seconds() >= max_age:
                app_log.info("Culling user %s (age: %s, inactive for %s)",
                             user['name'], format_td(age), format_td(inactive))
                should_cull = True

        if not should_cull:
            app_log.debug("Not culling user %s (created: %s, last active: %s)",
                          user['name'], format_td(age), format_td(inactive))
            return False

        req = HTTPRequest(
            url=url + '/users/%s' % user['name'],
            method='DELETE',
            headers=auth_header,
        )
        yield fetch(req)
        return True

    for user in users:
        futures.append((user['name'], handle_user(user)))

    for (name, f) in futures:
        try:
            result = yield f
        except Exception:
            app_log.exception("Error processing %s", name)
        else:
            if result:
                app_log.debug("Finished culling %s", name)
Example #25
    def authenticate(self, handler, data=None):
        code = handler.get_argument("code")
        # TODO: Configure the curl_httpclient for tornado
        http_client = AsyncHTTPClient()

        # Exchange the OAuth code for a GitLab Access Token
        #
        # See: https://github.com/gitlabhq/gitlabhq/blob/master/doc/api/oauth2.md

        # GitLab specifies a POST request yet requires URL parameters
        params = dict(
            client_id=self.client_id,
            client_secret=self.client_secret,
            code=code,
            grant_type="authorization_code",
            redirect_uri=self.get_callback_url(handler),
        )

        validate_server_cert = self.validate_server_cert

        url = url_concat("%s/oauth/token" % GITLAB_URL, params)

        req = HTTPRequest(
            url,
            method="POST",
            headers={"Accept": "application/json"},
            validate_cert=validate_server_cert,
            body=''  # Body is required for a POST...
        )

        resp = yield http_client.fetch(req)
        resp_json = json.loads(resp.body.decode('utf8', 'replace'))

        access_token = resp_json['access_token']

        # Determine who the logged in user is
        req = HTTPRequest("%s/user" % GITLAB_API,
                          method="GET",
                          validate_cert=validate_server_cert,
                          headers=_api_headers(access_token))
        resp = yield http_client.fetch(req)
        resp_json = json.loads(resp.body.decode('utf8', 'replace'))

        username = resp_json["username"]
        user_id = resp_json["id"]
        is_admin = resp_json.get("is_admin", False)

        # Check if user is a member of any whitelisted groups or projects.
        # These checks are performed here, as it requires `access_token`.
        user_in_group = user_in_project = False
        is_group_specified = is_project_id_specified = False

        if self.gitlab_group_whitelist:
            is_group_specified = True
            user_in_group = yield self._check_group_whitelist(
                user_id, access_token)

        # We skip project_id check if user is in whitelisted group.
        if self.gitlab_project_id_whitelist and not user_in_group:
            is_project_id_specified = True
            user_in_project = yield self._check_project_id_whitelist(
                user_id, access_token)

        no_config_specified = not (is_group_specified
                                   or is_project_id_specified)

        if (is_group_specified and user_in_group) or \
            (is_project_id_specified and user_in_project) or \
                no_config_specified:
            return {
                'name': username,
                'auth_state': {
                    'access_token': access_token,
                    'gitlab_user': resp_json,
                }
            }
        else:
            self.log.warning("%s not in group or project whitelist", username)
            return None
Example #26
    def test_auth_token(self):
        """All server endpoints should check the configured auth token."""
        # Set token requirement
        app = self.get_app()
        app.settings['kg_auth_token'] = 'fake-token'

        # Request the API without the token
        response = yield self.http_client.fetch(self.get_url('/api'),
                                                method='GET',
                                                raise_error=False)
        self.assertEqual(response.code, 401)

        # Now with it
        response = yield self.http_client.fetch(
            self.get_url('/api'),
            method='GET',
            headers={'Authorization': 'token fake-token'},
            raise_error=False)
        self.assertEqual(response.code, 200)

        # Request kernelspecs without the token
        response = yield self.http_client.fetch(
            self.get_url('/api/kernelspecs'), method='GET', raise_error=False)
        self.assertEqual(response.code, 401)

        # Now with it
        response = yield self.http_client.fetch(
            self.get_url('/api/kernelspecs'),
            method='GET',
            headers={'Authorization': 'token fake-token'},
            raise_error=False)
        self.assertEqual(response.code, 200)

        # Request a kernel without the token
        response = yield self.http_client.fetch(self.get_url('/api/kernels'),
                                                method='POST',
                                                body='{}',
                                                raise_error=False)
        self.assertEqual(response.code, 401)

        # Request with the token now
        response = yield self.http_client.fetch(
            self.get_url('/api/kernels'),
            method='POST',
            body='{}',
            headers={'Authorization': 'token fake-token'},
            raise_error=False)
        self.assertEqual(response.code, 201)

        kernel = json_decode(response.body)
        # Request kernel info without the token
        response = yield self.http_client.fetch(
            self.get_url('/api/kernels/' + url_escape(kernel['id'])),
            method='GET',
            raise_error=False)
        self.assertEqual(response.code, 401)

        # Now with it
        response = yield self.http_client.fetch(
            self.get_url('/api/kernels/' + url_escape(kernel['id'])),
            method='GET',
            headers={'Authorization': 'token fake-token'},
            raise_error=False)
        self.assertEqual(response.code, 200)

        # Request websocket connection without the token
        ws_url = 'ws://localhost:{}/api/kernels/{}/channels'.format(
            self.get_http_port(), url_escape(kernel['id']))
        # No option to ignore errors so try/except
        try:
            ws = yield websocket_connect(ws_url)
        except Exception as ex:
            self.assertEqual(ex.code, 401)
        else:
            self.fail('no exception raised')

        # Now request the websocket with the token
        ws_req = HTTPRequest(ws_url,
                             headers={'Authorization': 'token fake-token'})
        ws = yield websocket_connect(ws_req)
        ws.close()
Example #27
 def test_default_set(self):
     proxy = _RequestProxy(
         HTTPRequest("http://example.com/"), dict(network_interface="foo")
     )
     self.assertEqual(proxy.network_interface, "foo")
Example #28
    def __init__(self, url, rows, cols, msg_handler, user_token):
        super().__init__(url, rows, cols, msg_handler)

        headers = {"Authorization": f"Bearer {user_token}"}
        self.url = HTTPRequest(url, headers=headers, validate_cert=False)
        self.connect()
Example #29
 def test_neither_set(self):
     proxy = _RequestProxy(HTTPRequest("http://example.com/"), dict())
     self.assertIs(proxy.auth_username, None)
Example #30
#coding=utf-8

from tornado.httpclient import HTTPClient,HTTPRequest


# GET request
def getRequest(url):
    hc = HTTPClient()
    response = hc.fetch(url)
    return response.body

# print(getRequest('http://www.bjsxt.com'))


# POST request
def postRequest(request):
    hc = HTTPClient()
    response = hc.fetch(request)
    return response.body

req = HTTPRequest(url='http://www.bjsxt.com',
                  method='POST',
                  headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
                                         'AppleWebKit/537.36 (KHTML, like Gecko) '
                                         'Chrome/68.0.3440.106 Safari/537.36'},
                  body='uname=zhangsan')
print(postRequest(req))
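Note: the blocking `HTTPClient` holds its resources until `close()` is called, so a sketch with explicit cleanup:

from tornado.httpclient import HTTPClient

def get_request_closing(url):
    hc = HTTPClient()
    try:
        return hc.fetch(url).body
    finally:
        hc.close()  # release the client explicitly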