def load_jupyter_server_extension(nb_app):
    """Load the urth_import server extension.

    Registers the import handler at /urth_import and a static file
    handler for the bower-managed components directory, and writes a
    one-time .bowerrc so bower installs run non-interactively.

    :param nb_app: the NotebookApp instance
    """
    global logger
    global widgets_dir
    logger = nb_app.log
    logger.info('Loading urth_import server extension.')
    web_app = nb_app.web_app
    widgets_dir = get_nbextension_path()

    # Write out a .bowerrc file to configure bower installs to
    # not be interactive and not to prompt for analytics
    bowerrc = os.path.join(widgets_dir, '.bowerrc')
    if os.access(bowerrc, os.F_OK) is not True:
        logger.debug('Writing .bowerrc at {0}'.format(bowerrc))
        # 'w' (not 'a'): the file is only written when it does not yet
        # exist, so append mode offered no benefit and could duplicate
        # the content if this branch ever ran twice.
        with open(bowerrc, 'w') as f:
            f.write("""{ "analytics": false, "interactive": false, "directory": "urth_components" }""")

    # The import handler serves from /urth_import and any requests
    # containing /urth_components/ will get served from the actual
    # urth_components directory.
    import_route_pattern = url_path_join(web_app.settings['base_url'], '/urth_import')
    components_route_pattern = url_path_join(web_app.settings['base_url'], '/urth_components/(.*)')
    components_path = os.path.join(widgets_dir, 'urth_components/')

    # Register the Urth import handler and static file handler.
    logger.debug('Adding handlers for {0} and {1}'.format(import_route_pattern, components_route_pattern))
    web_app.add_handlers('.*$', [
        (import_route_pattern, UrthImportHandler, dict(executor=ThreadPoolExecutor(max_workers=1))),
        (components_route_pattern, FileFindHandler, {'path': [components_path]})
    ])
def create_request_handlers(self):
    """Create default Jupyter handlers and redefine them off of the
    base_url path.  Assumes init_configurables() has already been called.
    """
    base = self.parent.base_url
    # Activity monitor endpoint for websocket mode.
    handlers = [(
        url_path_join('/', base, r'/_api/activity'),
        ActivityHandler,
        {}
    )]
    # Standard kernel gateway endpoints, re-rooted at the base_url.
    standard = (default_api_handlers + default_kernel_handlers +
                default_kernelspec_handlers + default_session_handlers +
                default_base_handlers)
    for spec in standard:
        rerooted = url_path_join('/', base, spec[0])
        # Keep any handler args that follow the class reference.
        handlers.append((rerooted,) + tuple(spec[1:]))
    return handlers
def get(self):
    """Proxy a GET request to the Spark UI and rewrite relative links.

    The portion of the request URI after EXTENSION_URL is forwarded to
    the Spark host.  Text responses have their href/src attributes
    rewritten to point back through this proxy (``self.web_app`` holds
    the proxy's base URL string, despite the name); other content is
    passed through untouched.  If Spark is unreachable a JSON error
    payload is returned instead.
    """
    if not self.request.uri.startswith(EXTENSION_URL):
        raise_error("URI did not start with " + EXTENSION_URL)
    spark_request = self.spark_host + self.request.uri[len(EXTENSION_URL):]
    try:
        spark_response = requests.get(spark_request)
        # Use .get() so a missing content-type header falls through to
        # the binary branch instead of raising an uncaught KeyError
        # (only RequestException is handled below).
        content_type = spark_response.headers.get('content-type', '')
        self.set_header("Content-Type", content_type)
        if "text" in content_type:
            # Replace all the relative links with our proxy links
            soup = BeautifulSoup(spark_response.text, "html.parser")
            for has_href in ['a', 'link']:
                for a in soup.find_all(has_href):
                    if "href" in a.attrs:
                        a['href'] = url_path_join(self.web_app, a['href'])
            for has_src in ['img', 'script']:
                for a in soup.find_all(has_src):
                    if "src" in a.attrs:
                        a['src'] = url_path_join(self.web_app, a['src'])
            client_response = str(soup)
        else:
            # Probably binary response, send it directly.
            client_response = spark_response.content
    except requests.exceptions.RequestException:
        client_response = json.dumps({"error": "SPARK_NOT_RUNNING"})
    self.write(client_response)
    self.flush()
def load_jupyter_server_extension(nb_server_app):
    """Register the gist create/download/list handlers on the web app."""
    # Extract our gist client details from the config:
    cfg = nb_server_app.config["NotebookApp"]
    BaseHandler.client_id = cfg["oauth_client_id"]
    BaseHandler.client_secret = cfg["oauth_client_secret"]

    web_app = nb_server_app.web_app
    base_url = web_app.settings['base_url']
    routes = [
        (url_path_join(base_url, '/create_gist'), GistHandler),
        (url_path_join(base_url, '/download_notebook'), DownloadNotebookHandler),
        (url_path_join(base_url, 'load_user_gists'), LoadGistHandler),
    ]
    web_app.add_handlers('.*$', routes)
def test_old_files_redirect(self):
    """pre-2.0 'files/' prefixed links are properly redirected"""
    nbdir = self.notebook_dir
    os.mkdir(pjoin(nbdir, 'files'))
    os.makedirs(pjoin(nbdir, 'sub', 'files'))

    for prefix in ('', 'sub'):
        # Fixtures: f1/f2 live under files/, f2/f3 also at the prefix root.
        for relpath, suffix in ((('files', 'f1.txt'), '/files/f1'),
                                (('files', 'f2.txt'), '/files/f2'),
                                (('f2.txt',), '/f2'),
                                (('f3.txt',), '/f3')):
            with open(pjoin(nbdir, prefix, *relpath), 'w') as f:
                f.write(prefix + suffix)

        # Served straight from files/ when the path exists there.
        for fname, expected in (('f1.txt', '/files/f1'),
                                ('f2.txt', '/files/f2')):
            r = self.request(
                'GET', url_path_join('notebooks', prefix, 'files', fname))
            self.assertEqual(r.status_code, 200)
            self.assertEqual(r.text, prefix + expected)

        # f3 only exists outside files/, so the deprecated files/ URL
        # must resolve to the bare path.
        r = self.request(
            'GET', url_path_join('notebooks', prefix, 'files', 'f3.txt'))
        self.assertEqual(r.status_code, 200)
        self.assertEqual(r.text, prefix + '/f3')
def test_hidden_files(self):
    """Files in or named with a leading dot must not be served."""
    not_hidden = [
        u'å b',
        u'å b/ç. d',
    ]
    hidden = [
        u'.å b',
        u'å b/.ç d',
    ]
    nbdir = self.notebook_dir

    # Create every directory with one visible and one hidden file.
    for d in not_hidden + hidden:
        path = pjoin(nbdir, d.replace('/', os.sep))
        if not os.path.exists(path):
            os.mkdir(path)
        for name, content in (('foo', 'foo'), ('.foo', '.foo')):
            with open(pjoin(path, name), 'w') as f:
                f.write(content)

    # Visible directories: the visible file is served, the hidden one 404s.
    for d in not_hidden:
        r = self.request('GET', url_path_join('files', d, 'foo'))
        r.raise_for_status()
        self.assertEqual(r.text, 'foo')
        r = self.request('GET', url_path_join('files', d, '.foo'))
        self.assertEqual(r.status_code, 404)

    # Hidden directories: nothing inside is served.
    for d in hidden:
        for foo in ('foo', '.foo'):
            r = self.request('GET', url_path_join('files', d, foo))
            self.assertEqual(r.status_code, 404)
def get(self):
    """Run a full-text search and return result metadata as JSON.

    Query string args:
        qs: the search query (required)
        reindex: 'true' (default) to refresh the index before searching

    Each result is annotated with an edit/notebook URL, a tree URL for
    its containing directory, and paths relative to the working dir.
    """
    query_string = self.get_query_argument('qs')
    # The comparison already yields a bool; the bool(...) wrapper was
    # redundant.
    reindex = self.get_query_argument('reindex', 'true') == 'true'
    if reindex:
        self.index.update_index()
    results, total = self.index.search(query_string)
    for result in results:
        rel_path = result['path'][self.work_dir_len:]
        if rel_path.endswith('.ipynb'):
            # take it at face value that the extension implies notebook
            url = url_path_join(self.base_url, 'notebooks', rel_path)
        else:
            url = url_path_join(self.base_url, 'edit', rel_path)
        # Add URLs
        result['url'] = url
        result['tree_url'] = url_path_join(self.base_url, 'tree',
                                           os.path.dirname(rel_path))
        # Add relative paths
        result['rel_dirname'] = os.path.dirname(rel_path)
        result['rel_path'] = rel_path
    self.write(dict(results=results, total=total))
    self.finish()
def create_request_handlers(self):
    """Create handlers and redefine them off of the base_url path.

    Assumes init_configurables() has already been called, and that the
    seed source was available there.

    Returns
    -------
    list
        Tornado (pattern, handler, kwargs) tuples: the optional download
        and static handlers, one NotebookAPIHandler per discovered
        endpoint, the swagger spec handler, and the catch-all last.

    Raises
    ------
    RuntimeError
        If no annotated endpoint cells are found in the seed source.
    """
    handlers = []

    # Register the NotebookDownloadHandler if configuration allows
    if self.allow_notebook_download:
        path = url_path_join("/", self.parent.base_url, r"/_api/source")
        self.log.info("Registering resource: {}, methods: (GET)".format(path))
        handlers.append((path, NotebookDownloadHandler, {"path": self.parent.seed_uri}))

    # Register a static path handler if configuration allows
    if self.static_path is not None:
        path = url_path_join("/", self.parent.base_url, r"/public/(.*)")
        self.log.info("Registering resource: {}, methods: (GET)".format(path))
        handlers.append((path, tornado.web.StaticFileHandler, {"path": self.static_path}))

    # Discover the notebook endpoints and their implementations
    endpoints = self.api_parser.endpoints(self.parent.kernel_manager.seed_source)
    response_sources = self.api_parser.endpoint_responses(self.parent.kernel_manager.seed_source)
    if len(endpoints) == 0:
        raise RuntimeError(
            "No endpoints were discovered. Check your notebook to make sure your cells are annotated correctly."
        )

    # Cycle through the (endpoint_path, source) tuples and register their handlers
    for endpoint_path, verb_source_map in endpoints:
        parameterized_path = parameterize_path(endpoint_path)
        parameterized_path = url_path_join("/", self.parent.base_url, parameterized_path)
        self.log.info(
            "Registering resource: {}, methods: ({})".format(parameterized_path, list(verb_source_map.keys()))
        )
        # Response metadata is optional per endpoint; default to empty.
        response_source_map = response_sources[endpoint_path] if endpoint_path in response_sources else {}
        handler_args = {
            "sources": verb_source_map,
            "response_sources": response_source_map,
            "kernel_pool": self.kernel_pool,
            "kernel_name": self.parent.kernel_manager.seed_kernelspec,
        }
        handlers.append((parameterized_path, NotebookAPIHandler, handler_args))

    # Register the swagger API spec handler
    path = url_path_join("/", self.parent.base_url, r"/_api/spec/swagger.json")
    handlers.append(
        (
            path,
            SwaggerSpecHandler,
            {
                "notebook_path": self.parent.seed_uri,
                "source_cells": self.parent.seed_notebook.cells,
                "cell_parser": self.api_parser,
            },
        )
    )
    self.log.info("Registering resource: {}, methods: (GET)".format(path))

    # Add the 404 catch-all last
    handlers.append(default_base_handlers[-1])
    return handlers
def redirect_to_files(self, path):
    """Redirect a request for ``path`` to the /tree or /files view.

    Kept self-contained so the logic can be reused from other handlers.
    Directories go to /tree.  Files go to /files; a pre-2.0-style
    'files/' path segment is stripped when (and only when) keeping it
    would 404.

    Raises
    ------
    web.HTTPError
        404 if the path exists neither with nor without 'files/'.
    """
    cm = self.contents_manager
    if cm.dir_exists(path):
        # it's a *directory*, redirect to /tree
        url = url_path_join(self.base_url, 'tree', url_escape(path))
    else:
        orig_path = path
        # otherwise, redirect to /files
        parts = path.split('/')
        if not cm.file_exists(path=path) and 'files' in parts:
            # redirect without files/ iff it would 404
            # this preserves pre-2.0-style 'files/' links
            self.log.warning("Deprecated files/ URL: %s", orig_path)
            parts.remove('files')
            path = '/'.join(parts)
        if not cm.file_exists(path=path):
            raise web.HTTPError(404)
        url = url_path_join(self.base_url, 'files', url_escape(path))
    self.log.debug("Redirecting %s to %s", self.request.path, url)
    self.redirect(url)
def load_jupyter_server_extension(nb_app):
    """Load the nb anaconda client extension"""
    webapp = nb_app.web_app
    prefix = webapp.settings['base_url']
    handlers = [
        (url_path_join(prefix, r"/ac-publish"), PublishHandler),
        (url_path_join(prefix, r"/ac-login"), WhoAmIHandler),
    ]
    webapp.add_handlers(".*$", handlers)
    nb_app.log.info("Enabling nb_anacondanotebook")
def load_jupyter_server_extension(nb_app):
    """Load the nb_anacondacloud client extension"""
    webapp = nb_app.web_app
    prefix = webapp.settings['base_url']
    namespace = r'anaconda-cloud'
    routes = [
        (url_path_join(prefix, namespace, r"publish"), PublishHandler),
        (url_path_join(prefix, namespace, r"login"), WhoAmIHandler),
    ]
    webapp.add_handlers(".*$", routes)
    nb_app.log.info("[nb_anacondacloud] enabled")
def send_file(file_path, dashboard_name, handler): ''' Posts a file to the Jupyter Dashboards Server to be served as a dashboard :param file_path: The path of the file to send :param dashboard_name: The dashboard name under which it should be made available ''' # Make information about the request Host header available for use in # constructing the urls segs = handler.request.host.split(':') hostname = segs[0] if len(segs) > 1: port = segs[1] else: port = '' protocol = handler.request.protocol # Treat empty as undefined dashboard_server = os.getenv('DASHBOARD_SERVER_URL') if dashboard_server: dashboard_server = dashboard_server.format(protocol=protocol, hostname=hostname, port=port) upload_url = url_path_join(dashboard_server, UPLOAD_ENDPOINT, escape.url_escape(dashboard_name, False)) with open(file_path, 'rb') as file_content: headers = {} token = os.getenv('DASHBOARD_SERVER_AUTH_TOKEN') if token: headers['Authorization'] = 'token {}'.format(token) result = requests.post(upload_url, files={'file': file_content}, headers=headers, timeout=60, verify=not skip_ssl_verification()) if result.status_code >= 400: raise web.HTTPError(result.status_code) # Redirect to link specified in response body res_body = result.json() if 'link' in res_body: redirect_link = res_body['link'] else: # Compute redirect link using environment variables # First try redirect URL as it might be different from internal upload URL redirect_server = os.getenv('DASHBOARD_REDIRECT_URL') if redirect_server: redirect_root = redirect_server.format(hostname=hostname, port=port, protocol=protocol) else: redirect_root = dashboard_server redirect_link = url_path_join(redirect_root, VIEW_ENDPOINT, escape.url_escape(dashboard_name, False)) handler.redirect(redirect_link) else: access_log.debug('Can not deploy, DASHBOARD_SERVER_URL not set') raise web.HTTPError(500, log_message='No dashboard server configured')
def load_jupyter_server_extension(nbapp):
    """Register the beakerx data-browser endpoints on the notebook app."""
    web_app = nbapp.web_app
    base_url = web_app.settings['base_url']
    # One (suffix, handler) pair per endpoint, all rooted at base_url.
    for suffix, handler in (('/version', VersionHandler),
                            ('/listfolder', ListFolderHandler),
                            ('/getfile', GetFileHandler)):
        web_app.add_handlers('.*$', [(url_path_join(base_url, suffix), handler)])
    nbapp.log.info("[beakerx_databrowser] enabled")
def load_jupyter_server_extension(nb_server_app):
    """Wire the Spark UI proxy handler into the notebook web app."""
    # Extract our Spark server details from the config:
    cfg = nb_server_app.config["NotebookApp"]
    SparkHandler.spark_host = cfg.get("spark_host", "http://localhost:4040")
    web_app = nb_server_app.web_app
    proxy_base = url_path_join(web_app.settings['base_url'], EXTENSION_URL)
    # Everything beneath the proxy base is handled by SparkHandler.
    web_app.add_handlers(".*$", [(proxy_base + ".*", SparkHandler)])
    # The handler rewrites proxied links against its own base URL.
    SparkHandler.web_app = proxy_base
def test_default_kernel(self):
    # POST request
    r = self.kern_api._req('POST', '')
    kern1 = r.json()
    self.assertEqual(
        r.headers['location'],
        url_path_join(self.url_prefix, 'api/kernels', kern1['id']))
    self.assertEqual(r.status_code, 201)
    self.assertIsInstance(kern1, dict)

    # The response must carry the default CSP with a prefix-relative
    # report-uri.
    report_uri = url_path_join(self.url_prefix, 'api/security/csp-report')
    expected_csp = ("frame-ancestors 'self'; "
                    "report-uri " + report_uri + "; "
                    "default-src 'none'")
    self.assertEqual(r.headers['Content-Security-Policy'], expected_csp)
def _req(self, verb, path, body=None, params=None):
    """Issue a contents-API request and fail on HTTP error statuses."""
    url = url_path_join('api/contents', path)
    response = self.request(verb, url, data=body, params=params)
    response.raise_for_status()
    return response
def _get_bluemix_app(self, abs_nb_path):
    '''
    Creates a temporary git repository containing the application bundle.
    Redirects the user's browser to the Bluemix deploy URL with a pointer
    back to the git repository.

    :param abs_nb_path: absolute path of the notebook to bundle
    '''
    md = self._create_app_bundle(abs_nb_path, '.git')
    # Add the Cloud Foundry manifest before converting to a git repo.
    converter.add_cf_manifest(
        md['bundle_dir'],
        md['kernel_server'],
        md['notebook_basename'],
        md['tmpnb_mode']
    )
    converter.to_git_repository(md['bundle_dir'])
    # The jupyter_server already includes the base_url
    bundle_url_path = url_path_join('bundle',
        md['bundle_id'],
        md['notebook_basename'] + '.git'
    )
    # Include repository URL as the argument to deployer
    repository = escape.url_escape(md['jupyter_server'] + bundle_url_path)
    self.redirect(BLUEMIX_DEPLOY + repository)
def nb_open(filename, profile='default', open_browser=True, fLOG=fLOG):
    """
    open a notebook with an existing server, if no server can be found,
    it starts a new one (and the function runs until the server is closed)

    @param      filename        notebook
    @param      profile         profile to use
    @param      open_browser    open browser or not
    @param      fLOG            logging function
    @return                     a running server or None if not found
    """
    filename = os.path.abspath(filename)
    server_inf = find_best_server(filename, profile)
    if server_inf is None:
        # No running server can serve this file: launch a dedicated one.
        fLOG("Starting new server")
        home_dir = os.path.dirname(filename)
        from notebook import notebookapp
        return notebookapp.launch_new_instance(
            file_to_run=os.path.abspath(filename),
            notebook_dir=home_dir,
            open_browser=open_browser,
            # Avoid it seeing our own argv
            argv=[],
        )
    # Reuse the server already rooted above the notebook.
    from notebook.utils import url_path_join
    fLOG("Using existing server at", server_inf['notebook_dir'])
    rel = os.path.relpath(filename, start=server_inf['notebook_dir'])
    url = url_path_join(server_inf['url'], 'notebooks', rel)
    webbrowser.open(url, new=2)
    return server_inf
def _req(self, verb, section, body=None):
    """Send a config-API request for *section*; raise on HTTP errors."""
    target = url_path_join(self.base_url, 'api/config', section)
    response = requests.request(verb, target, data=body)
    response.raise_for_status()
    return response
def init_webapp(self):
    """
    Initialize tornado web application with kernel handlers.  Put the
    kernel manager in settings to appease handlers that try to reference
    it there.  Include additional options in settings as well.
    """
    # Redefine handlers off the base_url path
    handlers = []
    all_specs = (default_kernel_handlers +
                 default_kernelspec_handlers +
                 default_base_handlers)
    for spec in all_specs:
        # Re-root each pattern at the base_url, keeping any handler args
        # that follow the class reference.
        pattern = url_path_join(self.base_url, spec[0])
        handlers.append((pattern,) + tuple(spec[1:]))

    self.web_app = web.Application(
        handlers=handlers,
        kernel_manager=self.kernel_manager,
        kernel_spec_manager=self.kernel_manager.kernel_spec_manager,
        kg_auth_token=self.auth_token,
        kg_allow_credentials=self.allow_credentials,
        kg_allow_headers=self.allow_headers,
        kg_allow_methods=self.allow_methods,
        kg_allow_origin=self.allow_origin,
        kg_expose_headers=self.expose_headers,
        kg_max_age=self.max_age,
        kg_max_kernels=self.max_kernels,
    )
def _req(self, verb, path, body=None, params=None):
    """Hit the nbconvert endpoint and raise for HTTP error statuses."""
    target = url_path_join(self.base_url, 'nbconvert', path)
    response = requests.request(verb, target, data=body, params=params)
    response.raise_for_status()
    return response
def test_from_post_zip(self):
    # Fetch the notebook model, convert to LaTeX, and expect a zip
    # archive in response.
    model_url = url_path_join(self.base_url(),
                              'api/contents/foo/testnb.ipynb')
    model = requests.get(model_url).json()
    r = self.nbconvert_api.from_post(format='latex', nbmodel=model)
    self.assertIn(u'application/zip', r.headers['Content-Type'])
    self.assertIn(u'.zip', r.headers['Content-Disposition'])
def get(self, path):
    """Redirect the deprecated /api/notebooks endpoint to /api/contents."""
    # logging.warn() is a deprecated alias of warning(); use the
    # canonical name.
    self.log.warning("/api/notebooks is deprecated, use /api/contents")
    self.redirect(url_path_join(
        self.base_url, 'api/contents', path
    ))
def test_download(self):
    """Content-Disposition is an attachment only with ?download=1."""
    nbdir = self.notebook_dir.name
    base = self.base_url()

    text = 'hello'
    with open(pjoin(nbdir, 'test.txt'), 'w') as f:
        f.write(text)

    file_url = url_path_join(base, 'files', 'test.txt')

    # Plain fetch: served inline.
    r = requests.get(file_url)
    disposition = r.headers.get('Content-Disposition', '')
    self.assertNotIn('attachment', disposition)

    # Explicit download: attachment with the filename set.
    r = requests.get(file_url + '?download=1')
    disposition = r.headers.get('Content-Disposition', '')
    self.assertIn('attachment', disposition)
    self.assertIn('filename="test.txt"', disposition)
def bundle(handler, abs_nb_path): ''' Uploads a notebook to a Jupyter Dashboard Server ''' # Get name of notebook from filename notebook_basename = os.path.basename(abs_nb_path) notebook_name = os.path.splitext(notebook_basename)[0] # Make information about the request Host header available for use in # constructing the urls segs = handler.request.host.split(':') hostname = segs[0] if len(segs) > 1: port = segs[1] else: port = '' protocol = handler.request.protocol # Treat empty as undefined dashboard_server = os.getenv('DASHBOARD_SERVER_URL') if dashboard_server: dashboard_server = dashboard_server.format(protocol=protocol, hostname=hostname, port=port) upload_url = url_path_join(dashboard_server, UPLOAD_ENDPOINT, escape.url_escape(notebook_name, False)) with open(abs_nb_path, 'rb') as notebook: headers = {} token = os.getenv('DASHBOARD_SERVER_AUTH_TOKEN') if token: # TODO: server side should expect Authorization: token <value> headers['Authorization'] = token result = requests.post(upload_url, files={'file': notebook}, headers=headers, timeout=60) if result.status_code >= 400: raise web.HTTPError(result.status_code) # Redirect for client might be different from internal upload URL redirect_server = os.getenv('DASHBOARD_REDIRECT_URL') if redirect_server: redirect_root = redirect_server.format(hostname=hostname, port=port, protocol=protocol) else: redirect_root = dashboard_server handler.redirect(url_path_join(redirect_root, VIEW_ENDPOINT, escape.url_escape(notebook_name, False))) else: access_log.debug('Can not deploy, DASHBOARD_SERVER_URL not set') raise web.HTTPError(500, log_message='No dashboard server configured')
def _kernel_id_to_url(self, kernel_id):
    """Builds a url for the given kernel UUID.

    Parameters
    ----------
    kernel_id: kernel UUID
    """
    escaped_id = url_escape(str(kernel_id))
    return url_path_join(self.kernels_endpoint, escaped_id)
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded.

    Args:
        nb_server_app (NotebookWebApplication): handle to the Notebook
            webserver instance.
    """
    web_app = nb_server_app.web_app
    base_url = web_app.settings['base_url']
    # All three URL shapes route to the same narrative handler; the
    # capture group carries the narrative reference.
    patterns = (
        r'(ws\.\d+\.obj\.\d+.*)',
        r'(ws\.\d+)$',
        r'(\d+)$',
    )
    web_app.add_handlers(
        '.*$',
        [(url_path_join(base_url, p), NarrativeMainHandler)
         for p in patterns])
def websocket(self, id):
    """Synchronously open a websocket to the kernel's channels endpoint."""
    loop = IOLoop()
    # http -> ws (or https -> wss) on the first occurrence only.
    ws_base = self.base_url.replace('http', 'ws', 1)
    req = HTTPRequest(
        url_path_join(ws_base, 'api/kernels', id, 'channels'),
        headers=self.headers,
    )
    future = websocket_connect(req, io_loop=loop)
    # Drive the connect future to completion on the private loop.
    return loop.run_sync(lambda: future)
def load_jupyter_server_extension(nb_app):
    """Register the /search endpoint backed by SearchHandler."""
    web_app = nb_app.web_app
    search_url = url_path_join(web_app.settings['base_url'], '/search')
    # The handler needs the notebook dir to compute relative paths.
    web_app.add_handlers('.*$', [
        (search_url, SearchHandler, dict(work_dir=nb_app.notebook_dir)),
    ])
def load_jupyter_server_extension(nb_app):
    """Register the bundler API endpoint on the notebook web app."""
    web_app = nb_app.web_app
    # Bundler IDs are restricted to word characters.
    bundler_id_regex = r'(?P<bundler_id>[A-Za-z0-9_]+)'
    route_url = url_path_join(web_app.settings['base_url'],
                              '/api/bundlers/%s' % bundler_id_regex)
    handler_spec = (route_url, BundlerHandler,
                    {'notebook_dir': nb_app.notebook_dir})
    web_app.add_handlers('.*$', [handler_spec])
def proxy(self):
    """Forward the current contents request to the JupyterHub API.

    The portion of the request URI after 'contents/' is appended to
    ``$JUPYTERHUB_API_URL/notebooks/``; GET requests are forwarded
    without a body, all other methods pass the body through.  The
    upstream status code and text are relayed to the client.

    NOTE(review): assumes the request URI always contains 'contents/';
    a URI without it raises IndexError — confirm routing guarantees it.
    """
    request = self.request
    body = request.body
    hub_api_url = environ.get('JUPYTERHUB_API_URL')
    url = url_path_join(hub_api_url, 'notebooks/' + request.uri.split('contents/')[1])
    if request.method == "GET":
        response = yield maybe_future(
            self._api_request(method=request.method, url=url))
    else:
        response = yield maybe_future(
            self._api_request(method=request.method, url=url, data=body))
    self.set_status(response.status_code)
    self.finish(response.text)
def content_security_policy(self):
    """The default Content-Security-Policy header

    Can be overridden by defining Content-Security-Policy in
    settings['headers']
    """
    custom_headers = self.settings.get('headers', {})
    if 'Content-Security-Policy' in custom_headers:
        # user-specified, don't override
        return custom_headers['Content-Security-Policy']
    # Make sure the report-uri is relative to the base_url
    report = self.settings.get(
        'csp_report_uri', url_path_join(self.base_url, csp_report_uri))
    return "frame-ancestors 'self'; report-uri " + report
def load_jupyter_server_extension(nbapp):
    """ Called during notebook start """
    resuseconfig = ResourceUseDisplay(parent=nbapp)
    nbapp.web_app.settings["nbresuse_display_config"] = resuseconfig
    base_url = nbapp.web_app.settings["base_url"]
    metrics_route = url_path_join(base_url, "/api/kernels/metrics")
    nbapp.web_app.add_handlers(".*", [(metrics_route, ApiHandler)])
    # Poll system metrics once a second and feed them to Prometheus.
    prometheus = PrometheusHandler(PSUtilMetricsLoader(nbapp))
    ioloop.PeriodicCallback(prometheus, 1000).start()
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded.

    Args:
        nb_server_app (NotebookWebApplication): handle to the Notebook
            webserver instance.
    """
    web_app = nb_server_app.web_app
    route = url_path_join(web_app.settings['base_url'], '/jupyter-text2code')
    web_app.add_handlers('.*$', [(route, JupyterText2CodeHandler)])
    print("loaded_jupyter_server_extension: jupyter-text2code")
def setup_handlers(web_app, host_whitelist):
    """Register the local and remote proxy routes.

    Remote routes take a host (validated by the handler against
    *host_whitelist*) and a port; local routes take just a port.  The
    /absolute/ variants keep the proxied prefix in URLs passed to the
    backing server.
    """
    # The original defined an unused `host_pattern` local; add_handlers
    # has always been called with '.*', so the dead variable is removed.
    base_url = web_app.settings['base_url']
    web_app.add_handlers('.*', [
        (url_path_join(base_url, r'/proxy/(.*):(\d+)(.*)'),
         RemoteProxyHandler, {
             'absolute_url': False,
             'host_whitelist': host_whitelist
         }),
        (url_path_join(base_url, r'/proxy/absolute/(.*):(\d+)(.*)'),
         RemoteProxyHandler, {
             'absolute_url': True,
             'host_whitelist': host_whitelist
         }),
        (url_path_join(base_url, r'/proxy/(\d+)(.*)'),
         LocalProxyHandler, {
             'absolute_url': False
         }),
        (url_path_join(base_url, r'/proxy/absolute/(\d+)(.*)'),
         LocalProxyHandler, {
             'absolute_url': True
         }),
    ])
def setup_handlers(web_app: "NotebookWebApplication", config: PRConfig):
    """Register the pull-request handlers for the configured provider."""
    base_url = url_path_join(web_app.settings["base_url"], NAMESPACE)
    logger = get_logger()

    manager_class = MANAGERS.get(config.provider)
    if manager_class is None:
        logger.error(f"No manager defined for provider '{config.provider}'.")
        raise NotImplementedError()
    manager = manager_class(config.api_base_url, config.access_token)

    # One route per default handler, each getting its own kwargs dict.
    routes = [
        (url_path_join(base_url, pattern), handler,
         {"logger": logger, "manager": manager})
        for pattern, handler in default_handlers
    ]
    web_app.add_handlers(".*$", routes)
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded.

    Args:
        nb_server_app (NotebookWebApplication): handle to the Notebook
            webserver instance.
    """
    web_app = nb_server_app.web_app
    nb_server_app.log.info("Loading the AdHoc serverextension")
    adhoc_route = url_path_join(web_app.settings['base_url'], 'adhoc')
    nb_server_app.log.info(adhoc_route)
    web_app.add_handlers('.*$', [(adhoc_route, AdHocHandler)])
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded.

    Args:
        nb_server_app (NotebookWebApplication): handle to the Notebook
            webserver instance.
    """
    web_app = nb_server_app.web_app
    base_url = web_app.settings['base_url']
    # (URL pattern, handler) pairs; the capture groups carry the handler
    # arguments (paths, offsets, feature flags).
    specs = (
        ('/filesize/(.+$)', FileSizeHandler),
        ('/filedate/(.+$)', FileDateHandler),
        ('/table_view/(.+$)', ViewTableHandler),
        ('/file_content/([^/]+)/(-?[0-9]+)/(-?[0-9]+$)', FileContentHandler),
        ('/draw_chat/([^/]+)/([0-2])/([0-9]+)/([0-9]+)/([0-9]+)/([0-9]+$)',
         DrawChatHandler),
        ('/sort_content/([^/]+)/([0-9]+$)', SortContentHandler),
        ('/data_feature/([^/]+)/([0-9])/([0-1])/([0-9]+$)',
         DataFeatureHandler),
        ('/line_num/(.+$)', FileLineNumberHandler),
        ('/file_feature/([^/]+)/([0-9]+$)', FileFeatureHandler),
    )
    web_app.add_handlers(
        '.*$',
        [(url_path_join(base_url, pattern), handler)
         for pattern, handler in specs])
def setup_handlers(web_app: NotebookWebApplication) -> None:
    """Register the code-formatter API endpoints on the web app."""
    base_url = web_app.settings["base_url"]
    routes = (
        ("/jupyterlab_code_formatter/formatters", FormattersAPIHandler),
        ("/jupyterlab_code_formatter/format", FormatAPIHandler),
        ("/jupyterlab_code_formatter/version", VersionAPIHandler),
    )
    for suffix, handler in routes:
        web_app.add_handlers(
            ".*$", [(url_path_join(base_url, suffix), handler)])
async def post(self, path: str = ""):
    """Create a new file in the specified path.

    POST /jupyter-project/files/<parent-file-path>

    Creates a new file applying the parameters to the Jinja template.

    Request json body:
        Dictionary of parameters for the Jinja template.
    """
    if self.template is None:
        raise tornado.web.HTTPError(
            404, reason="File Jinja template not found.")

    cm = self.contents_manager
    params = self.get_json_body()

    try:
        # Render the target file name itself from the request params.
        default_name = self.default_name.render(**params)
    except TemplateError as error:
        self.log.warning(
            f"Fail to render the default name for template '{self.template.name}'"
        )
        # Fall back to the contents manager's untitled-file name.
        default_name = cm.untitled_file

    # Preserve the full (possibly multi-part) extension of the template.
    ext = "".join(Path(self.template.name).suffixes)
    filename = default_name + ext
    filename = cm.increment_filename(filename, path)
    fullpath = url_path_join(path, filename)

    realpath = Path(cm.root_dir).absolute() / url2path(fullpath)
    if not realpath.parent.exists():
        realpath.parent.mkdir(parents=True)

    current_loop = tornado.ioloop.IOLoop.current()
    try:
        # Render in an executor so a slow template does not block the
        # event loop, then write the result to disk.
        content = await current_loop.run_in_executor(
            None, functools.partial(self.template.render, **params))
        realpath.write_text(content)
    except (OSError, TemplateError) as error:
        raise tornado.web.HTTPError(
            500,
            log_message=
            f"Fail to generate the file from template {self.template.name}.",
            reason=repr(error),
        )

    # Return the contents model (without body) for the new file.
    model = cm.get(fullpath, content=False, type="file", format="text")
    self.set_status(201)
    self.finish(json.dumps(model, default=date_default))
def get(self, relfpath, uri, **kwargs):
    """Resolve *uri* within the HDF5 file at *relfpath* (relative to the
    notebook dir) and return its contents via ``self._get``.

    All failures are funneled through the local ``_handleErr``, which
    attaches the request variables as debug info, logs, and raises
    ``HTTPError``.
    """
    def _handleErr(code, msg):
        # Attach the request variables so errors are traceable.
        extra = dict((
            ("relfpath", relfpath),
            ("uri", uri),
            *kwargs.items(),
        ))
        if isinstance(msg, dict):
            # encode msg as json
            msg["debugVars"] = {**msg.get("debugVars", {}), **extra}
            msg = simplejson.dumps(msg, ignore_nan=True)
        else:
            msg = "\n".join(
                (msg, ", ".join(f"{key}: {val}" for key, val in extra.items())))
        self.log.error(msg)
        raise HTTPError(code, msg)

    if not relfpath:
        msg = f"The request was malformed; fpath should not be empty."
        _handleErr(400, msg)

    fpath = url_path_join(self.notebook_dir, relfpath)

    if not os.path.exists(fpath):
        # NOTE(review): 403 (not 404) for a missing file — confirm
        # clients rely on this before changing.
        msg = f"The request specified a file that does not exist."
        _handleErr(403, msg)
    else:
        try:
            # test opening the file with h5py
            with h5py.File(fpath, "r") as f:
                pass
        except Exception as e:
            msg = f"The request did not specify a file that `h5py` could understand.\n" f"Error: {traceback.format_exc()}"
            _handleErr(401, msg)
        try:
            out = self._get(fpath, uri, **kwargs)
        except JhdfError as e:
            # JhdfError carries a dict payload; enrich and report as 400.
            msg = e.args[0]
            msg["traceback"] = traceback.format_exc()
            msg["type"] = "JhdfError"
            _handleErr(400, msg)
        except Exception as e:
            msg = f"Found and opened file, error getting contents from object specified by the uri.\n" f"Error: {traceback.format_exc()}"
            _handleErr(500, msg)
    return out
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded.

    Args:
        nb_server_app (NotebookWebApplication): handle to the Notebook
            webserver instance.
    """
    web_app = nb_server_app.web_app
    route_pattern = url_path_join(web_app.settings['base_url'],
                                  'higlass/([0-9]+)/(.*)')
    # Per-user socket directory for the proxied higlass server.
    sockets_dir = os.path.join('/tmp', getpass.getuser(), 'higlass')
    handler_spec = (route_pattern, HiGlassProxyHandler,
                    dict(sockets_dir=sockets_dir))
    web_app.add_handlers('.*$', [handler_spec])
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded.

    Args:
        nb_server_app (NotebookWebApplication): handle to the Notebook
            webserver instance.
    """
    print("The jupyterlab-comments server extension has been loaded")
    web_app = nb_server_app.web_app
    base_url = web_app.settings['base_url']
    routes = (
        ('/detachedComments', DetachedCommentsHandler),
        ('/reviewComments', ReviewCommentsHandler),
        ('/refreshInterval', RefreshIntervalHandler),
        ('/remotePull', PullFromRemoteRepoHandler),
    )
    web_app.add_handlers(
        '.*$',
        [(url_path_join(base_url, suffix), handler)
         for suffix, handler in routes])
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded.

    Args:
        nb_server_app (NotebookApp): handle to the Notebook webserver
            instance.
    """
    web_app = nb_server_app.web_app
    # Prepend the base_url so that it works in a jupyterhub setting
    endpoint = url_path_join(web_app.settings['base_url'], 'latex')
    handler_kwargs = {"notebook_dir": nb_server_app.notebook_dir}
    web_app.add_handlers(
        '.*$', [(f'{endpoint}{path_regex}', LatexHandler, handler_kwargs)])
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded.

    Args:
        nb_server_app (NotebookWebApplication): handle to the Notebook
            webserver instance.
    """
    cluster_id_regex = r"(?P<cluster_id>\w+-\w+-\w+-\w+-\w+)"
    web_app = nb_server_app.web_app
    base_url = web_app.settings["base_url"]

    def _route(suffix):
        # Root a dask endpoint at the notebook server's base_url.
        return url_path_join(base_url, suffix)

    handlers = [
        (_route("dask/clusters/" + cluster_id_regex), DaskClusterHandler),
        (_route("dask/clusters/" + "?"), DaskClusterHandler),
        (_route(f"dask/dashboard/{cluster_id_regex}/(?P<proxied_path>.+)"),
         DaskDashboardHandler),
        (_route(f"dask/dashboard/{cluster_id_regex}/" + "?"),
         DaskDashboardHandler),
    ]
    web_app.add_handlers(".*$", handlers)
def get(self):
    """Proxy a GET request to the Spark UI, rewriting relative links.

    For HTML responses, <a>/<link> hrefs and <img>/<script> srcs are
    rewritten to point back through this proxy; other content types are
    passed through unchanged. If Spark is unreachable (or times out), a
    JSON error payload is returned instead.
    """
    if not self.request.uri.startswith(self.full_url):
        raise_error('Request URI did not start with %s' % self.full_url)
    spark_url = self.host + self.request.uri[len(self.full_url):]
    try:
        # Bounded timeout so a hung Spark UI cannot stall this handler
        # indefinitely; timeouts surface as RequestException below.
        spark_response = requests.get(spark_url, timeout=30)
        content_type = spark_response.headers['content-type']
        self.set_header('Content-Type', content_type)
        if 'text/html' in content_type:
            # Replace all the relative links with our proxy links
            soup = BeautifulSoup(spark_response.text, 'html.parser')
            # (tag names, url attribute) pairs whose links need rewriting.
            rewrite_map = ((['a', 'link'], 'href'), (['img', 'script'], 'src'))
            for tag_names, attr in rewrite_map:
                for tag_name in tag_names:
                    for tag in soup.find_all(tag_name):
                        if attr in tag.attrs:
                            tag[attr] = url_path_join(self.full_url, tag[attr])
            client_response = str(soup)
        else:
            # Probably binary response, send it directly.
            client_response = spark_response.content
    except requests.exceptions.RequestException:
        client_response = json.dumps({'error': 'SPARK_NOT_RUNNING'})
    self.write(client_response)
    self.flush()
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded.

    :param nb_server_app: NotebookWebApplication
    :return:
    """
    here = os.path.dirname(__file__)
    nb_server_app.log.info(
        "\n\n Megaclite Jupyter Extension loaded from %s \n\n" % here)
    web_app = nb_server_app.web_app
    # Route all /megaclite/* requests to the Megaclite handler.
    route = url_path_join(web_app.settings['base_url'], '/megaclite/(.*)')
    web_app.add_handlers('.*$', [(route, Megaclite)])
def __init__(self, model, cfg=None, height=600, backend='nengo'):
    """Start a nengo GUI server for `model` and record where it is served.

    A temporary config file is created via IPython when none is supplied.
    """
    self.height = height
    self._started = False
    if cfg is None:
        # No config given: use a throwaway temp file from the IPython shell.
        cfg = get_ipython().mktempfile()
    self.server = self.start_server(cfg, model)
    self.port = self.server.server.server_port
    self.server.server.settings.prefix = None
    self.server.server.page_settings.backend = backend
    self.resource = url_path_join(
        '/nengo', str(self.port), str(self.server.server.get_resource()))
def _req(self, verb: str, path: List[str], body=None, params=None):
    """Issue an HTTP request against the service rooted at ``self.url``.

    The body, when given, is JSON-encoded. For 4xx/5xx responses the
    server-supplied "message" field (when parseable) replaces the reason
    before ``raise_for_status`` propagates the error.
    """
    payload = None if body is None else json.dumps(body)
    response = self.request(
        verb, url_path_join(self.url, *path), data=payload, params=params
    )
    if 400 <= response.status_code < 600:
        # Prefer the server's own error message in the raised exception;
        # fall back silently if the body is not the expected JSON shape.
        try:
            response.reason = response.json()["message"]
        except Exception:
            pass
        response.raise_for_status()
    return response
def load_jupyter_server_extension(nb_app):
    '''Loads server extension.'''
    nb_app.log.info('Loaded nb2kg extension')
    # TODO: There is no clean way to override existing handlers that are already
    # registered with the Tornado application. The first handler to match the
    # URL will handle the request, so we must prepend our handlers to override
    # the existing ones.
    web_app = nb_app.web_app
    _, handlers = web_app.handlers[0]
    base_url = web_app.settings['base_url']
    # Insert in reverse so position 0 ends up holding the first handler,
    # preserving the original relative order at the front of the list.
    for spec in reversed(ext_handlers):
        override = URLSpec(url_path_join(base_url, spec[0]), *spec[1:])
        nb_app.log.info('Overriding handler %s' % override)
        handlers.insert(0, override)
async def __get_content(self, url: str, filename: str, sha: str) -> str:
    """Fetch the raw content of `filename` at commit `sha`, or "" if absent.

    Any GitHub error other than a 404 is re-raised to the caller.
    """
    link = url_concat(
        url_path_join(url, "contents", filename),
        {"ref": sha},
    )
    try:
        content = await self._call_github(
            link, media_type="application/vnd.github.v3.raw", load_json=False)
    except HTTPError as err:
        # A missing file comes back as 404; treat that as empty content.
        if err.status_code == 404:
            return ""
        raise err
    return content
def load_jupyter_server_extension(nb_server_app):
    """Called by Jupyter when starting the notebook manager."""
    nb_server_app.log.addFilter(_ColabLoggingFilter())
    app = nb_server_app.web_app
    base_url = app.settings['base_url']

    def _url(path):
        # Root a colab endpoint at the server's base_url.
        return utils.url_path_join(base_url, path)

    resource_args = {'kernel_manager': app.settings['kernel_manager']}
    app.add_handlers('.*$', [
        (_url('/api/colab/resources'), _handlers.ResourceUsageHandler,
         resource_args),
        (_url('/api/colab/drive'), _handlers.DriveHandler),
    ])
    nb_server_app.log.info('google.colab serverextension initialized.')
def test_modify_kernel_name(self):
    """Restarting a session under the same kernel name yields a fresh kernel."""
    before = self.sess_api.create('foo/nb1.ipynb').json()
    sid = before['id']
    after = self.sess_api.modify_kernel_name(
        sid, before['kernel']['name']).json()
    # Session identity and notebook are stable; only the kernel is replaced.
    self.assertEqual(after['id'], sid)
    self.assertEqual(after['notebook'], before['notebook'])
    self.assertNotEqual(after['kernel']['id'], before['kernel']['id'])
    # check kernel list, to be sure previous kernel was cleaned up
    resp = requests.get(url_path_join(self.base_url(), 'api/kernels'))
    resp.raise_for_status()
    self.assertEqual(resp.json(), [after['kernel']])
def load_jupyter_server_extension(nb_server_app):
    """Called by Jupyter when starting the notebook manager."""
    app = nb_server_app.web_app
    base_url = app.settings['base_url']
    # Endpoint path -> handler class; each route is rooted at base_url.
    routes = [
        ('/api/chunked-contents' + handlers.path_regex,
         _handlers.ChunkedFileDownloadHandler),
        ('/api/colab/resources', _handlers.ResourceUsageHandler),
    ]
    app.add_handlers('.*$', [
        (utils.url_path_join(base_url, path), cls) for path, cls in routes
    ])
    nb_server_app.log.info('google.colab serverextension initialized.')
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded - Adds API to server

    :param nb_server_app: handle to the Notebook webserver instance.
    :type nb_server_app: NotebookWebApplication
    """
    _init_sub_folders()
    web_app = nb_server_app.web_app
    retrieve_route = url_path_join(web_app.settings['base_url'], '/retrieve')
    web_app.add_handlers('.*$', [(retrieve_route, RetrieveHandler)])
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded.

    Args:
        nb_server_app (NotebookWebApplication): handle to the Notebook
            webserver instance.
    """
    web_app = nb_server_app.web_app
    nb_server_app.log.info("egal enabled!")
    # Serve draw requests; the capture group carries the requested resource.
    download_pattern = url_path_join(web_app.settings['base_url'],
                                     '/draw/(.+)')
    web_app.add_handlers('.*$', [(download_pattern, DrawHandler)])
def load_jupyter_server_extension(nb_server_app):
    """
    Called when the extension is loaded.

    Args:
        nb_server_app (NotebookApp): handle to the Notebook webserver instance.
    """
    web_app = nb_server_app.web_app
    # Prepend the base_url so that it works in a jupyterhub setting
    doc_url = url_path_join(web_app.settings['base_url'], 'my_doc')
    static_root = os.path.join(
        os.path.dirname(__file__), 'static', 'doc', 'html')
    web_app.add_handlers(
        '.*$',
        [(f'{doc_url}/(.*)', StaticFileHandler, {'path': static_root})],
    )
def load_jupyter_server_extension(nbapp):
    """Register the publish-to-S3 handlers on the notebook server.

    Reads S3 credential/endpoint configuration from PublishSettings and
    wires up the /hello (smoke test) and /publish_notebook endpoints.
    """
    from notebook.utils import url_path_join
    from .handlers import TestHandler, PublishS3Handler
    from .config import PublishSettings
    settings = PublishSettings(
        # add access to NotebookApp config, too
        parent=nbapp,
        # for convenient access to frontend settings
        config_manager=nbapp.config_manager,
    )
    config_s3 = settings.config['PublishSettings']
    notebook_dir = settings.config['NotebookApp']['notebook_dir']
    # notebook_dir may be an unset traitlet value rather than a string;
    # fall back to the empty string in that case (isinstance, not type()).
    if not isinstance(notebook_dir, str):
        notebook_dir = ''
    nbapp.log.info("notebook_dir " + notebook_dir)
    url = nbapp.web_app.settings['base_url']
    params = dict(
        nbapp=nbapp,
        access_key=config_s3['s3_access_key_id'],
        secret_key=config_s3['s3_secret_access_key'],
        endpoint_url=config_s3['s3_endpoint_url'],
        region_name=config_s3['s3_region_name'],
        bucket=config_s3['s3_bucket'],
        notebook_dir=notebook_dir,
    )
    nbapp.web_app.add_handlers(
        r'.*',  # match any host
        [
            (url_path_join(url, '/hello'), TestHandler),
            (url_path_join(url, '/publish_notebook'), PublishS3Handler, params),
        ]
    )
    nbapp.log.info("jupyter_extention_publish enabled!")
def post(self):
    """Start a new kernel and respond 201 with its model and a Location header."""
    km = self.kernel_manager
    model = self.get_json_body()
    # Fall back to the default kernel name when the body omits one
    # (or is missing entirely).
    if model is None:
        model = {'name': km.default_kernel_name}
    else:
        model.setdefault('name', km.default_kernel_name)
    kernel_id = km.start_kernel(kernel_name=model['name'])
    model = km.kernel_model(kernel_id)
    self.set_header(
        'Location',
        url_path_join(self.base_url, 'api', 'kernels', url_escape(kernel_id)),
    )
    self.set_status(201)
    self.finish(json.dumps(model))
def list_files(self, pr_id):
    """Return the pull request's changed files as simplified dicts."""
    results = yield self.call_github(url_path_join(pr_id, "/files"))
    # Keep only the fields the frontend needs from each file entry.
    data = [
        {
            'name': item["filename"],
            'status': item["status"],
            'additions': item["additions"],
            'deletions': item["deletions"],
        }
        for item in results
    ]
    return data