def put(self, function=''):
    """HTTP PUT handler: (re)register a notebook-backed function.

    Converts the notebook at the URL-decoded ``function`` path to a script,
    stores it via ``self.db.add_function``, and registers/removes a scheduler
    job depending on the function's schedule metadata.

    NOTE(review): the notebook is read and converted *before* the empty-name
    check, so an empty ``function`` still attempts ``nbformat.read('')`` —
    presumably relies on the caller always sending a name; verify.
    """
    script_exporter = ScriptExporter()
    # `function` doubles as the (URL-encoded) notebook path and the name.
    path = urllib.parse.unquote(function)
    out, resources = script_exporter.from_notebook_node(
        nbformat.read(path, as_version=4))
    data = {'name': function, 'script': out, 'raw': path}
    if function:
        self.logger.info('add functions {path}: {data}'.format(path=path,
                                                               data=data))
        self.db.add_function(**data)
        # Re-query to get the stored wrapper object for this function.
        func = self.db.query_function(function)
        func = func.front()
        func.logger = self.logger
        schedule = func.get_schedule()
        if schedule and isinstance(schedule, dict):
            # Scheduled invocations use a synthetic request payload.
            data = {'method': 'SCHEDULE', 'query': {}, 'body': {}}
            add_job(function, schedule, func, data)
            self.logger.info(
                'add function to schedule, current scheduled: {count}'.
                format(count=len(default_scheduler.jobs)))
        else:
            self.logger.info('schedule not found or not valid, '
                             'current scheduled: {count}'.format(
                                 count=len(default_scheduler.jobs)))
            # Drop any previously registered job for this function.
            remove_job(function)
        self.logger.info('handle add function {data}'.format(data=data))
        self.finish({'code': 'success'})
    else:
        self.finish({'code': 'fail', 'message': 'function name not found'})
def run(notebook, executable=None, rules=None):
    """Lint a notebook against its metadata rules and optionally run the
    exported script through an external linter.

    Parameters
    ----------
    notebook:
        Path (or stream) readable by ``nbformat.read``.
    executable: list, optional
        Command prefix (e.g. ``['flake8']``). The exported script path is
        appended (the list is mutated in place) and the command is run with
        its stdout/stderr captured.
    rules: dict, optional
        Rule overrides merged on top of the notebook's extracted metadata.

    Returns
    -------
    (str, bool)
        Accumulated lint output and whether all checks passed.
    """
    nb = nbformat.read(notebook, 4)
    extra_metadata = extract_extrametadata(nb)
    ret = ''
    passed = True
    rules = rules or {}
    extra_metadata.update(rules)

    if 'lines_per_cell' in extra_metadata:
        lines_per_cell = extra_metadata.get('lines_per_cell', -1)
        lintret, lintfail = lint_lines_per_cell(lines_per_cell, extra_metadata)
        # NOTE(review): this branch appends no trailing '\n' unlike the
        # others below — preserved as-is since output format may be relied on.
        ret += lintret
        passed = passed and lintfail

    if 'cells_per_notebook' in extra_metadata:
        cells_per_notebook = extra_metadata.get('cells_per_notebook', -1)
        lintret, lintfail = lint_cells_per_notebook(cells_per_notebook,
                                                    extra_metadata)
        ret += lintret + '\n'
        passed = passed and lintfail

    if 'function_definitions' in extra_metadata:
        function_definitions = extra_metadata.get('function_definitions', -1)
        lintret, lintfail = lint_function_definitions(function_definitions,
                                                      extra_metadata)
        ret += lintret + '\n'
        passed = passed and lintfail

    if 'class_definitions' in extra_metadata:
        class_definitions = extra_metadata.get('class_definitions', -1)
        lintret, lintfail = lint_class_definitions(class_definitions,
                                                   extra_metadata)
        ret += lintret + '\n'
        passed = passed and lintfail

    if 'cell_coverage' in extra_metadata:
        cell_coverage = extra_metadata.get('cell_coverage', 0)
        lintret, lintfail = lint_cell_coverage(cell_coverage, extra_metadata)
        ret += lintret + '\n'
        passed = passed and lintfail

    if executable:
        exp = ScriptExporter()
        (body, resources) = exp.from_notebook_node(nb)
        tf = NamedTemporaryFile(mode='w', suffix='.py', delete=False)
        try:
            tf.write(body)
            # BUG FIX: close (flush) before the external linter reads the
            # file — previously the file could be empty/partial, and is
            # locked against reopening on Windows.
            tf.close()
            executable.append(tf.name)
            ret2 = subprocess.run(executable, stdin=subprocess.PIPE,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE)
            # BUG FIX: stderr was decoded with the misspelled codec 'asii',
            # which raised LookupError whenever the linter wrote to stderr.
            ret += '\n' + ret2.stdout.decode('ascii') + '\n' + \
                ret2.stderr.decode('ascii')
        finally:
            # BUG FIX: remove the temp file even if the subprocess or decode
            # fails (previously leaked on any exception).
            os.remove(tf.name)

    return ret, passed
def param_rest_provider(files, endpoint):
    """
    Returns a Param based REST API given the scripts or notebooks containing
    the tranquilized functions.

    Arguments
    ---------
    files: list(str)
        A list of paths being served
    endpoint: str
        The endpoint to serve the REST API on

    Returns
    -------
    A Tornado routing pattern containing the route and handler
    """
    for path in files:
        ext = path.split('.')[-1]
        if ext == 'py':
            # Execute the plain script so its tranquilized functions register.
            try:
                run_path(path)
            except Exception:
                param.main.warning(
                    "Could not run app script on REST server startup.")
        elif ext == 'ipynb':
            try:
                import nbconvert  # noqa
            except ImportError:
                raise ImportError(
                    "Please install nbconvert to serve Jupyter Notebooks.")
            from nbconvert import ScriptExporter
            # Convert the notebook to a script and execute it from a temp
            # file placed next to the notebook (so relative paths resolve).
            script_source, _ = ScriptExporter().from_filename(path)
            notebook_dir = os.path.dirname(path)
            with tempfile.NamedTemporaryFile(mode='w', dir=notebook_dir,
                                             delete=True) as handle:
                handle.write(script_source)
                handle.flush()
                try:
                    # Stub get_ipython so notebook magics don't blow up.
                    run_path(handle.name,
                             init_globals={'get_ipython': MagicMock()})
                except Exception:
                    param.main.warning(
                        "Could not run app notebook on REST server startup.")
        else:
            raise ValueError(
                '{} is not a script (.py) or notebook (.ipynb)'.format(path))

    # Normalize the endpoint to a trailing slash before building the route.
    if endpoint and not endpoint.endswith('/'):
        endpoint += '/'
    route = r"^/%s.*" % endpoint if endpoint else r"^.*"
    return [(route, ParamHandler, dict(root=endpoint))]
def parse(self):
    """Convert the notebook at ``self.fn`` to a script, store its parsed AST
    on ``self.nodes``, and execute it to populate ``self.module``."""
    from nbconvert import ScriptExporter
    script_source, _ = ScriptExporter().from_filename(self.fn)
    self.nodes = ast.parse(script_source, self.fn)
    # Execute from a temp file next to the notebook so relative paths work;
    # get_ipython is stubbed so notebook magics don't fail.
    with tempfile.NamedTemporaryFile(mode='w', dir=dirname(self.fn),
                                     delete=True) as script_file:
        script_file.write(script_source)
        script_file.flush()
        self.module = run_path(
            script_file.name, init_globals={'get_ipython': MagicMock()})
def get_imported_modules(glob_path: str) -> Set[str]:
    """Return the set of module names imported by every notebook matching
    ``glob_path`` (scanned line-by-line via the module-level regexes)."""
    paths = glob.glob(glob_path, recursive=True)
    if len(paths) == 0:
        raise Exception(
            f"No notebooks found with glob {glob_path}, cwd {os.getcwd()}")
    exporter: Any = ScriptExporter()
    modules = set()
    for nb_path in paths:
        try:
            script, _ = exporter.from_filename(nb_path)
        except Exception as ex:
            # Unreadable notebooks are reported but don't abort the scan.
            capture_exception(ex)
            click.echo(f"Import checker cannot read {nb_path}")
            continue
        for line in script.split("\n"):
            # `from X import ...` takes precedence over plain `import X`.
            match = from_regexp.match(line) or import_regexp.match(line)
            if match:
                modules.add(match.group(1))
    return modules
def gen_tutorials(repo_dir: str) -> None:
    """Generate HTML tutorials for captum Docusaurus site from Jupyter notebooks.

    Also create ipynb and py versions of tutorial in Docusaurus site for
    download. Reads the tutorial list from website/tutorials.json under
    ``repo_dir``.
    """
    with open(os.path.join(repo_dir, "website", "tutorials.json"),
              "r") as infile:
        tutorial_config = json.loads(infile.read())
    # Flatten the config's categories into the set of unique tutorial ids.
    tutorial_ids = {x["id"] for v in tutorial_config.values() for x in v}
    for tid in tutorial_ids:
        print("Generating {} tutorial".format(tid))

        # convert notebook to HTML
        ipynb_in_path = os.path.join(repo_dir, "tutorials",
                                     "{}.ipynb".format(tid))
        with open(ipynb_in_path, "r") as infile:
            nb_str = infile.read()
        nb = nbformat.reads(nb_str, nbformat.NO_CONVERT)

        # displayname is absent from notebook metadata
        nb["metadata"]["kernelspec"]["display_name"] = "python3"

        exporter = HTMLExporter()
        html, meta = exporter.from_notebook_node(nb)

        # pull out html div for notebook
        soup = BeautifulSoup(html, "html.parser")
        nb_meat = soup.find("div", {"id": "notebook-container"})
        del nb_meat.attrs["id"]
        nb_meat.attrs["class"] = ["notebook"]
        html_out = JS_SCRIPTS + str(nb_meat)

        # generate html file
        html_out_path = os.path.join(repo_dir, "website", "_tutorials",
                                     "{}.html".format(tid))
        with open(html_out_path, "w") as html_outfile:
            html_outfile.write(html_out)

        # generate JS file (Docusaurus page wrapper for the HTML)
        script = TEMPLATE.format(tid)
        js_out_path = os.path.join(repo_dir, "website", "pages", "tutorials",
                                   "{}.js".format(tid))
        with open(js_out_path, "w") as js_outfile:
            js_outfile.write(script)

        # output tutorial in both ipynb & py form
        ipynb_out_path = os.path.join(repo_dir, "website", "static", "files",
                                      "{}.ipynb".format(tid))
        with open(ipynb_out_path, "w") as ipynb_outfile:
            # NOTE: writes the *original* notebook text, not the mutated `nb`.
            ipynb_outfile.write(nb_str)
        exporter = ScriptExporter()
        script, meta = exporter.from_notebook_node(nb)
        py_out_path = os.path.join(repo_dir, "website", "static", "files",
                                   "{}.py".format(tid))
        with open(py_out_path, "w") as py_outfile:
            py_outfile.write(script)
def gen_tutorials(repo_dir: str,
                  exec_tutorials: bool,
                  kernel_name: Optional[str] = None) -> None:
    """Generate HTML tutorials for Docusaurus Ax site from Jupyter notebooks.

    Also create ipynb and py versions of tutorial in Docusaurus site for
    download. When ``exec_tutorials`` is True, notebooks whose config has
    ``exec_on_build`` (default True) are executed first and their run time is
    embedded in the generated JS page.
    """
    with open(os.path.join(repo_dir, "website", "tutorials.json"),
              "r") as infile:
        tutorial_config = json.loads(infile.read())
    # flatten config dict
    tutorial_configs = [
        config for category in tutorial_config.values() for config in category
    ]
    # prepare paths for converted tutorials & files
    os.makedirs(os.path.join(repo_dir, "website", "_tutorials"),
                exist_ok=True)
    os.makedirs(os.path.join(repo_dir, "website", "static", "files"),
                exist_ok=True)
    for config in tutorial_configs:
        tid = config["id"]
        t_dir = config.get("dir")
        exec_on_build = config.get("exec_on_build", True)
        print("Generating {} tutorial".format(tid))
        # Tutorials may live in a subdirectory (t_dir) or flat under
        # tutorials/; output paths mirror that layout.
        if t_dir is not None:
            tutorial_dir = os.path.join(repo_dir, "tutorials", t_dir)
            html_dir = os.path.join(repo_dir, "website", "_tutorials", t_dir)
            js_dir = os.path.join(repo_dir, "website", "pages", "tutorials",
                                  t_dir)
            py_dir = os.path.join(repo_dir, "website", "static", "files",
                                  t_dir)
            for d in [tutorial_dir, html_dir, js_dir, py_dir]:
                os.makedirs(d, exist_ok=True)
            tutorial_path = os.path.join(tutorial_dir, "{}.ipynb".format(tid))
            html_path = os.path.join(html_dir, "{}.html".format(tid))
            js_path = os.path.join(js_dir, "{}.js".format(tid))
            ipynb_path = os.path.join(py_dir, "{}.ipynb".format(tid))
            py_path = os.path.join(py_dir, "{}.py".format(tid))
            tar_path = os.path.join(py_dir, "{}.tar.gz".format(tid))
        else:
            tutorial_dir = os.path.join(repo_dir, "tutorials")
            tutorial_path = os.path.join(repo_dir, "tutorials",
                                         "{}.ipynb".format(tid))
            html_path = os.path.join(repo_dir, "website", "_tutorials",
                                     "{}.html".format(tid))
            js_path = os.path.join(repo_dir, "website", "pages", "tutorials",
                                   "{}.js".format(tid))
            ipynb_path = os.path.join(repo_dir, "website", "static", "files",
                                      "{}.ipynb".format(tid))
            py_path = os.path.join(repo_dir, "website", "static", "files",
                                   "{}.py".format(tid))
        # load notebook
        with open(tutorial_path, "r") as infile:
            nb_str = infile.read()
        nb = nbformat.reads(nb_str, nbformat.NO_CONVERT)
        # track total exec time (non-None if exec_on_build=True)
        total_time = None
        if exec_tutorials and exec_on_build:
            print("Executing tutorial {}".format(tid))
            kwargs = {
                "kernel_name": kernel_name
            } if kernel_name is not None else {}
            ep = ExecutePreprocessor(timeout=600, **kwargs)
            start_time = time.time()
            # try / catch failures for now; should remove once tutorials
            # more stable
            try:
                # execute notebook, using `tutorial_dir` as working directory
                ep.preprocess(nb, {"metadata": {"path": tutorial_dir}})
                total_time = time.time() - start_time
                print(
                    "Done executing tutorial {}. Took {:.2f} seconds.".format(
                        tid, total_time))
            except Exception as exc:
                print("Couldn't execute tutorial {}!".format(tid))
                print(exc)
                total_time = None
        # convert notebook to HTML
        exporter = HTMLExporter()
        html, meta = exporter.from_notebook_node(nb)
        # pull out html div for notebook
        soup = BeautifulSoup(html, "html.parser")
        nb_meat = soup.find("div", {"id": "notebook-container"})
        del nb_meat.attrs["id"]
        nb_meat.attrs["class"] = ["notebook"]
        # when output html, iframe it (useful for Ax reports)
        for html_div in nb_meat.findAll("div", {"class": "output_html"}):
            if html_div.html is not None:
                iframe = soup.new_tag("iframe")
                iframe.attrs["src"] = "data:text/html;charset=utf-8," + str(
                    html_div.html)
                # replace `#` in CSS
                iframe.attrs["src"] = iframe.attrs["src"].replace("#", "%23")
                html_div.contents = [iframe]
        html_out = MOCK_JS_REQUIRES + str(nb_meat)
        # generate HTML file
        with open(html_path, "w") as html_outfile:
            html_outfile.write(html_out)
        # generate JS file ("null" keeps the JS literal valid when not run)
        t_dir_js = t_dir if t_dir else ""
        script = TEMPLATE.format(
            t_dir=t_dir_js,
            tid=tid,
            total_time=total_time if total_time is not None else "null",
        )
        with open(js_path, "w") as js_outfile:
            js_outfile.write(script)
        # output tutorial in both ipynb & py form
        nbformat.write(nb, ipynb_path)
        exporter = ScriptExporter()
        script, meta = exporter.from_notebook_node(nb)
        with open(py_path, "w") as py_outfile:
            py_outfile.write(script)
        # create .tar archive (if necessary)
        if t_dir is not None:
            with tarfile.open(tar_path, "w:gz") as tar:
                tar.add(tutorial_dir,
                        arcname=os.path.basename(tutorial_dir))
def run(notebook, executable=None, rules=None, noqa_regex=None):
    """Lint ``notebook`` and optionally run the exported script through an
    external linter ``executable``.

    Returns a tuple ``(ret, passed)`` where ``ret`` is a list of LintMessage
    and ``passed`` indicates whether all enabled checks succeeded. Rules
    named in the notebook's ``noqa`` set are disabled before linting.
    """
    nb = nbformat.read(notebook, 4)
    extra_metadata = extract_extrametadata(nb, noqa_regex=noqa_regex)
    ret = []
    passed = True
    rules = rules or {}
    extra_metadata.update(rules)

    # TODO: consider warning if referring to non-existent rules
    rules_to_remove = extra_metadata['noqa'] & extra_metadata.keys()
    for rule in rules_to_remove:
        del extra_metadata[rule]

    # TODO: lintfail is more like lintpassed?
    if 'lines_per_cell' in extra_metadata:
        lintret, lintfail = lint_lines_per_cell(
            extra_metadata['cell_lines'],
            max_lines_per_cell=extra_metadata['lines_per_cell'])
        ret.extend(lintret)
        passed = passed and lintfail

    if 'cells_per_notebook' in extra_metadata:
        lintret, lintfail = lint_cells_per_notebook(
            extra_metadata['cell_count'],
            max_cells_per_notebook=extra_metadata['cells_per_notebook'])
        ret.extend(lintret)
        passed = passed and lintfail

    if 'function_definitions' in extra_metadata:
        lintret, lintfail = lint_function_definitions(
            extra_metadata['functions'],
            max_function_definitions=extra_metadata['function_definitions'])
        ret.extend(lintret)
        passed = passed and lintfail

    if 'class_definitions' in extra_metadata:
        lintret, lintfail = lint_class_definitions(
            extra_metadata['classes'],
            max_class_definitions=extra_metadata['class_definitions'])
        ret.extend(lintret)
        passed = passed and lintfail

    if 'kernelspec_requirements' in extra_metadata:
        lintret, lintfail = lint_kernelspec(
            kernelspec=extra_metadata['kernelspec'],
            kernelspec_requirements=extra_metadata['kernelspec_requirements'])
        ret.extend(lintret)
        passed = passed and lintfail

    if 'magics_whitelist' in extra_metadata or 'magics_blacklist' in extra_metadata:
        lintret, lintfail = lint_magics(
            magics=extra_metadata['magics'],
            whitelist=extra_metadata.get('magics_whitelist', None),
            blacklist=extra_metadata.get('magics_blacklist', None))
        ret.extend(lintret)
        passed = passed and lintfail

    if executable:
        # Export the notebook to a temp .py file and run the external linter
        # on it; delete=False + explicit close so the linter can open the
        # file (required on Windows).
        exp = ScriptExporter()
        (body, resources) = exp.from_notebook_node(nb)
        tf = NamedTemporaryFile(mode='w', suffix='.py', delete=False,
                                encoding='utf8')
        tf_name = tf.name
        try:
            tf.write(body)
            tf.close()
            executable.append(tf_name)
            ret2 = _run_and_capture_utf8(executable)
            msg = ret2.stdout + '\t' + ret2.stderr
            # Any linter output (stdout or stderr) counts as a failure.
            ret.append(
                LintMessage(-1, 'Checking lint:\n' + msg.strip(),
                            LintType.LINTER,
                            False if msg.strip() else True))
        finally:
            os.remove(tf_name)

    return ret, passed
def gen_tutorials(repo_dir: str) -> None:
    """Generate HTML tutorials for Docusaurus Ax site from Jupyter notebooks.

    Also create ipynb and py versions of tutorial in Docusaurus site for
    download. Tutorials with a ``dir`` entry in tutorials.json are placed in
    (and archived from) a matching subdirectory.
    """
    with open(os.path.join(repo_dir, "website", "tutorials.json"),
              "r") as infile:
        tutorial_config = json.loads(infile.read())
    tutorial_ids = [x["id"] for v in tutorial_config.values() for x in v]
    tutorial_dirs = [x.get("dir") for v in tutorial_config.values() for x in v]
    for tid, t_dir in zip(tutorial_ids, tutorial_dirs):
        print("Generating {} tutorial".format(tid))
        # Tutorials may live in a subdirectory (t_dir) or flat under
        # tutorials/; output paths mirror that layout.
        if t_dir is not None:
            tutorial_dir = os.path.join(repo_dir, "tutorials", t_dir)
            html_dir = os.path.join(repo_dir, "website", "_tutorials", t_dir)
            js_dir = os.path.join(repo_dir, "website", "pages", "tutorials",
                                  t_dir)
            py_dir = os.path.join(repo_dir, "website", "static", "files",
                                  t_dir)
            for d in [tutorial_dir, html_dir, js_dir, py_dir]:
                os.makedirs(d, exist_ok=True)
            tutorial_path = os.path.join(tutorial_dir, "{}.ipynb".format(tid))
            html_path = os.path.join(html_dir, "{}.html".format(tid))
            js_path = os.path.join(js_dir, "{}.js".format(tid))
            ipynb_path = os.path.join(py_dir, "{}.ipynb".format(tid))
            py_path = os.path.join(py_dir, "{}.py".format(tid))
            tar_path = os.path.join(py_dir, "{}.tar.gz".format(tid))
        else:
            tutorial_path = os.path.join(repo_dir, "tutorials",
                                         "{}.ipynb".format(tid))
            html_path = os.path.join(repo_dir, "website", "_tutorials",
                                     "{}.html".format(tid))
            js_path = os.path.join(repo_dir, "website", "pages", "tutorials",
                                   "{}.js".format(tid))
            ipynb_path = os.path.join(repo_dir, "website", "static", "files",
                                      "{}.ipynb".format(tid))
            py_path = os.path.join(repo_dir, "website", "static", "files",
                                   "{}.py".format(tid))
        # convert notebook to HTML
        with open(tutorial_path, "r") as infile:
            nb_str = infile.read()
        nb = nbformat.reads(nb_str, nbformat.NO_CONVERT)
        exporter = HTMLExporter()
        html, meta = exporter.from_notebook_node(nb)
        # pull out html div for notebook
        soup = BeautifulSoup(html, "html.parser")
        nb_meat = soup.find("div", {"id": "notebook-container"})
        del nb_meat.attrs["id"]
        nb_meat.attrs["class"] = ["notebook"]
        # when output html, iframe it (useful for Ax reports)
        for html_div in nb_meat.findAll("div", {"class": "output_html"}):
            if html_div.html is not None:
                iframe = soup.new_tag("iframe")
                iframe.attrs["src"] = "data:text/html;charset=utf-8," + str(
                    html_div.html)
                # replace `#` in CSS
                iframe.attrs["src"] = iframe.attrs["src"].replace("#", "%23")
                html_div.contents = [iframe]
        html_out = "".join([JS_SCRIPT_TAGS.format(src)
                            for src in SRCS]) + str(nb_meat)
        # generate HTML file
        with open(html_path, "w") as html_outfile:
            html_outfile.write(html_out)
        # generate JS file (Docusaurus page wrapper for the HTML)
        t_dir_js = t_dir if t_dir else ""
        script = TEMPLATE.format(t_dir=t_dir_js, tid=tid)
        with open(js_path, "w") as js_outfile:
            js_outfile.write(script)
        # output tutorial in both ipynb & py form
        with open(ipynb_path, "w") as ipynb_outfile:
            # NOTE: writes the original notebook text, not the parsed `nb`.
            ipynb_outfile.write(nb_str)
        exporter = ScriptExporter()
        script, meta = exporter.from_notebook_node(nb)
        with open(py_path, "w") as py_outfile:
            py_outfile.write(script)
        # create .tar archive (if necessary)
        if t_dir is not None:
            with tarfile.open(tar_path, "w:gz") as tar:
                tar.add(tutorial_dir,
                        arcname=os.path.basename(tutorial_dir))
def export(self, exporter=None):
    """Convert ``self.notebook`` to a script.

    Parameters
    ----------
    exporter: optional
        An nbconvert exporter instance. Defaults to a fresh
        ``ScriptExporter`` created per call.

    Returns
    -------
    dict
        ``{'body': <exported source>, 'resources': <exporter resources>}``.
    """
    # BUG FIX: the previous default `exporter=ScriptExporter()` was evaluated
    # once at function-definition time, so every call (across all instances)
    # shared a single exporter object. Use the None-sentinel idiom instead.
    if exporter is None:
        exporter = ScriptExporter()
    (body, resources) = exporter.from_notebook_node(self.notebook)
    return dict(
        body=body,
        resources=resources
    )
def gen_tutorials(repo_dir: str) -> None:
    """Generate HTML tutorials for PyTorch3D Docusaurus site from Jupyter
    notebooks.

    Also create ipynb and py versions of tutorial in Docusaurus site for
    download.
    """
    config_path = os.path.join(repo_dir, 'website', 'tutorials.json')
    with open(config_path, 'r') as infile:
        tutorial_config = json.loads(infile.read())

    # Unique tutorial ids across all config categories.
    tutorial_ids = {
        entry['id'] for group in tutorial_config.values() for entry in group
    }

    for tid in tutorial_ids:
        print('Generating {} tutorial'.format(tid))

        # Read the notebook source.
        ipynb_in_path = os.path.join(
            repo_dir, 'docs', 'tutorials', '{}.ipynb'.format(tid)
        )
        with open(ipynb_in_path, 'r') as infile:
            nb_str = infile.read()
        nb = nbformat.reads(nb_str, nbformat.NO_CONVERT)

        # displayname is absent from notebook metadata
        nb['metadata']['kernelspec']['display_name'] = 'python3'

        # Convert notebook to HTML and pull out the notebook div.
        html, meta = HTMLExporter().from_notebook_node(nb)
        soup = BeautifulSoup(html, 'html.parser')
        nb_meat = soup.find('div', {'id': 'notebook-container'})
        del nb_meat.attrs['id']
        nb_meat.attrs['class'] = ['notebook']
        html_out = JS_SCRIPTS + str(nb_meat)

        # Write the HTML fragment.
        html_out_path = os.path.join(
            repo_dir, 'website', '_tutorials', '{}.html'.format(tid)
        )
        with open(html_out_path, 'w') as html_outfile:
            html_outfile.write(html_out)

        # Write the Docusaurus JS page wrapper.
        js_out_path = os.path.join(
            repo_dir, 'website', 'pages', 'tutorials', '{}.js'.format(tid)
        )
        with open(js_out_path, 'w') as js_outfile:
            js_outfile.write(TEMPLATE.format(tid))

        # Publish the tutorial in both ipynb & py form.
        ipynb_out_path = os.path.join(
            repo_dir, 'website', 'static', 'files', '{}.ipynb'.format(tid)
        )
        with open(ipynb_out_path, 'w') as ipynb_outfile:
            ipynb_outfile.write(nb_str)

        py_source, meta = ScriptExporter().from_notebook_node(nb)
        py_out_path = os.path.join(
            repo_dir, 'website', 'static', 'files', '{}.py'.format(tid)
        )
        with open(py_out_path, 'w') as py_outfile:
            py_outfile.write(py_source)