def start_pluto(port: Param("Port to start Pluto", type=int) = 9000,
                tunnel: Param("Tunnel type", type=str) = 'ngrok',
                authtoken: Param("Tunnel authtoken for ngrok", type=str) = None):
    "Starts Pluto.jl reactive notebook"
    if in_colab(): mount_drive()
    remote = RemotePluto(port=port, tunnel=tunnel, authtoken=authtoken)
    remote.launch()
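# Usage sketch (not from the source; the authtoken below is a placeholder,
# not a real token):
# start_pluto(port=9000, tunnel='ngrok', authtoken='<NGROK_AUTHTOKEN>')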
def nbdev_clean_nbs(fname: Param("A notebook name or glob to convert", str) = None,
                    clear_all: Param("Clean all metadata and outputs", bool) = False,
                    disp: Param("Print the cleaned outputs", bool) = False,
                    read_input_stream: Param("Read the input stream instead of the nb folder") = False):
    "Clean all notebooks in `fname` to avoid merge conflicts"
    # Git hooks pass the notebooks on stdin
    if read_input_stream and sys.stdin:
        input_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
        nb = json.load(input_stream)
        clean_nb(nb, clear_all=clear_all)
        _print_output(nb)
        return
    path = None
    if fname is None:
        try: path = Config().path("nbs_path")
        except Exception: path = Path.cwd()
    files = nbglob(fname=ifnone(fname, path), recursive=fname is None)
    for f in files:
        if not str(f).endswith('.ipynb'): continue
        nb = json.loads(open(f, 'r', encoding='utf-8').read())
        clean_nb(nb, clear_all=clear_all)
        if disp: _print_output(nb)
        else:
            x = json.dumps(nb, sort_keys=True, indent=1, ensure_ascii=False)
            with io.open(f, 'w', encoding='utf-8') as out:
                out.write(x)
                out.write("\n")
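# Usage sketch (not from the source): clean one notebook in place, or preview
# the cleaned JSON without writing it back:
# nbdev_clean_nbs(fname='00_core.ipynb')
# nbdev_clean_nbs(fname='00_core.ipynb', disp=True)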
def resize(input: Param(help="Input image path", type=str),
           height: Param(help="Height of the output image", type=int, default=256),
           width: Param(help="Width of the output image", type=int, default=256),
           output: Param(help="Output image path", type=str)) -> None:
    img = Image.open(input)
    img_resized = img.resize((width, height)).convert('RGB')
    img_resized.save(output, "JPEG")
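# Example (sketch; assumes 'in.png' exists and PIL's Image is imported at
# module level): shrink an image to 128x128 and save it as JPEG.
# resize(input="in.png", height=128, width=128, output="out.jpg")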
def start_code(port: Param("Port to Start Code", type=int) = 10000,
               password: Param("Password to Start Code", type=str) = None,
               tunnel: Param("Tunnel type", type=str) = 'ngrok',
               authtoken: Param("Tunnel Authtoken for ngrok", type=str) = None):
    "Starts Code Server"
    if in_colab(): mount_drive()
    remote = RemoteCode(password=password, port=port, tunnel=tunnel, authtoken=authtoken)
    remote.launch()
def nbdev_fix_merge(fname: Param("A notebook filename to fix", str),
                    fast: Param("Fast fix: automatically fix the merge conflicts in outputs or metadata", bool) = True,
                    trust_us: Param("Use local outputs/metadata when fast merging", bool) = True):
    "Fix merge conflicts in notebook `fname`"
    fix_conflicts(fname, fast=fast, trust_us=trust_us)
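# Usage sketch (not from the source): resolve conflicts after a merge,
# preferring the local outputs/metadata:
# nbdev_fix_merge('00_core.ipynb', fast=True, trust_us=True)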
def nbdev_nb2md(fname: Param("A notebook file name to convert", str),
                dest: Param("The destination folder", str) = '.',
                img_path: Param("Folder to export images to") = "",
                jekyll: Param("Whether to use Jekyll metadata in the markdown file", bool_arg) = False):
    "Convert the notebook in `fname` to a markdown file"
    nb_detach_cells(fname, dest=img_path)
    convert_md(fname, dest, jekyll=jekyll, img_path=img_path)
def av_i2v(images: Param("Path to the images folder or list of images"),
           out_path: Param("Output video path", str) = None,
           fps: Param("Frames per second", int) = 30,
           no_sort: Param("Do not sort images", bool) = False,
           max_num_frame: Param("Maximum number of frames", int) = int(10e12),
           resize_rate: Param("Resize rate", float) = 1,
           with_text: Param("Add the image index as text when writing the video", bool) = False,
           text_is_date: Param("Render the added text as a date", bool) = False,
           verbose: Param("Print progress", bool) = True):
    return images_to_video(images, out_path, fps, no_sort, max_num_frame,
                           resize_rate, with_text, text_is_date, verbose)
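# Usage sketch (not from the source): build a 24 fps video from a folder of
# frames (paths are illustrative):
# av_i2v('frames/', out_path='out.mp4', fps=24)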
def nbdev_bump_version(part: Param("Part of version to bump", int) = 2):
    "Increment version in `settings.ini` by one"
    cfg = Config()
    print(f'Old version: {cfg.version}')
    cfg.d['version'] = bump_version(cfg.version, part)
    cfg.save()
    update_version()
    print(f'New version: {cfg.version}')
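# Usage sketch (not from the source; assumes part 2 is the patch segment and
# that `bump_version` zeroes the segments after the bumped one, which this
# listing does not confirm):
# nbdev_bump_version()        # e.g. 0.1.3 -> 0.1.4
# nbdev_bump_version(part=1)  # e.g. 0.1.3 -> 0.2.0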
def nbdev_build_docs(fname: Param("A notebook name or glob to convert", str) = None,
                     force_all: Param("Rebuild even notebooks that haven't changed", bool_arg) = False,
                     mk_readme: Param("Also convert the index notebook to README", bool_arg) = True,
                     n_workers: Param("Number of workers to use", int) = None,
                     pause: Param("Pause time (in secs) between notebooks to avoid race conditions", float) = 0.5):
    "Build the documentation by converting notebooks matching `fname` to html"
    notebook2html(fname=fname, force_all=force_all, n_workers=n_workers, pause=pause)
    if fname is None: make_sidebar()
    if mk_readme: make_readme()
def nbdev_trust_nbs(fname: Param("A notebook name or glob to convert", str) = None,
                    force_all: Param("Trust even notebooks that haven't changed", bool) = False):
    "Trust notebooks matching `fname`"
    check_fname = Config().nbs_path / ".last_checked"
    last_checked = os.path.getmtime(check_fname) if check_fname.exists() else None
    files = Config().nbs_path.glob('**/*.ipynb') if fname is None else glob.glob(fname)
    for fn in files:
        if last_checked and not force_all:
            last_changed = os.path.getmtime(fn)
            if last_changed < last_checked: continue
        nb = read_nb(fn)
        if not NotebookNotary().check_signature(nb): NotebookNotary().sign(nb)
    check_fname.touch(exist_ok=True)
def nbdev_new(name: Param("A directory to create the project in", str),
              template_git_repo: Param("url to template repo", str) = _template_git_repo):
    "Create a new nbdev project with a given name."
    path = Path(f"./{name}").absolute()
    if path.is_dir():
        print(f"Directory {path} already exists. Aborting.")
        return
    print(f"Creating a new nbdev project {name}.")

    def rmtree_onerror(func, path, exc_info):
        "Use with `shutil.rmtree` when you need to delete files/folders that might be read-only."
        os.chmod(path, stat.S_IWRITE)
        func(path)

    try:
        subprocess.run(['git', 'clone', f'{template_git_repo}', f'{path}'], check=True, timeout=5000)
        # Note: on windows, .git is created with a read-only flag
        shutil.rmtree(path / ".git", onerror=rmtree_onerror)
        subprocess.run("git init".split(), cwd=path, check=True)
        subprocess.run("git add .".split(), cwd=path, check=True)
        subprocess.run(["git", "commit", "-am", "Initial"], cwd=path, check=True)
        print(f"Created a new repo for project {name}. Please edit settings.ini and run nbdev_build_lib to get started.")
    except Exception as e:
        print("An error occurred while copying the nbdev project template:")
        print(e)
        if os.path.isdir(path):
            try:
                shutil.rmtree(path, onerror=rmtree_onerror)
            except Exception as e2:
                print(f"An error occurred while cleaning up. Failed to delete {path}:")
                print(e2)
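# Usage sketch (not from the source): scaffold a project from the default
# template repo:
# nbdev_new('my_project')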
def nbdev_read_nbs(fname: Param("A notebook name or glob to convert", str) = None):
    "Check all notebooks matching `fname` can be opened"
    files = Config().nbs_path.glob('**/*.ipynb') if fname is None else glob.glob(fname)
    for nb in files:
        try: _ = read_nb(nb)
        except Exception as e:
            print(f"{nb} is corrupted and can't be opened.")
            raise e
def fsize(fldr: Param("Path string of the folder to measure") = "."):
    path = pathlib.Path(fldr).resolve()
    app_logger.info(f"Current folder: {path}")
    with pd.option_context('display.max_rows', None, 'display.max_columns', None,
                           'display.width', 10000, 'display.precision', 3,
                           'display.colheader_justify', 'left'):
        # `path.ls()` relies on a `Path.ls` patch (e.g. fastcore's)
        df = pd.DataFrame([{"Name": p.name,
                            "Size": _convert_size(p.stat().st_size),
                            "Bytes": p.stat().st_size} for p in path.ls()])
        app_logger.info(f"Current folder size: {_convert_size(df['Bytes'].sum())}")
        print(df.sort_values(by='Bytes').drop('Bytes', axis=1).reset_index(drop=True))
def download_file_from_google_drive(id_or_link: Param("Link or file id"),
                                    destination: Param("Path to save the file")):
    if "https" in id_or_link:
        x = id_or_link
        file_id = x.split("/")[x.split("/").index("d") + 1]
    else:
        file_id = id_or_link
    logger.info(f"Download from id: {file_id}")
    import requests

    def get_confirm_token(response):
        for key, value in response.cookies.items():
            if key.startswith('download_warning'): return value
        return None

    def save_response_content(response, destination):
        CHUNK_SIZE = 32768
        with open(destination, "wb") as f:
            for chunk in response.iter_content(CHUNK_SIZE):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)

    URL = "https://docs.google.com/uc?export=download"
    session = requests.Session()
    response = session.get(URL, params={'id': file_id}, stream=True)
    token = get_confirm_token(response)
    if token:
        params = {'id': file_id, 'confirm': token}
        response = session.get(URL, params=params, stream=True)
    save_response_content(response, destination)
    logger.info(f"Done -> {destination}")
    return osp.abspath(destination)
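# Usage sketch (not from the source; the URL is an illustrative share link,
# from which the id segment after "/d/" is extracted):
# download_file_from_google_drive(
#     "https://drive.google.com/file/d/FILE_ID/view", "data/file.zip")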
def detalhar_issue(inspecao: Param("Number of the inspection to be reported", str),
                   login: Param("User's Anatel login", str) = None,
                   senha: Param("Password used in Anatel's interactive systems", str) = None,
                   fiscaliza: Param("Logged-in Redmine object, an alternative to login and password", Redmine) = None,
                   teste: Param("Whether this is a test report", bool_arg) = True) -> dict:
    """Receives the inspection number `inspecao` and either login and password or,
    optionally, a logged-in Redmine object `fiscaliza`

    inspecao: str - Number of the inspection to be reported
    login: str - User's Anatel login
    senha: str - Password used in Anatel's interactive systems
    fiscaliza: Redmine - Logged-in Redmine object, an alternative to login and password
    teste: bool - If true, the test ( staging ) Fiscaliza instance is used

    Returns:
        dict: Dictionary with the current status and filled-in fields of the inspection
    """
    if not login or not senha:
        assert fiscaliza is not None, "To log in to Fiscaliza you need a login and password or the fiscaliza object"
        valida_fiscaliza(fiscaliza)
    else:
        fiscaliza = auth_user(login, senha, teste)
    if not teste: hm2prod()
    result = dict()
    issue = fiscaliza.issue.get(inspecao, include=["relations", "attachments"])
    result.update({k: str(getattr(issue, k, "")) for k in FIELDS})
    if custom_fields := getattr(issue, "custom_fields", None):
        for field in list(custom_fields):
            key = field.id
            result[ID2FIELD.get(key, utf2ascii(field.name))] = getattr(field, "value")
    return result  # returning the collected fields, as the docstring promises a dict
def start_jupyter(port: Param("Port to Start Jupyter", type=int) = 9000,
                  ui: Param("Interface to start", type=str) = 'notebook',
                  tunnel: Param("Tunnel type", type=str) = 'ngrok',
                  authtoken: Param("Tunnel Authtoken for ngrok", type=str) = None,
                  subdomain: Param("Add a subdomain, only for localtunnel", type=str) = None,
                  install_code: Param("Flag to install code", type=bool) = False,
                  install_julia: Param("Flag to install julia", type=bool) = False):
    "Starts Jupyter"
    if in_colab():
        print("Mounting Drive")
        mount_drive()
    remote = RemoteJupyter(port=port, ui=ui, tunnel=tunnel, authtoken=authtoken,
                           subdomain=subdomain, install_code=install_code,
                           install_julia=install_julia)
    remote.launch()
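# Usage sketch (not from the source; the authtoken is a placeholder):
# start_jupyter(port=9000, ui='lab', tunnel='ngrok',
#               authtoken='<NGROK_AUTHTOKEN>')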
def nbdev_test_nbs(fname: Param("A notebook name or glob to convert", str) = None,
                   flags: Param("Space separated list of flags", str) = None,
                   n_workers: Param("Number of workers to use", int) = None,
                   verbose: Param("Print errors along the way", bool) = True,
                   timing: Param("Time each notebook to see which ones are slow", bool) = False,
                   pause: Param("Pause time (in secs) between notebooks to avoid race conditions", float) = 0.5):
    "Test in parallel the notebooks matching `fname`, passing along `flags`"
    if flags is not None: flags = flags.split(' ')
    if fname is None:
        files = [f for f in Config().nbs_path.glob('*.ipynb') if not f.name.startswith('_')]
    else:
        files = glob.glob(fname)
    files = [Path(f).absolute() for f in sorted(files)]
    if n_workers is None: n_workers = 0 if len(files) == 1 else min(num_cpus(), 8)
    # make sure we are inside the notebook folder of the project
    os.chdir(Config().nbs_path)
    results = parallel(_test_one, files, flags=flags, verbose=verbose,
                       n_workers=n_workers, pause=pause)
    passed, times = [r[0] for r in results], [r[1] for r in results]
    if all(passed): print("All tests are passing!")
    else:
        msg = "The following notebooks failed:\n"
        raise Exception(msg + '\n'.join([f.name for p, f in zip(passed, files) if not p]))
    if timing:
        for i, t in sorted(enumerate(times), key=lambda o: o[1], reverse=True):
            print(f"Notebook {files[i].name} took {int(t)} seconds")
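# Usage sketch (not from the source): test the notebooks that carry the
# "slow" flag, four at a time:
# nbdev_test_nbs(flags='slow', n_workers=4)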
def relatar_inspecao(inspecao: Param("Number of the inspection to be reported", str),
                     login: Param("User's Anatel login", str),
                     senha: Param("Password used in Anatel's interactive systems", str),
                     dados: Param("Already validated dictionary with the data to be reported"),
                     teste: Param("Whether this is a test report", bool_arg) = True,
                     parar_em: Param("String indicating how far the report should be advanced", str) = "Relatada",
                     substituir_relatorio: Param("Replace the report if one was already created?", bool_arg) = False):
    """Reports the inspection `inspecao` with the data from the dictionary `dados`"""
    assert parar_em in SITUACAO.keys(), f"Provide one of these values for parar_em: {SITUACAO.keys()}"
    if not isinstance(dados, dict):
        try:
            path = Path(dados)
            assert path.exists(), f"The given path does not exist: {path}!"
            assert path.is_file(), f"The given path {path} is not a file!"
        except TypeError as e:
            raise ValueError(f"The file path provided, {dados}, is invalid") from e
        if path.suffix == ".json":
            dados = json.loads(path.read_text())
        else:
            raise TypeError(f"Unknown file format {path.suffix}")
    dados = dados.copy()  # Don't mutate the original dictionary
    console = Console()
    fiscaliza = auth_user(login, senha, teste)
    console.print("User successfully authenticated :thumbs_up:", style="bold green")
    if issue_type(inspecao, fiscaliza) == "Ação":
        console.print(f":exclamation: The inspection number provided, {inspecao}, corresponds to an [bold red]Ação[/bold red] :exclamation:")
        return
    acao = insp2acao(inspecao, fiscaliza)
    console.print(f"Inspection {inspecao} linked to Ação {acao['id_ACAO']}")
    with console.status("Fetching the inspection's current status...", spinner="pong"):
        status_atual = detalhar_issue(inspecao=inspecao, fiscaliza=fiscaliza, teste=teste)
        console.print(f":white_check_mark: [cyan]Current state: [bold green]{status_atual['status']}")
    atual = status_atual["status"]
    lista_status = list(SITUACAO.keys())
    index = lista_status.index(atual)
    lista_status = lista_status[index:lista_status.index(parar_em) + 1]
    if relatorio := status_atual.get("Relatorio_de_Monitoramento"):
        if not substituir_relatorio:
            console.print("[bold red] :warning: A monitoring report has already been created; this field will not be updated :warning:")
            del dados["Html"]
        else:
            console.print(":wastebasket: [red] Replacement of the report was requested; the inspection must be updated first to discard it. Please wait...")
            temp = dados.copy()
            temp["Gerar_Relatorio"] = {"id": FIELD2ID["Gerar_Relatorio"], "value": 0}
            temp["Relatorio_de_Monitoramento"] = {"id": FIELD2ID["Relatorio_de_Monitoramento"], "value": ""}
            temp["Html"] = {"id": FIELD2ID["Html"], "value": ""}
            atualiza_fiscaliza(inspecao, temp, fiscaliza, status=atual)
            relatorio = None
def nbdev_update_lib(fname: Param("A notebook name or glob to convert", str) = None):
    "Propagates any change in the modules matching `fname` to the notebooks that created them"
    script2notebook(fname=fname)
def process_data(param1: Param("The message", str)):
    """Function for processing data related to the library."""
    pass
def nbdev_build_lib(fname: Param("A notebook name or glob to convert", str) = None):
    "Export notebooks matching `fname` to python modules"
    write_tmpls()
    notebook2script(fname=fname)
def train(param1: Param("The message", str)):
    """Function for training models related to the library."""
    pass
def nbdev_detach(path_nb: Param("Path to notebook"),
                 dest: Param("Destination folder", str) = "",
                 use_img: Param("Convert markdown images to img tags", bool_arg) = False):
    "Export cell attachments to `dest` and update references"
    nb_detach_cells(path_nb, dest=dest, use_img=use_img)
def validar_dicionario(data_dict: Param("Data dictionary or path to the .json file"),
                       inspecao: Param("Number of the inspection to be reported", str),
                       login: Param("User's Anatel login", str) = None,
                       senha: Param("Password used in Anatel's interactive systems", str) = None,
                       fiscaliza: Param("Logged-in Redmine object, an alternative to login and password", Redmine) = None,
                       teste: Param("If true, the test ( staging ) Fiscaliza instance is used", bool_arg) = True,
                       save_path: Param("Path to save the formatted dictionary", str) = None) -> dict:
    """Validates the information in data_dict and formats it as required by the Redmine API.
    Optionally saves the serialized dictionary as .json if a valid `save_path` is provided

    Returns:
        dictionary with the formatted data
    """
    keys = list(DICT_FIELDS.keys())
    if not isinstance(data_dict, dict):
        try:
            path = Path(data_dict)
            assert path.exists(), f"The given path does not exist: {path}!"
            assert path.is_file(), f"The given path {path} is not a file!"
        except TypeError as e:
            raise ValueError(f"The file path provided, {data_dict}, is invalid") from e
        if path.suffix == ".json":
            data_dict = json.loads(path.read_text())
        else:
            raise TypeError(f"Unknown file format {path.suffix}")
    if not set(data_dict.keys()).issubset(keys):
        raise ValueError(f"The following keys are unknown or named differently than expected: {set(data_dict.keys()).difference(keys)}")
    if not login or not senha:
        assert fiscaliza is not None, "To log in to Fiscaliza you need a login and password or the fiscaliza object"
        valida_fiscaliza(fiscaliza)
    else:
        fiscaliza = auth_user(login, senha, teste)
    if teste: prod2hm()
    else: hm2prod()
    issue = fiscaliza.issue.get(inspecao, include=["relations", "attachments"])
    issue_id = issue.id
    date_pattern = r"([2]\d{3})-(0[1-9]|1[0-2])-(0[1-9]|[12]\d|3[01])"
    d = {k: v for k, v in data_dict.items() if k in DICT_FIELDS.keys()}
    id2name, name2id = issue2users(issue_id, fiscaliza)
    console = Console()
    key = keys[0]
    if classe := d.get(key):
        d[key] = check_update(key, classe, DICT_FIELDS[key], CLASSE, True)
def nb2py(nb: Param("absolute or relative full path to the notebook you want to convert to a python script", str) = None,
          folder: Param("absolute or relative path to the folder of the script you will create. Defaults to the current nb's directory", str) = None,
          name: Param("name of the script you want to create. Defaults to the current nb name, replacing .ipynb with .py", str) = None,
          save: Param("saves the nb before converting it to a script", store_false) = True,
          run: Param("import and run the script", store_true) = False,
          verbose: Param("controls verbosity", store_false) = True):
    "Converts a notebook to a python script in a predefined folder."
    # make sure drive is mounted when using Colab
    if is_colab(): maybe_mount_gdrive()

    # nb path & name
    if nb is not None:
        nb_path = Path(nb)
        nb_path = nb_path.parent / f"{nb_path.stem}.ipynb"
    else:
        try:
            nb_path = get_nb_path()
        except Exception:
            print("nb2py couldn't get the nb name. Pass it as an nb argument and rerun nb2py.")
            return
    if nb_path is None:
        print("nb2py couldn't get the nb name. Pass it as an nb argument and rerun nb2py.")
        return
    nb_name = nb_path.name
    assert os.path.isfile(nb_path), f"nb2py couldn't find {nb_path}. Please, confirm the path is correct."

    # save nb: only those that are run from the notebook itself
    if save and not is_colab() and nb is None:
        try:
            save_nb(nb_name)
        except Exception:
            print(f"nb2py couldn't save the nb automatically. It will use the version last saved at {to_local_time(os.path.getmtime(nb_name))}")

    # script path & name
    folder = Path(folder) if folder is not None else nb_path.parent
    name = f"{Path(name).stem}.py" if name is not None else f"{nb_path.stem}.py"
    script_path = folder / name

    # delete the file if it exists, and create script_path's folder if it doesn't exist
    if os.path.exists(script_path): os.remove(script_path)
    script_path.parent.mkdir(parents=True, exist_ok=True)

    # write the script header
    with open(script_path, 'w') as f:
        f.write('# -*- coding: utf-8 -*-\n')
        f.write(f'"""{nb_name}\n\n')
        f.write('Automatically generated.\n\n')
        f.write('Original file is located at:\n')
        f.write(f'    {nb_path}\n')
        f.write('"""')

    # identify convertible cells (excluding empty ones and those with hide flags)
    for _ in range(10):
        try:
            with open(Path(nb_path), 'r', encoding='utf8') as f:
                nb = nbformat.reads(f.read(), as_version=4)
            break
        except Exception:
            time.sleep(.5)
    idxs = _get_unhidden_cells(nb['cells'])
    pnb = nbformat.from_dict(nb)
    pnb['cells'] = [pnb['cells'][i] for i in idxs]

    # clean up cells and write the script
    sep = '\n' * 2
    for i, cell in enumerate(pnb['cells']):
        source_str = cell['source'].replace('\r', '')
        code_lines = source_str.split('\n')
        if code_lines == ['']: continue
        while code_lines[0] == '': code_lines = code_lines[1:]
        while code_lines[-1] == '': code_lines = code_lines[:-1]
        cl = []
        for j in range(len(code_lines)):
            if list(set(code_lines[j].split(" "))) == ['']: code_lines[j] = ''
            if i == 0 or code_lines[j - 1] != '' or code_lines[j] != '': cl.append(code_lines[j])
        code_lines = cl
        code = sep + '\n'.join(code_lines)
        with open(script_path, 'a', encoding='utf8') as f:
            f.write(code)

    # check the script exists
    assert os.path.isfile(script_path), f"an error occurred during the export and {script_path} doesn't exist"
    if verbose: print(f"{nb_name} converted to {script_path}")
    if run: runpy.run_path(script_path)
    return str(script_path)
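# Usage sketch (not from the source): convert a notebook into scripts/ and
# run the result (paths are illustrative):
# nb2py(nb='nbs/01_data.ipynb', folder='scripts', run=True)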
def reproduce(param1: Param("The message", str)):
    """Function for reproducing results related to the library."""
    pass
def evaluate(param1: Param("The message", str)):
    """Function for evaluating models related to the library."""
    pass
def play(n: Param("The number of enemies you want to encounter.", int) = 10):
    player = init_player()
    game_loop(player, n)
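# Usage sketch (not from the source): start a short game with five enemies:
# play(n=5)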
def v2i(input_video: Param("Input video path", str),
        output_dir: Param("Output directory for the extracted images", str) = None,
        skip: Param("Keep one frame out of every `skip` frames", int) = 1):
    return video_to_images(input_video, output_dir, skip)
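# Usage sketch (not from the source): extract every 10th frame of a clip
# (paths are illustrative):
# v2i('clip.mp4', output_dir='frames/', skip=10)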