Example #1
def assert_error_notebook(
    notebook_path: Union[str, Path],
    expected_error_type: Union[type, None],
    expected_error_msg: Union[str, None],
    error_label: str = ERROR_LABEL,
    verbose: bool = VERBOSE,
) -> None:
    """Assert that notebook raise specific error.

    Parameters
    ----------
    notebook_path : Union[str, Path]
        Path of the notebook
    expected_error_type : Union[type, None]
        Expected error type raised by the notebook
    expected_error_msg : Union[str, None]
        Expected error message of the notebook
    error_label : str, optional
        Label used in the error message
    verbose : bool, optional
        Option to print more information, by default False
    """
    notebook_path = check_is_notebook(notebook_path)
    error_type, error_msg = get_error_notebook(notebook_path, verbose)
    if error_type != expected_error_type:
        msg = f"{error_label}: {error_type} != {expected_error_type}"
        raise BoarError(msg)

    if (error_type is None) or (expected_error_msg is None):
        return

    if str(error_msg) != str(expected_error_msg):
        msg = f"{error_label}: {str(error_msg)} != {str(expected_error_msg)}"
        raise BoarError(msg)
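A minimal usage sketch for the assertion helper above; the import path, the notebook path, and the expected error are assumptions for illustration only:

# Hypothetical usage -- import path and notebook are assumptions.
from boar.testing import assert_error_notebook

assert_error_notebook(
    "notebooks/failing.ipynb",           # notebook expected to raise
    expected_error_type=ValueError,      # error type the notebook should raise
    expected_error_msg="bad input",      # pass None to skip the message check
)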
Example #2
def get_logger_print(verbose: Any):
    # Map `verbose` to a print-like callable.
    logger_print = (lambda x: None)
    if isinstance(verbose, bool):
        logger_print = print if verbose else logging.info
    elif callable(verbose):
        # A custom callable is used as the logger directly.
        logger_print = verbose
    else:
        msg = f"Undefined verbose: `{verbose}`."
        raise BoarError(msg)
    return logger_print
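A short sketch of the three accepted `verbose` forms; calling the helper directly like this is an assumption about how it is meant to be used:

# Hypothetical usage -- shows how each `verbose` value is resolved.
log = get_logger_print(True)              # -> built-in print
log = get_logger_print(False)             # -> logging.info
log = get_logger_print(lambda msg: None)  # -> any callable is used as-is
log("notebook executed")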
Example #3
def split_lines_with_block_tag(
    source_to_split: str,
    tag: str,
) -> List[str]:
    block_splits, block = [], []
    for line in source_to_split.split("\n"):
        block.append(line)
        if tag in line:
            # Close the current block on the tag line and start a new one.
            block_splits.append("\n".join(block))
            block = []
    block_splits.append("\n".join(block))

    # A single tag yields exactly two blocks; more means the tag is duplicated.
    if len(block_splits) > 2:
        msg = f"Multiple `{tag}` in:\n{source_to_split}"
        raise BoarError(msg)
    return block_splits
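A minimal sketch of splitting a cell source on a block tag; the tag string and the source are made-up assumptions:

# Hypothetical usage -- tag string and source are assumptions.
source = "a = 1\n# export_start\nb = 2\nc = 3"
head, tail = split_lines_with_block_tag(source, "# export_start")
# `head` ends with the tag line, `tail` holds the remaining lines.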
Example #4
def lint_file(
    file_path: Union[str, Path],
    error_label: str = ERROR_LABEL,
    verbose: Any = VERBOSE,
    inline: bool = INLINE,
) -> Union[None, str]:
    """Lints one file.

    Parameters
    ----------
    file_path : Union[str, Path]
        Path of the notebook, must be a file
    error_label : str, optional
        Label used in the error message
    verbose : Any, optional
        Verbosity option
    inline : bool, optional
        Replace existing notebook with linted version

    Returns
    -------
    Union[None, str]
        Path in POSIX format if the notebook fails the check, else None

    Raises
    ------
    BoarError
        If the notebook is not a file or is not linted.
    """
    file_path = check_is_notebook(file_path)
    counts = get_code_execution_counts(file_path)
    cell_counts = get_cell_counts(counts)
    if cell_counts == []:
        return None

    file_posix = Path(file_path).as_posix()
    log_lint(file_posix, cell_counts, verbose)

    if inline:
        remove_output(file_path, inline)
        return file_posix

    msg = f"{error_label}: {file_posix}"
    raise BoarError(msg)
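A usage sketch for the linter; the module path and the notebook path are assumptions:

# Hypothetical usage -- module path and notebook path are assumptions.
from boar.linting import lint_file

# Raises BoarError if the notebook still holds execution counts ...
lint_file("notebooks/demo.ipynb", verbose=True)

# ... or strips the outputs in place instead of raising.
lint_file("notebooks/demo.ipynb", verbose=True, inline=True)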
Example #5
def parse_ipynb(
    notebook_path: Union[str, Path],
    selection: Union[None, str] = None,
    projection: str = "code",
) -> List[Union[List[str], Dict[str, List[str]]]]:
    with open(notebook_path, "r") as json_stream:
        content = json.load(json_stream)
    if selection is None:
        return [
            cell for cell in content["cells"] if cell["cell_type"] == "code"
        ]
    if selection in ["execution_count", "metadata", "outputs", "source"]:
        return [
            cell[selection] for cell in content["cells"]
            if cell["cell_type"] == "code"
        ]

    msg = f"{selection} is an invalid selection for notebook parsing."
    raise BoarError(msg)
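A small sketch of the two selection modes; the notebook path is an assumption:

# Hypothetical usage -- notebook path is an assumption.
cells = parse_ipynb("notebooks/demo.ipynb")                         # full code cells
sources = parse_ipynb("notebooks/demo.ipynb", selection="source")   # only their source lines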
Example #6
def apply_notebook(
    notebook_path: Union[str, Path],
    func_to_apply: FunctionType,
    error_label: str,
    verbose: Any = VERBOSE,
    func_params: dict = {},
    recursion_level: int = 0,
    max_recursion: Union[int, None] = None,
) -> Union[List[str], None]:
    notebook_path = Path(notebook_path)
    incorrect_files = []

    # Stop if the maximum recursion depth has been reached
    if (max_recursion is not None) and (recursion_level >= max_recursion):
        return incorrect_files

    # Apply the function to a single notebook; a BoarError marks it as incorrect
    if notebook_path.suffix == ".ipynb":
        try:
            file_posix = func_to_apply(
                notebook_path, error_label, verbose, **func_params,
            )
        except BoarError:
            file_posix = Path(notebook_path).as_posix()
        incorrect_files.append(file_posix)

    # Recurse into a directory one level deeper
    if notebook_path.is_dir():
        incorrect_files.extend(apply_dir(
            notebook_path, func_to_apply, error_label,
            verbose, func_params, recursion_level+1, max_recursion
        ))

    incorrect_lint_files = [name for name in incorrect_files if name is not None]

    if (recursion_level != 0):
        return incorrect_lint_files

    if incorrect_lint_files == []:
        return None

    incorrect_lint_str = "\n".join(incorrect_lint_files)
    msg = f"{error_label} issues in:\n{incorrect_lint_str}"
    raise BoarError(msg)
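A sketch that pairs apply_notebook with lint_file from Example #4; the directory path and keyword values are assumptions:

# Hypothetical usage -- directory path and keyword values are assumptions.
apply_notebook(
    "notebooks/",             # a directory is walked recursively via apply_dir
    func_to_apply=lint_file,
    error_label="Lint",
    verbose=True,
    max_recursion=3,          # stop descending after three directory levels
)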
Example #7
def check_is_notebook(file_path: Union[str, Path]):
    file_path = Path(file_path)
    if not (file_path.is_file() and file_path.suffix == ".ipynb"):
        msg = f"{file_path} has invalid format."
        raise BoarError(msg)
    return file_path
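A one-line sketch; the path is an assumption:

# Hypothetical usage -- raises BoarError unless the path is an existing .ipynb file.
notebook_path = check_is_notebook("notebooks/demo.ipynb")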
Example #8
def run_notebook(
    notebook_path: Union[str, Path],
    inputs: dict = {},
    verbose: Union[bool, object] = VERBOSE,
    Tag: EnumMeta = Tag,
) -> dict:
    """Run notebook one cell and one line at a time.

    Parameters
    ----------
    notebook_path : Union[str, Path]
        Path of notebook
    inputs : dict, optional
        Parameters to set before running the notebook, by default {}
    verbose : Union[bool, object], optional
        Option to print more information, by default False
    Tag : EnumMeta, optional
        Name of the tags, by default Tag

    Returns
    -------
    dict
        Outputs to return if `export` tags are set in the notebook

    Raises
    ------
    BoarError
        If `export*` and `skip*` tags are in the same source
    BoarError
        If `*start` and `*line` tags are in the same source
    """
    # Parse json
    notebook_path = check_is_notebook(notebook_path)
    sources = get_code_sources(notebook_path)

    # Set the new inputs in the execution namespace
    locals().update(inputs)

    # Run Code
    outputs = {}
    for cell_index, source in enumerate(sources):
        # Parse source lines for execution
        source_to_exec = strap_source_in_one_line(source)
        log_execution(cell_index, source_to_exec, verbose=verbose)

        # Run directly if the cell carries neither export nor skip tags
        if (Tag.EXPORT.value not in source_to_exec) and (Tag.SKIP.value not in source_to_exec):
            exec(source_to_exec)
            continue

        # Raise an error if the two tag types are mixed in one cell
        if (Tag.EXPORT.value in source_to_exec) and (Tag.SKIP.value in source_to_exec):
            msg = f"`{Tag.EXPORT.value}*` and `{Tag.SKIP.value}*` cannot be in same cell."
            raise BoarError(msg)

        # Define tags
        if (Tag.EXPORT.value in source_to_exec):
            start_tag = Tag.EXPORT_START.value
            end_tag = Tag.EXPORT_END.value
            line_tag = Tag.EXPORT_LINE.value

        if (Tag.SKIP.value in source_to_exec):
            start_tag = Tag.SKIP_START.value
            end_tag = Tag.SKIP_END.value
            line_tag = Tag.SKIP_LINE.value

        # Raise an error if the block and line tag variants are mixed
        if (start_tag in source_to_exec) and (line_tag in source_to_exec):
            msg = f"`{start_tag}` and `{line_tag}` cannot be in same cell."
            raise BoarError(msg)

        # Execute the python code of the tagged block
        if start_tag in source_to_exec:
            diffs = execute_by_block(source_to_exec, start_tag, end_tag, locals())
            outputs.update(diffs)
            continue

        if line_tag in source_to_exec:
            diffs = execute_by_line(source_to_exec, line_tag, locals())
            outputs.update(diffs)
            continue

    close_plots()
    return deepcopy(outputs)
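A usage sketch for running a notebook with injected inputs; the module path, the notebook path, and its tags are assumptions:

# Hypothetical usage -- module path, notebook, and its export tags are assumptions.
from boar.running import run_notebook

outputs = run_notebook(
    "notebooks/train.ipynb",
    inputs={"n_epochs": 2},   # injected into the notebook namespace before execution
    verbose=True,
)
# `outputs` holds the variables flagged with export tags inside the notebook.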