Code example #1
def diff(ctx, files, metrics, all, detail, revision):
    """Show the differences in metrics for each file."""
    config = ctx.obj["CONFIG"]

    if not exists(config):
        handle_no_cache(ctx)

    if not metrics:
        metrics = get_default_metrics(config)
        logger.info(f"Using default metrics {metrics}")
    else:
        metrics = metrics.split(",")
        logger.info(f"Using specified metrics {metrics}")

    from wily.commands.diff import diff

    logger.debug(f"Running diff on {files} for metric {metrics}")
    diff(
        config=config,
        files=files,
        metrics=metrics,
        changes_only=not all,
        detail=detail,
        revision=revision,
    )
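
These CLI entry points rely on click's context object: ctx.obj["CONFIG"] carries the shared configuration between commands, and ctx.invoke (used in code example #11) re-enters another command programmatically. A minimal sketch of how such a command is typically wired up, assuming click's decorator API (the decorators and the config stand-in below are assumptions; the scraped examples omit them):

import click

@click.group()
@click.pass_context
def cli(ctx):
    """Root command; stashes shared state on the click context."""
    ctx.ensure_object(dict)
    ctx.obj["CONFIG"] = {}  # stand-in for wily's loaded configuration object

@cli.command()
@click.argument("files", nargs=-1)
@click.option("--metrics", default=None, help="Comma-separated metric names")
@click.pass_context
def diff(ctx, files, metrics):
    config = ctx.obj["CONFIG"]  # every subcommand reads the shared config
    ...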
Code example #2
def build(ctx, max_revisions, targets, operators, archiver):
    """Build the wily cache."""
    config = ctx.obj["CONFIG"]

    from wily.commands.build import build

    if max_revisions:
        logger.debug(f"Fixing revisions to {max_revisions}")
        config.max_revisions = max_revisions

    if operators:
        logger.debug(f"Fixing operators to {operators}")
        config.operators = operators.strip().split(",")

    if archiver:
        logger.debug(f"Fixing archiver to {archiver}")
        config.archiver = archiver

    if targets:
        logger.debug(f"Fixing targets to {targets}")
        config.targets = targets

    build(
        config=config,
        archiver=resolve_archiver(config.archiver),
        operators=resolve_operators(config.operators),
    )
    logger.info(
        "Completed building wily history, run `wily report <file>` or `wily index` to see more."
    )
Code example #3
def report(ctx, file, metrics, number, message, format, console_format,
           output):
    """Show metrics for a given file."""
    config = ctx.obj["CONFIG"]

    if not exists(config):
        handle_no_cache(ctx)

    if not metrics:
        metrics = get_default_metrics(config)
        logger.info(f"Using default metrics {metrics}")

    new_output = Path().cwd()
    if output:
        new_output = new_output / Path(output)
    else:
        new_output = new_output / "wily_report" / "index.html"

    from wily.commands.report import report

    logger.debug(f"Running report on {file} for metric {metrics}")
    logger.debug(f"Output format is {format}")

    report(
        config=config,
        path=file,
        metrics=metrics,
        n=number,
        output=new_output,
        include_message=message,
        format=ReportFormat[format],
        console_format=console_format,
    )
Code example #4
File: diff.py Project: ewuerger/dbwily
def generate_json_diff(
    path: Path,
    output: Path,
    data: T.List[T.Tuple[str, ...]],
    headers: T.Tuple[str, ...],
) -> None:
    """
    Write the diff of the latest commit for the code file/dir found on path to a JSON file.

    :param path: Path to measured file/dir
    :param output: Destination path
    :param data: List of data-tuples
    :param headers: Tuple of metric names
    """
    report_path, report_output = check_output(output, ".json")
    files = [t for t in data if ":" not in t[0]]
    metric_data = dict(issues=[])
    for filet in files:
        file = filet[0]
        issue = dict(zip(headers, filet))
        issue["location"] = file
        metric_data["issues"].append(issue)

        funcs = [t for t in data if t[0].startswith(file) and t[0] != file]
        for tup in funcs:
            issue = dict(zip(["Function", *headers[1:]], tup))
            issue["location"] = file
            metric_data["issues"].append(issue)

    report_json_string = dumps(metric_data)
    report_output.write_text(report_json_string)

    logger.info(f"wily report on {str(path)} was saved to {report_path}")
Code example #5
def diff(ctx, files, metrics, all, detail, revision, format, output):
    """Show the differences in metrics for each file."""
    config = ctx.obj["CONFIG"]

    if not exists(config):
        handle_no_cache(ctx)

    if not metrics:
        metrics = get_default_metrics(config)
        logger.info(f"Using default metrics {metrics}")
    else:
        metrics = metrics.split(",")
        logger.info(f"Using specified metrics {metrics}")

    new_output = output if output is None else Path().cwd() / Path(output)

    from wily.commands.diff import diff

    logger.debug(f"Running diff on {files} for metric {metrics}")
    logger.debug(f"Output format is {format}")

    diff(
        config=config,
        files=files,
        metrics=metrics,
        changes_only=not all,
        detail=detail,
        output=new_output,
        revision=revision,
        format=ReportFormat[format],
    )
Code example #6
def clean(ctx, yes):
    """Clear the .wily/ folder."""
    config = ctx.obj["CONFIG"]

    if not exists(config):
        logger.info("Wily cache does not exist, nothing to remove.")
        exit(0)

    if not yes:
        p = input("Are you sure you want to delete wily cache? [y/N]")
        if p.lower() != "y":
            exit(0)

    from wily.cache import clean

    clean(config)
Code example #7
def generate_json_report(path: Path, output: Path, data: T.List[T.Tuple[str, ...]],
                         headers: T.Tuple[str, ...]) -> None:
    """
    Write a report of the latest commit for the code file/dir found on path to a JSON file.

    :param path: Path to measured file/dir
    :param output: Destination path
    :param data: List of data-tuples
    :param headers: Tuple of metric names
    """
    report_path, report_output = check_output(output, ".json")
    metric_data = dict(zip(headers, data[-1]))
    metric_data["location"] = str(path)
    report_json_string = dumps(dict(issues=[metric_data]))
    report_output.write_text(report_json_string)

    logger.info(f"wily report on {str(path)} was saved to {report_path}")
Code example #8
File: __main__.py Project: Kilo59/wily
def report(ctx, file, metrics, number, message):
    """Show metrics for a given file."""
    config = ctx.obj["CONFIG"]

    if not exists(config):
        handle_no_cache(ctx)

    if not metrics:
        metrics = get_default_metrics(config)
        logger.info(f"Using default metrics {metrics}")

    from wily.commands.report import report

    logger.debug(f"Running report on {file} for metric {metrics}")
    report(config=config,
           path=file,
           metrics=metrics,
           n=number,
           include_message=message)
Code example #9
def generate_html_report(path: Path, output: Path, data: T.List[T.Tuple[str, ...]],
                         headers: T.Tuple[str, ...]) -> None:
    """
    Make an HTML report from metrics data for the code file/dir found on path.

    :param path: Path to measured file/dir
    :param output: Destination path
    :param data: List of data-tuples
    :param headers: Tuple of header strings for the metrics table
    """
    report_path, report_output = check_output(output)
    templates_dir = (Path(__file__).parents[1] / "templates").resolve()
    report_template = Template(
        (templates_dir / "report_template.html").read_text())

    table_headers = "".join([f"<th>{header}</th>" for header in headers])
    table_content = ""
    for line in data[::-1]:
        table_content += "<tr>"
        for element in line:
            element = element.replace("[32m", "<span class='green-color'>")
            element = element.replace("[31m", "<span class='red-color'>")
            element = element.replace("[33m", "<span class='orange-color'>")
            element = element.replace("[0m", "</span>")
            table_content += f"<td>{element}</td>"
        table_content += "</tr>"

    report_template = report_template.safe_substitute(headers=table_headers,
                                                      content=table_content)
    report_output.write_text(report_template)

    try:
        copytree(str(templates_dir / "css"), str(report_path / "css"))
    except FileExistsError:
        pass

    logger.info(f"wily report on {str(path)} was saved to {report_path}")
Code example #10
def build(config, archiver, operators):
    """
    Build the history given an archiver and a collection of operators.

    :param config: The wily configuration
    :type  config: :namedtuple:`wily.config.WilyConfig`

    :param archiver: The archiver to use
    :type  archiver: :namedtuple:`wily.archivers.Archiver`

    :param operators: The list of operators to execute
    :type operators: `list` of :namedtuple:`wily.operators.Operator`
    """
    try:
        logger.debug(f"Using {archiver.name} archiver module")
        archiver = archiver.cls(config)
        revisions = archiver.revisions(config.path, config.max_revisions)
    except InvalidGitRepositoryError:
        # TODO: This logic shouldn't really be here (SoC)
        logger.info(f"Defaulting back to the filesystem archiver, not a valid git repo")
        archiver = FilesystemArchiver(config)
        revisions = archiver.revisions(config.path, config.max_revisions)
    except Exception as e:
        if hasattr(e, "message"):
            logger.error(f"Failed to setup archiver: '{e.message}'")
        else:
            logger.error(f"Failed to setup archiver: '{type(e)} - {e}'")
        exit(1)

    state = State(config, archiver=archiver)
    # Check for existence of cache, else provision
    state.ensure_exists()

    index = state.index[archiver.name]

    # remove existing revisions from the list
    revisions = [revision for revision in revisions if revision not in index][::-1]

    logger.info(
        f"Found {len(revisions)} revisions from '{archiver.name}' archiver in '{config.path}'."
    )

    _op_desc = ",".join([operator.name for operator in operators])
    logger.info(f"Running operators - {_op_desc}")

    bar = Bar("Processing", max=len(revisions) * len(operators))
    state.operators = operators

    # Index all files the first time, only scan changes afterward
    seed = True
    prev_roots = None
    try:
        with multiprocessing.Pool(processes=len(operators)) as pool:
            for revision in revisions:
                # Checkout target revision
                archiver.checkout(revision, config.checkout_options)
                stats = {"operator_data": {}}

                if seed:
                    targets = config.targets
                else:  # Only target changed files
                    # TODO : Check that changed files are children of the targets
                    targets = [
                        str(pathlib.Path(config.path) / pathlib.Path(file))
                        for file in revision.files
                        # if any([True for target in config.targets if
                        #         target in pathlib.Path(pathlib.Path(config.path) / pathlib.Path(file)).parents])
                    ]

                # Run each operator as a separate process
                data = pool.starmap(
                    run_operator,
                    [(operator, revision, config, targets) for operator in operators],
                )
                # data is a list of 2-tuples, one per operator; the second
                # element (data[i][1]) holds the collected data
                operator_data_len = 2
                for i in range(0, len(operators)):
                    if i < len(data) and len(data[i]) >= operator_data_len and len(data[i][1]) == 0:
                        logger.warning(f"In revision {revision.key}, for operator {operators[i].name}: No data collected")

                # Map the data back into a dictionary
                for operator_name, result in data:
                    # find all unique directories in the results
                    roots = {pathlib.Path(entry).parents[0] for entry in result.keys()}
                    indices = set(result.keys())

                    # For a seed run, there is no previous change set, so use current
                    if seed:
                        prev_roots = roots
                        prev_indices = indices
                    roots = prev_roots | roots

                    # Copy the results for any unchanged files from the previous revision
                    if not seed:
                        missing_indices = prev_indices - indices
                        # TODO: Check existence of file path.
                        for missing in missing_indices:
                            # Don't copy aggregate keys as their values may have changed
                            if missing in roots:
                                continue
                            # previous index may not have that operator
                            if operator_name not in prev_stats["operator_data"]:
                                continue
                            # previous index may not have file either
                            if (
                                missing
                                not in prev_stats["operator_data"][operator_name]
                            ):
                                continue
                            result[missing] = prev_stats["operator_data"][
                                operator_name
                            ][missing]

                    # Aggregate metrics across all root paths using the aggregate function in the metric
                    for root in roots:
                        # find all matching entries recursively
                        aggregates = [
                            path
                            for path in result.keys()
                            if root in pathlib.Path(path).parents
                        ]
                        result[str(root)] = {"total": {}}
                        # aggregate values
                        for metric in resolve_operator(operator_name).cls.metrics:
                            func = metric.aggregate
                            values = [
                                result[aggregate]["total"][metric.name]
                                for aggregate in aggregates
                                if aggregate in result
                                and metric.name in result[aggregate]["total"]
                            ]
                            if len(values) > 0:
                                result[str(root)]["total"][metric.name] = func(values)

                    prev_indices = set(result.keys())
                    prev_roots = roots
                    stats["operator_data"][operator_name] = result
                    bar.next()

                prev_stats = stats
                seed = False
                ir = index.add(revision, operators=operators)
                ir.store(config, archiver, stats)
        index.save()
        bar.finish()
    except Exception as e:
        logger.error(f"Failed to build cache: {type(e)}: '{e}'")
        raise e
    finally:
        # Reset the archive after every run back to the head of the branch
        archiver.finish()
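
The aggregation pass above rolls file-level totals up into one entry per parent directory, using each metric's own aggregate function. A stripped-down version of the same rollup with sum as the aggregate (a sketch under assumed names, not wily's internals):

import pathlib

def rollup(result: dict, metric_name: str, aggregate=sum) -> None:
    """Add a per-directory total of metric_name to result, in place."""
    files = list(result)  # snapshot the file entries before adding directories
    roots = {pathlib.Path(entry).parent for entry in files}
    for root in roots:
        # gather the metric from every file at or below this directory
        values = [
            result[path]["total"][metric_name]
            for path in files
            if root in pathlib.Path(path).parents
            and metric_name in result[path]["total"]
        ]
        if values:
            result.setdefault(str(root), {"total": {}})["total"][metric_name] = aggregate(values)

result = {"pkg/a.py": {"total": {"loc": 10}}, "pkg/b.py": {"total": {"loc": 5}}}
rollup(result, "loc")
# result["pkg"]["total"]["loc"] == 15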
Code example #11
def handle_no_cache(context):
    """Handle lack-of-cache error, prompt user for index process."""
    logger.error(
        "Could not locate the wily cache. The cache is required to provide insights."
    )
    p = input("Do you want to run setup and index your project now? [y/N]")
    if p.lower() != "y":
        exit(1)
    else:
        revisions = input(
            "How many previous git revisions do you want to index? : ")
        revisions = int(revisions)
        path = input(
            "Path to your source files; comma-separated for multiple: ")
        paths = path.split(",")
        context.invoke(build,
                       max_revisions=revisions,
                       targets=paths,
                       operators=None)


if __name__ == "__main__":  # pragma: no cover
    try:
        cli()
    except Exception as runtime:
        logger.error(
            f"Oh no, Wily crashed! See {WILY_LOG_NAME} for information.")
        logger.info(
            "If you think this crash was unexpected, please raise an issue at https://github.com/tonybaloney/wily/issues and copy the log file into the issue report along with some information on what you were doing."
        )
        logger.debug(traceback.format_exc())
Code example #12
File: diff.py Project: ubaumann/wily
def diff(config,
         files,
         metrics,
         changes_only=True,
         detail=True,
         revision=None):
    """
    Show the differences in metrics for each of the files.

    :param config: The wily configuration
    :type  config: :namedtuple:`wily.config.WilyConfig`

    :param files: The files to compare.
    :type  files: ``list`` of ``str``

    :param metrics: The metrics to measure.
    :type  metrics: ``list`` of ``str``

    :param changes_only: Only include changed files in the output.
    :type  changes_only: ``bool``

    :param detail: Show details (function-level)
    :type  detail: ``bool``

    :param revision: Compare with specific revision
    :type  revision: ``str``
    """
    config.targets = files
    files = list(files)
    state = State(config)

    # Resolve target paths when the cli has specified --path
    if config.path != DEFAULT_PATH:
        targets = [str(Path(config.path) / Path(file)) for file in files]
    else:
        targets = files

    # Expand directories to paths
    files = [
        os.path.relpath(fn, config.path)
        for fn in radon.cli.harvest.iter_filenames(targets)
    ]
    logger.debug(f"Targeting - {files}")

    if not revision:
        target_revision = state.index[state.default_archiver].last_revision
    else:
        rev = resolve_archiver(
            state.default_archiver).cls(config).find(revision)
        logger.debug(f"Resolved {revision} to {rev.key} ({rev.message})")
        try:
            target_revision = state.index[state.default_archiver][rev.key]
        except KeyError:
            logger.error(
                f"Revision {revision} is not in the cache, make sure you have run wily build."
            )
            exit(1)

    logger.info(
        f"Comparing current with {format_revision(target_revision.revision.key)} by {target_revision.revision.author_name} on {format_date(target_revision.revision.date)}."
    )

    # Convert the list of metrics to a list of metric instances
    operators = {resolve_operator(metric.split(".")[0]) for metric in metrics}
    metrics = [(metric.split(".")[0], resolve_metric(metric))
               for metric in metrics]
    results = []

    # Build a set of operators
    with multiprocessing.Pool(processes=len(operators)) as pool:
        operator_exec_out = pool.starmap(run_operator,
                                         [(operator, None, config, targets)
                                          for operator in operators])
    data = {}
    for operator_name, result in operator_exec_out:
        data[operator_name] = result

    # Write a summary table
    extra = []
    for operator, metric in metrics:
        if detail and resolve_operator(operator).level == OperatorLevel.Object:
            for file in files:
                try:
                    extra.extend([
                        f"{file}:{k}"
                        for k in data[operator][file]["detailed"].keys()
                        if k != metric.name and isinstance(
                            data[operator][file]["detailed"][k], dict)
                    ])
                except KeyError:
                    logger.debug(f"File {file} not in cache")
                    logger.debug("Cache follows -- ")
                    logger.debug(data[operator])
    files.extend(extra)
    logger.debug(files)
    for file in files:
        metrics_data = []
        has_changes = False
        for operator, metric in metrics:
            try:
                current = target_revision.get(config, state.default_archiver,
                                              operator, file, metric.name)
            except KeyError:
                current = "-"
            try:
                new = get_metric(data, operator, file, metric.name)
            except KeyError:
                new = "-"
            if new != current:
                has_changes = True
            if metric.type in (int, float) and new != "-" and current != "-":
                if current > new:
                    metrics_data.append(
                        "{0:n} -> \u001b[{2}m{1:n}\u001b[0m".format(
                            current, new, BAD_COLORS[metric.measure]))
                elif current < new:
                    metrics_data.append(
                        "{0:n} -> \u001b[{2}m{1:n}\u001b[0m".format(
                            current, new, GOOD_COLORS[metric.measure]))
                else:
                    metrics_data.append("{0:n} -> {1:n}".format(current, new))
            else:
                if current == "-" and new == "-":
                    metrics_data.append("-")
                else:
                    metrics_data.append("{0} -> {1}".format(current, new))
        if has_changes or not changes_only:
            results.append((file, *metrics_data))
        else:
            logger.debug(metrics_data)

    descriptions = [metric.description for operator, metric in metrics]
    headers = ("File", *descriptions)
    if len(results) > 0:
        print(
            tabulate.tabulate(headers=headers,
                              tabular_data=results,
                              tablefmt=DEFAULT_GRID_STYLE))
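
get_metric is imported rather than defined in these examples. Judging from the cache layout seen in the build examples (file entries keyed under "total", function entries nested under "detailed", and the "file:function" keys produced above), a plausible reconstruction of it looks like the following; this is an assumption, not wily's verbatim code:

def get_metric(data, operator, path, key):
    """Fetch a metric for a file, or for an assumed 'file:function' entry."""
    if ":" in path:
        part, entry = path.split(":", 1)
        return data[operator][part]["detailed"][entry][key]
    return data[operator][path]["total"][key]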
Code example #13
File: index.py Project: ewuerger/dbwily
def index(config, include_message=False):
    """
    Show information about the cache and runtime.

    :param config: The wily configuration
    :type  config: :namedtuple:`wily.config.WilyConfig`

    :param include_message: Include revision messages
    :type  include_message: ``bool``
    """
    state = State(config=config)
    logger.debug("Running show command")
    logger.info("--------Configuration---------")
    logger.info(f"Path: {config.path}")
    logger.info(f"Archiver: {config.archiver}")
    logger.info(f"Operators: {config.operators}")
    logger.info("")
    logger.info("-----------History------------")

    data = []
    for archiver in state.archivers:
        for rev in state.index[archiver].revisions:
            if include_message:
                data.append(
                    (
                        format_revision(rev.revision.key),
                        rev.revision.author_name,
                        rev.revision.message[:MAX_MESSAGE_WIDTH],
                        format_date(rev.revision.date),
                    )
                )
            else:
                data.append(
                    (
                        format_revision(rev.revision.key),
                        rev.revision.author_name,
                        format_date(rev.revision.date),
                    )
                )

    if include_message:
        headers = ("Revision", "Author", "Message", "Date")
    else:
        headers = ("Revision", "Author", "Date")
    print(
        tabulate.tabulate(
            headers=headers, tabular_data=data, tablefmt=DEFAULT_GRID_STYLE
        )
    )
Code example #14
File: build.py Project: alegonz/wily
def build(config, archiver, operators):
    """
    Build the history given a archiver and collection of operators.

    :param config: The wily configuration
    :type  config: :namedtuple:`wily.config.WilyConfig`

    :param archiver: The archiver to use
    :type  archiver: :namedtuple:`wily.archivers.Archiver`

    :param operators: The list of operators to execute
    :type operators: `list` of :namedtuple:`wily.operators.Operator`
    """
    try:
        logger.debug(f"Using {archiver.name} archiver module")
        archiver = archiver.cls(config)
        revisions = archiver.revisions(config.path, config.max_revisions)
    except InvalidGitRepositoryError:
        # TODO: This logic shouldn't really be here (SoC)
        logger.info(
            "Defaulting back to the filesystem archiver, not a valid git repo"
        )
        archiver = FilesystemArchiver(config)
        revisions = archiver.revisions(config.path, config.max_revisions)
    except Exception as e:
        if hasattr(e, "message"):
            logger.error(f"Failed to setup archiver: '{e.message}'")
        else:
            logger.error(f"Failed to setup archiver: '{type(e)} - {e}'")
        exit(1)

    state = State(config, archiver=archiver)
    # Check for existence of cache, else provision
    state.ensure_exists()

    index = state.index[archiver.name]

    # remove existing revisions from the list
    revisions = [revision for revision in revisions if revision not in index]

    logger.info(
        f"Found {len(revisions)} revisions from '{archiver.name}' archiver in '{config.path}'."
    )

    _op_desc = ",".join([operator.name for operator in operators])
    logger.info(f"Running operators - {_op_desc}")

    bar = Bar("Processing", max=len(revisions) * len(operators))
    state.operators = operators
    try:
        with multiprocessing.Pool(processes=len(operators)) as pool:
            for revision in revisions:
                # Checkout target revision
                archiver.checkout(revision, config.checkout_options)
                stats = {"operator_data": {}}

                # Run each operator as a separate process
                data = pool.starmap(
                    run_operator,
                    [(operator, revision, config) for operator in operators],
                )

                # Map the data back into a dictionary
                for operator_name, result in data:
                    # aggregate values to directories
                    roots = []

                    # find all unique directories in the results
                    for entry in result.keys():
                        parent = pathlib.Path(entry).parents[0]
                        if parent not in roots:
                            roots.append(parent)

                    for root in roots:
                        # find all matching entries recursively
                        aggregates = [
                            path for path in result.keys()
                            if root in pathlib.Path(path).parents
                        ]
                        result[str(root)] = {"total": {}}
                        # aggregate values
                        for metric in resolve_operator(
                                operator_name).cls.metrics:
                            func = metric.aggregate
                            values = [
                                result[aggregate]["total"][metric.name]
                                for aggregate in aggregates
                                if aggregate in result
                                and metric.name in result[aggregate]["total"]
                            ]
                            if len(values) > 0:
                                result[str(root)]["total"][metric.name] = func(
                                    values)

                    stats["operator_data"][operator_name] = result
                    bar.next()

                ir = index.add(revision, operators=operators)
                ir.store(config, archiver, stats)
        index.save()
        bar.finish()
    except Exception as e:
        logger.error(f"Failed to build cache: '{e}'")
        raise e
    finally:
        # Reset the archive after every run back to the head of the branch
        archiver.finish()
Code example #15
def report(
    config: WilyConfig,
    path: Path,
    metrics: str,
    n: int,
    output: Path,
    include_message: bool = False,
    format: ReportFormat = ReportFormat.CONSOLE,
    console_format: str = None,
) -> None:
    """
    Show information about the cache and runtime.

    :param config: The configuration
    :type  config: :class:`wily.config.WilyConfig`

    :param path: The path to the file
    :type  path: ``str``

    :param metrics: Name of the metric to report on
    :type  metrics: ``str``

    :param n: Number of items to list
    :type  n: ``int``

    :param output: Output path
    :type  output: ``Path``

    :param include_message: Include revision messages
    :type  include_message: ``bool``

    :param format: Output format
    :type  format: ``ReportFormat``

    :param console_format: Grid format style for tabulate
    :type  console_format: ``str``
    """
    logger.debug("Running report command")
    logger.info(f"-----------History for {metrics}------------")

    data = []
    metric_metas = []

    for metric in metrics:
        operator, metric = resolve_metric_as_tuple(metric)
        # Set the delta colors depending on the metric type
        metric_meta = {
            "key": metric.name,
            "operator": operator.name,
            "title": metric.description,
            "type": metric.type,
            "measure": metric.measure,
        }
        metric_metas.append(metric_meta)

    state = State(config)
    for archiver in state.archivers:
        history = state.index[archiver].revisions[:n][::-1]
        last = {}
        for rev in history:
            vals = []
            for meta in metric_metas:
                try:
                    logger.debug(
                        f"Fetching metric {meta['key']} for {meta['operator']} in {path}"
                    )
                    val = rev.get(config, archiver, meta["operator"], path,
                                  meta["key"])
                    last_val = last.get(meta["key"], None)
                    # Measure the difference between this value and the last
                    if meta["type"] in (int, float):
                        delta = val - last_val if last_val else 0
                        change = delta
                    elif last_val:
                        delta = ord(last_val) - ord(
                            val) if last_val != val else 1
                        change = last_val
                    else:
                        delta = 1
                        change = val

                    last[meta["key"]] = val
                    if delta == 0:
                        delta_col = delta
                    elif delta < 0:
                        delta_col = _plant_delta_color(
                            BAD_COLORS[meta["measure"]], change)
                    else:
                        delta_col = _plant_delta_color(
                            GOOD_COLORS[meta["measure"]], change)
                    k = _plant_delta(val, delta_col)
                except KeyError as e:
                    k = f"Not found {e}"
                vals.append(k)
            if include_message:
                data.append((
                    format_revision(rev.revision.key),
                    rev.revision.message[:MAX_MESSAGE_WIDTH],
                    rev.revision.author_name,
                    format_date(rev.revision.date),
                    *vals,
                ))
            else:
                data.append((
                    format_revision(rev.revision.key),
                    rev.revision.author_name,
                    format_date(rev.revision.date),
                    *vals,
                ))
    descriptions = [meta["title"] for meta in metric_metas]
    if include_message:
        headers = (_("Revision"), _("Message"), _("Author"), _("Date"),
                   *descriptions)
    else:
        headers = (_("Revision"), _("Author"), _("Date"), *descriptions)

    if format in FORMAT_MAP:
        FORMAT_MAP[format](path, output, data, headers)
        return

    print(
        tabulate.tabulate(headers=headers,
                          tabular_data=data[::-1],
                          tablefmt=console_format))
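
_plant_delta and _plant_delta_color are factored-out helpers that are not shown in this listing. The inline logic in code example #16 below suggests they wrap the delta formatting; a hedged reconstruction along those lines (not the project's verbatim code):

def _plant_delta_color(color: int, change) -> str:
    """Wrap a change value in an ANSI color escape sequence."""
    return f"\u001b[{color}m{change}\u001b[0m"

def _plant_delta(val, delta_col) -> str:
    """Render a value alongside its (possibly colorized) delta."""
    return f"{val} ({delta_col})"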
Code example #16
def report(
    config,
    path,
    metrics,
    n,
    output,
    include_message=False,
    format=ReportFormat.CONSOLE,
    console_format=None,
):
    """
    Show information about the cache and runtime.

    :param config: The configuration
    :type  config: :class:`wily.config.WilyConfig`

    :param path: The path to the file
    :type  path: ``str``

    :param metrics: Name of the metric to report on
    :type  metrics: ``str``

    :param n: Number of items to list
    :type  n: ``int``

    :param output: Output path
    :type  output: ``Path``

    :param include_message: Include revision messages
    :type  include_message: ``bool``

    :param format: Output format
    :type  format: ``ReportFormat``

    :param console_format: Grid format style for tabulate
    :type  console_format: ``str``
    """
    logger.debug("Running report command")
    logger.info(f"-----------History for {metrics}------------")

    data = []
    metric_metas = []

    for metric in metrics:
        operator, metric = resolve_metric_as_tuple(metric)
        key = metric.name
        operator = operator.name
        # Set the delta colors depending on the metric type
        if metric.measure == MetricType.AimHigh:
            good_color = 32
            bad_color = 31
        elif metric.measure == MetricType.AimLow:
            good_color = 31
            bad_color = 32
        elif metric.measure == MetricType.Informational:
            good_color = 33
            bad_color = 33
        metric_meta = {
            "key": key,
            "operator": operator,
            "good_color": good_color,
            "bad_color": bad_color,
            "title": metric.description,
            "type": metric.type,
        }
        metric_metas.append(metric_meta)

    state = State(config)
    for archiver in state.archivers:
        # We have to do it backwards to get the deltas between releases
        history = state.index[archiver].revisions[:n][::-1]
        last = {}
        for rev in history:
            vals = []
            for meta in metric_metas:
                try:
                    logger.debug(
                        f"Fetching metric {meta['key']} for {meta['operator']} in {path}"
                    )
                    val = rev.get(config, archiver, meta["operator"], path,
                                  meta["key"])

                    last_val = last.get(meta["key"], None)
                    # Measure the difference between this value and the last
                    if meta["type"] in (int, float):
                        if last_val:
                            delta = val - last_val
                        else:
                            delta = 0
                        last[meta["key"]] = val
                    else:
                        # TODO : Measure ranking increases/decreases for str types?
                        delta = 0

                    if delta == 0:
                        delta_col = delta
                    elif delta < 0:
                        delta_col = f"\u001b[{meta['good_color']}m{delta:n}\u001b[0m"
                    else:
                        delta_col = f"\u001b[{meta['bad_color']}m+{delta:n}\u001b[0m"

                    if meta["type"] in (int, float):
                        k = f"{val:n} ({delta_col})"
                    else:
                        k = f"{val}"
                except KeyError as e:
                    k = f"Not found {e}"
                vals.append(k)
            if include_message:
                data.append((
                    format_revision(rev.revision.key),
                    rev.revision.message[:MAX_MESSAGE_WIDTH],
                    rev.revision.author_name,
                    format_date(rev.revision.date),
                    *vals,
                ))
            else:
                data.append((
                    format_revision(rev.revision.key),
                    rev.revision.author_name,
                    format_date(rev.revision.date),
                    *vals,
                ))
    descriptions = [meta["title"] for meta in metric_metas]
    if include_message:
        headers = ("Revision", "Message", "Author", "Date", *descriptions)
    else:
        headers = ("Revision", "Author", "Date", *descriptions)

    if format == ReportFormat.HTML:
        # Path.is_file is a method (always truthy unbound); the suffix check
        # alone decides whether output names an HTML file or a directory
        if output.suffix == ".html":
            report_path = output.parents[0]
            report_output = output
        else:
            report_path = output
            report_output = output.joinpath("index.html")

        report_path.mkdir(exist_ok=True, parents=True)

        templates_dir = (Path(__file__).parents[1] / "templates").resolve()
        report_template = Template(
            (templates_dir / "report_template.html").read_text())

        table_headers = "".join([f"<th>{header}</th>" for header in headers])
        table_content = ""
        for line in data[::-1]:
            table_content += "<tr>"
            for element in line:
                element = element.replace("[32m", "<span class='green-color'>")
                element = element.replace("[31m", "<span class='red-color'>")
                element = element.replace("[33m",
                                          "<span class='orange-color'>")
                element = element.replace("[0m", "</span>")
                table_content += f"<td>{element}</td>"
            table_content += "</tr>"

        report_template = report_template.safe_substitute(
            headers=table_headers, content=table_content)

        with report_output.open("w") as output:
            output.write(report_template)

        try:
            copytree(str(templates_dir / "css"), str(report_path / "css"))
        except FileExistsError:
            pass

        logger.info(f"wily report was saved to {report_path}")
    else:
        print(
            # But it still makes more sense to show the newest at the top, so reverse again
            tabulate.tabulate(headers=headers,
                              tabular_data=data[::-1],
                              tablefmt=console_format))
Code example #17
def rank(config, path, metric, revision_index, limit, threshold, descending):
    """
    Rank command ordering files, methods or functions using metrics.

    :param config: The configuration
    :type  config: :class:`wily.config.WilyConfig`

    :param path: The path to the file
    :type  path: ``str``

    :param metric: Name of the metric to report on
    :type  metric: ``str``

    :param revision_index: Version of git repository to revert to.
    :type  revision_index: ``str``

    :param limit: Limit the number of items in the table
    :type  limit: ``int``

    :param threshold: Return a non-zero exit code when the total value is beneath the threshold
    :type  threshold: ``int``

    :param descending: Rank in descending order of metric value
    :type  descending: ``bool``

    :return: Table of all files in path, sorted by metric.
    """
    logger.debug("Running rank command")

    data = []

    operator, metric = resolve_metric_as_tuple(metric)
    operator = operator.name

    state = State(config)

    if not revision_index:
        target_revision = state.index[state.default_archiver].last_revision
    else:
        rev = resolve_archiver(
            state.default_archiver).cls(config).find(revision_index)
        logger.debug(f"Resolved {revision_index} to {rev.key} ({rev.message})")
        try:
            target_revision = state.index[state.default_archiver][rev.key]
        except KeyError:
            logger.error(
                f"Revision {revision_index} is not in the cache, make sure you have run wily build."
            )
            exit(1)

    logger.info(
        f"-----------Rank for {metric.description} for {format_revision(target_revision.revision.key)} by {target_revision.revision.author_name} on {format_date(target_revision.revision.date)}.------------"
    )

    if path is None:
        files = target_revision.get_paths(config, state.default_archiver,
                                          operator)
        logger.debug(f"Analysing {files}")
    else:
        # Resolve target paths when the cli has specified --path
        if config.path != DEFAULT_PATH:
            targets = [str(Path(config.path) / Path(path))]
        else:
            targets = [path]

        # Expand directories to paths
        files = [
            os.path.relpath(fn, config.path)
            for fn in radon.cli.harvest.iter_filenames(targets)
        ]
        logger.debug(f"Targeting - {files}")

    for item in files:
        for archiver in state.archivers:
            try:
                logger.debug(
                    f"Fetching metric {metric.name} for {operator} in {str(item)}"
                )
                val = target_revision.get(config, archiver, operator,
                                          str(item), metric.name)
                data.append((item, val))
            except KeyError:
                logger.debug(f"Could not find file {item} in index")

    # Sort by ideal value
    data = sorted(data, key=op.itemgetter(1), reverse=descending)

    if limit:
        data = data[:limit]

    # Tack on the total row at the end
    total = metric.aggregate(rev[1] for rev in data)
    data.append(["Total", total])

    headers = ("File", metric.description)
    print(
        tabulate.tabulate(headers=headers,
                          tabular_data=data,
                          tablefmt=DEFAULT_GRID_STYLE))

    if threshold and total < threshold:
        logger.error(
            f"Total value below the specified threshold: {total} < {threshold}"
        )
        exit(1)
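
Because rank exits non-zero when the aggregate falls below the threshold, it can serve as a quality gate in CI. The core of that check, reduced to a few lines (sum stands in for metric.aggregate here; the real aggregate function depends on the metric):

import operator as op

data = [("a.py", 7.0), ("b.py", 3.5), ("c.py", 9.1)]
data = sorted(data, key=op.itemgetter(1), reverse=True)  # descending rank
total = sum(value for _, value in data)
if total < 25:  # threshold
    raise SystemExit(1)  # non-zero exit code fails the CI job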
Code example #18
File: report.py Project: Kilo59/wily
def report(config, path, metrics, n, include_message=False):
    """
    Show information about the cache and runtime.

    :param config: The configuration
    :type  config: :class:`wily.config.WilyConfig`

    :param path: The path to the file
    :type  path: ``str``

    :param metrics: Name of the metric to report on
    :type  metrics: ``str``

    :param n: Number of items to list
    :type  n: ``int``

    :param include_message: Include revision messages
    :type  include_message: ``bool``
    """
    logger.debug("Running report command")
    logger.info(f"-----------History for {metrics}------------")

    data = []
    metric_metas = []

    for metric in metrics:
        operator, key = metric.split(".")
        metric = resolve_metric(metric)
        # Set the delta colors depending on the metric type
        if metric.measure == MetricType.AimHigh:
            good_color = 32
            bad_color = 31
        elif metric.measure == MetricType.AimLow:
            good_color = 31
            bad_color = 32
        elif metric.measure == MetricType.Informational:
            good_color = 33
            bad_color = 33
        metric_meta = {
            "key": key,
            "operator": operator,
            "good_color": good_color,
            "bad_color": bad_color,
            "title": metric.description,
            "type": metric.type,
        }
        metric_metas.append(metric_meta)

    state = State(config)
    for archiver in state.archivers:
        # We have to do it backwards to get the deltas between releases
        history = state.index[archiver].revisions[:n][::-1]
        last = {}
        for rev in history:
            vals = []
            for meta in metric_metas:
                try:
                    logger.debug(
                        f"Fetching metric {meta['key']} for {meta['operator']} in {path}"
                    )
                    val = rev.get(config, archiver, meta["operator"], path,
                                  meta["key"])

                    last_val = last.get(meta["key"], None)
                    # Measure the difference between this value and the last
                    if meta["type"] in (int, float):
                        if last_val:
                            delta = val - last_val
                        else:
                            delta = 0
                        last[meta["key"]] = val
                    else:
                        # TODO : Measure ranking increases/decreases for str types?
                        delta = 0

                    if delta == 0:
                        delta_col = delta
                    elif delta < 0:
                        delta_col = f"\u001b[{meta['good_color']}m{delta:n}\u001b[0m"
                    else:
                        delta_col = f"\u001b[{meta['bad_color']}m+{delta:n}\u001b[0m"

                    if meta["type"] in (int, float):
                        k = f"{val:n} ({delta_col})"
                    else:
                        k = f"{val}"
                except KeyError as e:
                    k = f"Not found {e}"
                vals.append(k)
            if include_message:
                data.append((
                    format_revision(rev.revision.key),
                    rev.revision.message[:MAX_MESSAGE_WIDTH],
                    rev.revision.author_name,
                    format_date(rev.revision.date),
                    *vals,
                ))
            else:
                data.append((
                    format_revision(rev.revision.key),
                    rev.revision.author_name,
                    format_date(rev.revision.date),
                    *vals,
                ))
    descriptions = [meta["title"] for meta in metric_metas]
    if include_message:
        headers = ("Revision", "Message", "Author", "Date", *descriptions)
    else:
        headers = ("Revision", "Author", "Date", *descriptions)
    print(
        # But it still makes more sense to show the newest at the top, so reverse again
        tabulate.tabulate(headers=headers,
                          tabular_data=data[::-1],
                          tablefmt=DEFAULT_GRID_STYLE))