def index(config, include_message=False):
    """
    Show information about the cache and runtime.

    Prints the wily configuration, then a table of every revision known to
    each configured archiver.

    :param config: The wily configuration
    :type config: :namedtuple:`wily.config.WilyConfig`

    :param include_message: Include revision messages
    :type include_message: ``bool``
    """
    state = State(config=config)
    logger.debug("Running show command")
    logger.info("--------Configuration---------")
    logger.info(f"Path: {config.path}")
    logger.info(f"Archiver: {config.archiver}")
    logger.info(f"Operators: {config.operators}")
    logger.info("")
    logger.info("-----------History------------")

    data = []
    for archiver in state.archivers:
        for rev in state.index[archiver].revisions:
            # Build each row incrementally; the optional message column sits
            # between the author and the date.
            row = [
                format_revision(rev.revision.key),
                rev.revision.author_name,
            ]
            if include_message:
                row.append(rev.revision.message[:MAX_MESSAGE_WIDTH])
            row.append(format_date(rev.revision.date))
            data.append(tuple(row))

    headers = (
        ("Revision", "Author", "Message", "Date")
        if include_message
        else ("Revision", "Author", "Date")
    )
    print(
        tabulate.tabulate(
            headers=headers, tabular_data=data, tablefmt=DEFAULT_GRID_STYLE
        )
    )
def diff(config, files, metrics, changes_only=True, detail=True):
    """
    Show the differences in metrics for each of the files.

    Runs every operator needed by *metrics* against the current working tree
    and compares the results to the most recent revision in the cache.

    :param config: The wily configuration
    :type config: :namedtuple:`wily.config.WilyConfig`

    :param files: The files to compare.
    :type files: ``list`` of ``str``

    :param metrics: The metrics to measure.
    :type metrics: ``list`` of ``str``

    :param changes_only: Only include changed files in output.
    :type changes_only: ``bool``

    :param detail: Show details (function-level)
    :type detail: ``bool``
    """
    config.targets = files
    files = list(files)
    state = State(config)
    # Most recent revision is the comparison baseline.
    last_revision = state.index[state.default_archiver].revisions[0]

    # Convert the list of metrics to a list of metric instances
    operators = {resolve_operator(metric.split(".")[0]) for metric in metrics}
    metrics = [(metric.split(".")[0], resolve_metric(metric)) for metric in metrics]
    data = {}
    results = []

    # Build a set of operators
    _operators = [operator.cls(config) for operator in operators]

    # Operators expect to run relative to the project path; restore cwd after.
    cwd = os.getcwd()
    os.chdir(config.path)
    for operator in _operators:
        logger.debug(f"Running {operator.name} operator")
        data[operator.name] = operator.run(None, config)
    os.chdir(cwd)

    # For object-level operators, expand "file" rows into "file:function" rows.
    extra = []
    for operator, metric in metrics:
        if detail and resolve_operator(operator).level == OperatorLevel.Object:
            for file in files:
                try:
                    extra.extend(
                        [
                            f"{file}:{k}"
                            for k in data[operator][file]["detailed"].keys()
                            if k != metric.name
                            and isinstance(data[operator][file]["detailed"][k], dict)
                        ]
                    )
                except KeyError:
                    logger.debug(f"File {file} not in cache")
                    logger.debug("Cache follows -- ")
                    logger.debug(data[operator])
    files.extend(extra)
    logger.debug(files)

    # Write a summary table comparing cached vs. freshly-measured values.
    for file in files:
        metrics_data = []
        has_changes = False
        for operator, metric in metrics:
            try:
                current = last_revision.get(
                    config, state.default_archiver, operator, file, metric.name
                )
            except KeyError:
                current = "-"
            try:
                new = get_metric(data, operator, file, metric.name)
            except KeyError:
                new = "-"
            if new != current:
                has_changes = True
            if metric.type in (int, float) and new != "-" and current != "-":
                # Color the arrow target by whether the metric got better or worse.
                if current > new:
                    metrics_data.append(
                        "{0:n} -> \u001b[{2}m{1:n}\u001b[0m".format(
                            current, new, BAD_COLORS[metric.measure]
                        )
                    )
                elif current < new:
                    metrics_data.append(
                        "{0:n} -> \u001b[{2}m{1:n}\u001b[0m".format(
                            current, new, GOOD_COLORS[metric.measure]
                        )
                    )
                else:
                    metrics_data.append("{0:n} -> {1:n}".format(current, new))
            else:
                if current == "-" and new == "-":
                    metrics_data.append("-")
                else:
                    metrics_data.append("{0} -> {1}".format(current, new))
        if has_changes or not changes_only:
            results.append((file, *metrics_data))
        else:
            logger.debug(metrics_data)

    descriptions = [metric.description for operator, metric in metrics]
    headers = ("File", *descriptions)
    if len(results) > 0:
        print(
            tabulate.tabulate(
                headers=headers, tabular_data=results, tablefmt=DEFAULT_GRID_STYLE
            )
        )
def rank(config, path, metric, revision_index, limit, threshold, descending):
    """
    Rank command ordering files, methods or functions using metrics.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :param path: The path to the file
    :type path: ``str``

    :param metric: Name of the metric to report on
    :type metric: ``str``

    :param revision_index: Version of git repository to revert to.
    :type revision_index: ``str``

    :param limit: Limit the number of items in the table
    :type limit: ``int``

    :param threshold: For total values beneath the threshold return a non-zero exit code
    :type threshold: ``int``

    :param descending: Rank in descending order of metric value
    :type descending: ``bool``

    :return: Sorted table of all files in path, sorted in order of metric.
    """
    logger.debug("Running rank command")
    data = []

    operator, metric = resolve_metric_as_tuple(metric)
    operator = operator.name

    state = State(config)

    if not revision_index:
        target_revision = state.index[state.default_archiver].last_revision
    else:
        rev = resolve_archiver(state.default_archiver).cls(config).find(revision_index)
        logger.debug(f"Resolved {revision_index} to {rev.key} ({rev.message})")
        try:
            target_revision = state.index[state.default_archiver][rev.key]
        except KeyError:
            logger.error(
                f"Revision {revision_index} is not in the cache, make sure you have run wily build."
            )
            exit(1)

    logger.info(
        f"-----------Rank for {metric.description} for {format_revision(target_revision.revision.key)} by {target_revision.revision.author_name} on {format_date(target_revision.revision.date)}.------------"
    )

    if path is None:
        # No path given: rank every path recorded for this operator.
        files = target_revision.get_paths(config, state.default_archiver, operator)
        logger.debug(f"Analysing {files}")
    else:
        # Resolve target paths when the cli has specified --path
        if config.path != DEFAULT_PATH:
            targets = [str(Path(config.path) / Path(path))]
        else:
            targets = [path]

        # Expand directories to paths
        files = [
            os.path.relpath(fn, config.path)
            for fn in radon.cli.harvest.iter_filenames(targets)
        ]
        logger.debug(f"Targeting - {files}")

    for item in files:
        for archiver in state.archivers:
            try:
                logger.debug(
                    f"Fetching metric {metric.name} for {operator} in {str(item)}"
                )
                value = target_revision.get(
                    config, archiver, operator, str(item), metric.name
                )
                data.append((item, value))
            except KeyError:
                logger.debug(f"Could not find file {item} in index")

    # Sort by ideal value
    data = sorted(data, key=op.itemgetter(1), reverse=descending)

    if limit:
        data = data[:limit]

    # Tack on the total row at the end (aggregate before truncating would
    # change behavior, so the total reflects only the displayed rows).
    total = metric.aggregate(entry[1] for entry in data)
    data.append(["Total", total])

    headers = ("File", metric.description)
    print(
        tabulate.tabulate(headers=headers, tabular_data=data, tablefmt=DEFAULT_GRID_STYLE)
    )

    if threshold and total < threshold:
        logger.error(
            f"Total value below the specified threshold: {total} < {threshold}"
        )
        exit(1)
def report(
    config,
    path,
    metrics,
    n,
    output,
    include_message=False,
    format=ReportFormat.CONSOLE,
    console_format=None,
):
    """
    Show information about the cache and runtime.

    Renders the metric history for *path* either to the console or as a
    standalone HTML report.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :param path: The path to the file
    :type path: ``str``

    :param metrics: Name of the metric to report on
    :type metrics: ``str``

    :param n: Number of items to list
    :type n: ``int``

    :param output: Output path
    :type output: ``Path``

    :param include_message: Include revision messages
    :type include_message: ``bool``

    :param format: Output format
    :type format: ``ReportFormat``

    :param console_format: Grid format style for tabulate
    :type console_format: ``str``
    """
    logger.debug("Running report command")
    logger.info(f"-----------History for {metrics}------------")

    data = []
    metric_metas = []

    for metric in metrics:
        operator, metric = resolve_metric_as_tuple(metric)
        key = metric.name
        operator = operator.name
        # Set the delta colors depending on the metric type
        if metric.measure == MetricType.AimHigh:
            good_color = 32
            bad_color = 31
        elif metric.measure == MetricType.AimLow:
            good_color = 31
            bad_color = 32
        elif metric.measure == MetricType.Informational:
            good_color = 33
            bad_color = 33
        metric_meta = {
            "key": key,
            "operator": operator,
            "good_color": good_color,
            "bad_color": bad_color,
            "title": metric.description,
            "type": metric.type,
        }
        metric_metas.append(metric_meta)

    state = State(config)
    for archiver in state.archivers:
        # We have to do it backwards to get the deltas between releases
        history = state.index[archiver].revisions[:n][::-1]
        last = {}
        for rev in history:
            vals = []
            for meta in metric_metas:
                try:
                    logger.debug(
                        f"Fetching metric {meta['key']} for {meta['operator']} in {path}"
                    )
                    val = rev.get(config, archiver, meta["operator"], path, meta["key"])

                    last_val = last.get(meta["key"], None)
                    # Measure the difference between this value and the last
                    if meta["type"] in (int, float):
                        if last_val:
                            delta = val - last_val
                        else:
                            delta = 0
                        last[meta["key"]] = val
                    else:
                        # TODO : Measure ranking increases/decreases for str types?
                        delta = 0

                    if delta == 0:
                        delta_col = delta
                    elif delta < 0:
                        delta_col = f"\u001b[{meta['good_color']}m{delta:n}\u001b[0m"
                    else:
                        delta_col = f"\u001b[{meta['bad_color']}m+{delta:n}\u001b[0m"

                    if meta["type"] in (int, float):
                        k = f"{val:n} ({delta_col})"
                    else:
                        k = f"{val}"
                except KeyError as e:
                    k = f"Not found {e}"
                vals.append(k)
            if include_message:
                data.append(
                    (
                        format_revision(rev.revision.key),
                        rev.revision.message[:MAX_MESSAGE_WIDTH],
                        rev.revision.author_name,
                        format_date(rev.revision.date),
                        *vals,
                    )
                )
            else:
                data.append(
                    (
                        format_revision(rev.revision.key),
                        rev.revision.author_name,
                        format_date(rev.revision.date),
                        *vals,
                    )
                )

    descriptions = [meta["title"] for meta in metric_metas]
    if include_message:
        headers = ("Revision", "Message", "Author", "Date", *descriptions)
    else:
        headers = ("Revision", "Author", "Date", *descriptions)

    if format == ReportFormat.HTML:
        # NOTE(review): `output.is_file` is not called here, so the bound
        # method is always truthy and only the ".html" suffix actually
        # decides the branch. Calling it would change behavior for paths
        # that do not exist yet -- confirm intent before "fixing".
        if output.is_file and output.suffix == ".html":
            report_path = output.parents[0]
            report_output = output
        else:
            report_path = output
            report_output = output.joinpath("index.html")

        report_path.mkdir(exist_ok=True, parents=True)

        templates_dir = (Path(__file__).parents[1] / "templates").resolve()
        report_template = Template((templates_dir / "report_template.html").read_text())

        table_headers = "".join([f"<th>{header}</th>" for header in headers])
        table_content = ""
        for line in data[::-1]:
            table_content += "<tr>"
            for element in line:
                # Translate the ANSI color codes embedded above into CSS spans.
                element = element.replace("[32m", "<span class='green-color'>")
                element = element.replace("[31m", "<span class='red-color'>")
                element = element.replace("[33m", "<span class='orange-color'>")
                element = element.replace("[0m", "</span>")
                table_content += f"<td>{element}</td>"
            table_content += "</tr>"

        report_template = report_template.safe_substitute(
            headers=table_headers, content=table_content
        )

        # Write under a distinct name so the `output` parameter is not shadowed.
        with report_output.open("w") as output_file:
            output_file.write(report_template)

        try:
            copytree(str(templates_dir / "css"), str(report_path / "css"))
        except FileExistsError:
            pass

        logger.info(f"wily report was saved to {report_path}")
    else:
        print(
            # But it still makes more sense to show the newest at the top, so reverse again
            tabulate.tabulate(
                headers=headers, tabular_data=data[::-1], tablefmt=console_format
            )
        )
def graph(
    config,
    path,
    metrics,
    output=None,
    x_axis=None,
    changes=True,
    text=False,
    aggregate=False,
):
    """
    Graph information about the cache and runtime.

    Builds one plotly scatter trace per matched file and renders them to an
    HTML report.

    :param config: The configuration.
    :type config: :class:`wily.config.WilyConfig`

    :param path: The path to the files.
    :type path: ``list``

    :param metrics: The Y and Z-axis metrics to report on.
    :type metrics: ``tuple``

    :param output: Save report to specified path instead of opening browser.
    :type output: ``str``

    :param x_axis: Metric to plot on the X-axis; defaults to revision history.
    :type x_axis: ``str``

    :param changes: Only plot revisions where the Y value changed.
    :type changes: ``bool``

    :param text: Show labels as text on the markers.
    :type text: ``bool``

    :param aggregate: Plot the directory itself rather than each file in it.
    :type aggregate: ``bool``
    """
    logger.debug("Running graph command")

    data = []
    state = State(config)

    abs_path = config.path / pathlib.Path(path)

    if x_axis is None:
        x_axis = "history"
    else:
        # Custom X-axis is itself a metric; resolve its operator/key pair.
        x_operator, x_key = metric_parts(x_axis)

    y_metric = resolve_metric(metrics[0])
    title = f"{x_axis.capitalize()} of {y_metric.description} for {path}{' aggregated' if aggregate else ''}"

    if abs_path.is_dir() and not aggregate:
        # A directory expands to every Python file beneath it.
        paths = [
            p.relative_to(config.path) for p in pathlib.Path(abs_path).glob("**/*.py")
        ]
    else:
        paths = [path]

    operator, key = metric_parts(metrics[0])
    if len(metrics) == 1:  # only y-axis
        z_axis = None
    else:
        # Optional second metric drives marker size.
        z_axis = resolve_metric(metrics[1])
        z_operator, z_key = metric_parts(metrics[1])

    for path in paths:
        x = []
        y = []
        z = []
        labels = []
        last_y = None
        for rev in state.index[state.default_archiver].revisions:
            try:
                val = rev.get(config, state.default_archiver, operator, str(path), key)
                # Skip unchanged values unless `changes` filtering is disabled.
                if val != last_y or not changes:
                    y.append(val)
                    if z_axis:
                        z.append(
                            rev.get(
                                config,
                                state.default_archiver,
                                z_operator,
                                str(path),
                                z_key,
                            )
                        )
                    if x_axis == "history":
                        x.append(format_datetime(rev.revision.date))
                    else:
                        x.append(
                            rev.get(
                                config,
                                state.default_archiver,
                                x_operator,
                                str(path),
                                x_key,
                            )
                        )
                    labels.append(
                        f"{rev.revision.author_name} <br>{rev.revision.message}"
                    )
                    last_y = val
            except KeyError:
                # missing data
                pass

        # Create traces
        trace = go.Scatter(
            x=x,
            y=y,
            mode="lines+markers+text" if text else "lines+markers",
            name=f"{path}",
            ids=state.index[state.default_archiver].revision_keys,
            text=labels,
            marker=dict(
                size=0 if z_axis is None else z,
                color=list(range(len(y))),
                # colorscale='Viridis',
            ),
            xcalendar="gregorian",
            hoveron="points+fills",
        )
        data.append(trace)
    if output:
        filename = output
        auto_open = False
    else:
        filename = "wily-report.html"
        auto_open = True
    plotly.offline.plot(
        {
            "data": data,
            "layout": go.Layout(
                title=title,
                xaxis={"title": x_axis},
                yaxis={"title": y_metric.description},
            ),
        },
        auto_open=auto_open,
        filename=filename,
    )
def build(config, archiver, operators):
    """
    Build the history given a archiver and collection of operators.

    Seeds the cache from the full target list on the first revision, then
    only re-scans changed files on subsequent revisions, copying unchanged
    results forward.

    :param config: The wily configuration
    :type config: :namedtuple:`wily.config.WilyConfig`

    :param archiver: The archiver to use
    :type archiver: :namedtuple:`wily.archivers.Archiver`

    :param operators: The list of operators to execute
    :type operators: `list` of :namedtuple:`wily.operators.Operator`
    """
    try:
        logger.debug(f"Using {archiver.name} archiver module")
        archiver = archiver.cls(config)
        revisions = archiver.revisions(config.path, config.max_revisions)
    except InvalidGitRepositoryError:
        # TODO: This logic shouldn't really be here (SoC)
        logger.info("Defaulting back to the filesystem archiver, not a valid git repo")
        archiver = FilesystemArchiver(config)
        revisions = archiver.revisions(config.path, config.max_revisions)
    except Exception as e:
        if hasattr(e, "message"):
            logger.error(f"Failed to setup archiver: '{e.message}'")
        else:
            logger.error(f"Failed to setup archiver: '{type(e)} - {e}'")
        exit(1)

    state = State(config, archiver=archiver)
    # Check for existence of cache, else provision
    state.ensure_exists()

    index = state.index[archiver.name]

    # remove existing revisions from the list, oldest first
    revisions = [revision for revision in revisions if revision not in index][::-1]

    logger.info(
        f"Found {len(revisions)} revisions from '{archiver.name}' archiver in '{config.path}'."
    )

    _op_desc = ",".join([operator.name for operator in operators])
    logger.info(f"Running operators - {_op_desc}")

    bar = Bar("Processing", max=len(revisions) * len(operators))
    state.operators = operators

    # Index all files the first time, only scan changes afterward
    seed = True
    prev_roots = None
    try:
        with multiprocessing.Pool(processes=len(operators)) as pool:
            for revision in revisions:
                # Checkout target revision
                archiver.checkout(revision, config.checkout_options)
                stats = {"operator_data": {}}

                if seed:
                    targets = config.targets
                else:
                    # Only target changed files
                    # TODO : Check that changed files are children of the targets
                    targets = [
                        str(pathlib.Path(config.path) / pathlib.Path(file))
                        for file in revision.files
                        # if any([True for target in config.targets if
                        #         target in pathlib.Path(pathlib.Path(config.path) / pathlib.Path(file)).parents])
                    ]

                # Run each operator as a separate process
                data = pool.starmap(
                    run_operator,
                    [(operator, revision, config, targets) for operator in operators],
                )
                # data is a list of tuples, where for each operator, it is a tuple of length 2,
                operator_data_len = 2
                # second element in the tuple, i.e data[i][1]) has the collected data
                for i in range(0, len(operators)):
                    if (
                        i < len(data)
                        and len(data[i]) >= operator_data_len
                        and len(data[i][1]) == 0
                    ):
                        # logging.warn is a deprecated alias; use warning()
                        logger.warning(
                            f"In revision {revision.key}, for operator {operators[i].name}: No data collected"
                        )

                # Map the data back into a dictionary
                for operator_name, result in data:
                    # find all unique directories in the results
                    roots = {pathlib.Path(entry).parents[0] for entry in result.keys()}
                    indices = set(result.keys())

                    # For a seed run, there is no previous change set, so use current
                    if seed:
                        prev_roots = roots
                        prev_indices = indices
                    roots = prev_roots | roots

                    # Copy the ir from any unchanged files from the prev revision
                    if not seed:
                        missing_indices = prev_indices - indices
                        # TODO: Check existence of file path.
                        for missing in missing_indices:
                            # Don't copy aggregate keys as their values may have changed
                            if missing in roots:
                                continue
                            # previous index may not have that operator
                            if operator_name not in prev_stats["operator_data"]:
                                continue
                            # previous index may not have file either
                            if missing not in prev_stats["operator_data"][operator_name]:
                                continue
                            result[missing] = prev_stats["operator_data"][operator_name][
                                missing
                            ]

                    # Aggregate metrics across all root paths using the aggregate function in the metric
                    for root in roots:
                        # find all matching entries recursively
                        aggregates = [
                            path
                            for path in result.keys()
                            if root in pathlib.Path(path).parents
                        ]
                        result[str(root)] = {"total": {}}
                        # aggregate values
                        for metric in resolve_operator(operator_name).cls.metrics:
                            func = metric.aggregate
                            values = [
                                result[aggregate]["total"][metric.name]
                                for aggregate in aggregates
                                if aggregate in result
                                and metric.name in result[aggregate]["total"]
                            ]
                            if len(values) > 0:
                                result[str(root)]["total"][metric.name] = func(values)

                    prev_indices = set(result.keys())
                    prev_roots = roots
                    stats["operator_data"][operator_name] = result
                    bar.next()

                prev_stats = stats
                seed = False
                ir = index.add(revision, operators=operators)
                ir.store(config, archiver, stats)
        index.save()
        bar.finish()
    except Exception as e:
        logger.error(f"Failed to build cache: {type(e)}: '{e}'")
        raise e
    finally:
        # Reset the archive after every run back to the head of the branch
        archiver.finish()
def report(config, path, metrics, n, include_message=False):
    """
    Show information about the cache and runtime.

    Prints a console table of the metric history for *path*, newest first,
    with colored deltas between consecutive revisions.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :param path: The path to the file
    :type path: ``str``

    :param metrics: Name of the metric to report on
    :type metrics: ``str``

    :param n: Number of items to list
    :type n: ``int``

    :param include_message: Include revision messages
    :type include_message: ``bool``
    """
    logger.debug("Running report command")
    logger.info(f"-----------History for {metrics}------------")

    data = []
    metric_metas = []

    for metric in metrics:
        # Metric names are "operator.metric" strings, e.g. "raw.loc".
        operator, key = metric.split(".")
        metric = resolve_metric(metric)
        # Set the delta colors depending on the metric type
        if metric.measure == MetricType.AimHigh:
            good_color = 32
            bad_color = 31
        elif metric.measure == MetricType.AimLow:
            good_color = 31
            bad_color = 32
        elif metric.measure == MetricType.Informational:
            good_color = 33
            bad_color = 33
        metric_meta = {
            "key": key,
            "operator": operator,
            "good_color": good_color,
            "bad_color": bad_color,
            "title": metric.description,
            "type": metric.type,
        }
        metric_metas.append(metric_meta)

    state = State(config)
    for archiver in state.archivers:
        # We have to do it backwards to get the deltas between releases
        history = state.index[archiver].revisions[:n][::-1]
        last = {}
        for rev in history:
            vals = []
            for meta in metric_metas:
                try:
                    logger.debug(
                        f"Fetching metric {meta['key']} for {meta['operator']} in {path}"
                    )
                    val = rev.get(config, archiver, meta["operator"], path, meta["key"])

                    last_val = last.get(meta["key"], None)
                    # Measure the difference between this value and the last
                    if meta["type"] in (int, float):
                        if last_val:
                            delta = val - last_val
                        else:
                            delta = 0
                        last[meta["key"]] = val
                    else:
                        # TODO : Measure ranking increases/decreases for str types?
                        delta = 0

                    # Negative delta rendered with the "good" color,
                    # positive with the "bad" color (colors were assigned
                    # per MetricType above).
                    if delta == 0:
                        delta_col = delta
                    elif delta < 0:
                        delta_col = f"\u001b[{meta['good_color']}m{delta:n}\u001b[0m"
                    else:
                        delta_col = f"\u001b[{meta['bad_color']}m+{delta:n}\u001b[0m"

                    if meta["type"] in (int, float):
                        k = f"{val:n} ({delta_col})"
                    else:
                        k = f"{val}"
                except KeyError as e:
                    # Metric missing for this revision/path.
                    k = f"Not found {e}"
                vals.append(k)
            if include_message:
                data.append(
                    (
                        format_revision(rev.revision.key),
                        rev.revision.message[:MAX_MESSAGE_WIDTH],
                        rev.revision.author_name,
                        format_date(rev.revision.date),
                        *vals,
                    )
                )
            else:
                data.append(
                    (
                        format_revision(rev.revision.key),
                        rev.revision.author_name,
                        format_date(rev.revision.date),
                        *vals,
                    )
                )

    descriptions = [meta["title"] for meta in metric_metas]
    if include_message:
        headers = ("Revision", "Message", "Author", "Date", *descriptions)
    else:
        headers = ("Revision", "Author", "Date", *descriptions)
    print(
        # But it still makes more sense to show the newest at the top, so reverse again
        tabulate.tabulate(
            headers=headers, tabular_data=data[::-1], tablefmt=DEFAULT_GRID_STYLE
        )
    )
def diff(config, files, metrics, changes_only=True, detail=True, revision=None):
    """
    Show the differences in metrics for each of the files.

    Measures the working tree with each required operator (in a process
    pool) and compares the results against a cached revision.

    :param config: The wily configuration
    :type config: :namedtuple:`wily.config.WilyConfig`

    :param files: The files to compare.
    :type files: ``list`` of ``str``

    :param metrics: The metrics to measure.
    :type metrics: ``list`` of ``str``

    :param changes_only: Only include changes files in output.
    :type changes_only: ``bool``

    :param detail: Show details (function-level)
    :type detail: ``bool``

    :param revision: Compare with specific revision
    :type revision: ``str``
    """
    config.targets = files
    files = list(files)
    state = State(config)

    # Resolve target paths when the cli has specified --path
    if config.path != DEFAULT_PATH:
        targets = [str(Path(config.path) / Path(file)) for file in files]
    else:
        targets = files

    # Expand directories to paths
    files = [
        os.path.relpath(fn, config.path)
        for fn in radon.cli.harvest.iter_filenames(targets)
    ]
    logger.debug(f"Targeting - {files}")

    if not revision:
        # Default baseline: the most recent cached revision.
        target_revision = state.index[state.default_archiver].last_revision
    else:
        rev = resolve_archiver(state.default_archiver).cls(config).find(revision)
        logger.debug(f"Resolved {revision} to {rev.key} ({rev.message})")
        try:
            target_revision = state.index[state.default_archiver][rev.key]
        except KeyError:
            logger.error(
                f"Revision {revision} is not in the cache, make sure you have run wily build."
            )
            exit(1)

    logger.info(
        f"Comparing current with {format_revision(target_revision.revision.key)} by {target_revision.revision.author_name} on {format_date(target_revision.revision.date)}."
    )

    # Convert the list of metrics to a list of metric instances
    operators = {resolve_operator(metric.split(".")[0]) for metric in metrics}
    metrics = [(metric.split(".")[0], resolve_metric(metric)) for metric in metrics]

    results = []

    # Build a set of operators
    with multiprocessing.Pool(processes=len(operators)) as pool:
        operator_exec_out = pool.starmap(
            run_operator, [(operator, None, config, targets) for operator in operators]
        )
    data = {}
    for operator_name, result in operator_exec_out:
        data[operator_name] = result

    # Write a summary table
    extra = []
    for operator, metric in metrics:
        # Object-level operators get per-function/method rows appended.
        if detail and resolve_operator(operator).level == OperatorLevel.Object:
            for file in files:
                try:
                    extra.extend(
                        [
                            f"{file}:{k}"
                            for k in data[operator][file]["detailed"].keys()
                            if k != metric.name
                            and isinstance(data[operator][file]["detailed"][k], dict)
                        ]
                    )
                except KeyError:
                    logger.debug(f"File {file} not in cache")
                    logger.debug("Cache follows -- ")
                    logger.debug(data[operator])
    files.extend(extra)
    logger.debug(files)
    for file in files:
        metrics_data = []
        has_changes = False
        for operator, metric in metrics:
            try:
                current = target_revision.get(
                    config, state.default_archiver, operator, file, metric.name
                )
            except KeyError:
                current = "-"
            try:
                new = get_metric(data, operator, file, metric.name)
            except KeyError:
                new = "-"
            if new != current:
                has_changes = True
            if metric.type in (int, float) and new != "-" and current != "-":
                # Color the new value by whether the metric improved or regressed.
                if current > new:
                    metrics_data.append(
                        "{0:n} -> \u001b[{2}m{1:n}\u001b[0m".format(
                            current, new, BAD_COLORS[metric.measure]
                        )
                    )
                elif current < new:
                    metrics_data.append(
                        "{0:n} -> \u001b[{2}m{1:n}\u001b[0m".format(
                            current, new, GOOD_COLORS[metric.measure]
                        )
                    )
                else:
                    metrics_data.append("{0:n} -> {1:n}".format(current, new))
            else:
                if current == "-" and new == "-":
                    metrics_data.append("-")
                else:
                    metrics_data.append("{0} -> {1}".format(current, new))
        if has_changes or not changes_only:
            results.append((file, *metrics_data))
        else:
            logger.debug(metrics_data)

    descriptions = [metric.description for operator, metric in metrics]
    headers = ("File", *descriptions)
    if len(results) > 0:
        print(
            # But it still makes more sense to show the newest at the top, so reverse again
            tabulate.tabulate(
                headers=headers, tabular_data=results, tablefmt=DEFAULT_GRID_STYLE
            )
        )
def build(config, archiver, operators):
    """
    Build the history given a archiver and collection of operators.

    Checks out each revision in turn, runs every operator in a process pool,
    aggregates per-directory totals, and stores the results in the index.

    :param config: The wily configuration
    :type config: :namedtuple:`wily.config.WilyConfig`

    :param archiver: The archiver to use
    :type archiver: :namedtuple:`wily.archivers.Archiver`

    :param operators: The list of operators to execute
    :type operators: `list` of :namedtuple:`wily.operators.Operator`
    """
    try:
        logger.debug(f"Using {archiver.name} archiver module")
        archiver = archiver.cls(config)
        revisions = archiver.revisions(config.path, config.max_revisions)
    except InvalidGitRepositoryError:
        # TODO: This logic shouldn't really be here (SoC)
        logger.info(
            f"Defaulting back to the filesystem archiver, not a valid git repo"
        )
        archiver = FilesystemArchiver(config)
        revisions = archiver.revisions(config.path, config.max_revisions)
    except Exception as e:
        # Archiver setup failure is fatal; some exception types carry a
        # `.message` attribute with a friendlier description.
        if hasattr(e, "message"):
            logger.error(f"Failed to setup archiver: '{e.message}'")
        else:
            logger.error(f"Failed to setup archiver: '{type(e)} - {e}'")
        exit(1)

    state = State(config, archiver=archiver)
    # Check for existence of cache, else provision
    state.ensure_exists()

    index = state.index[archiver.name]

    # remove existing revisions from the list
    revisions = [revision for revision in revisions if revision not in index]

    logger.info(
        f"Found {len(revisions)} revisions from '{archiver.name}' archiver in '{config.path}'."
    )

    _op_desc = ",".join([operator.name for operator in operators])
    logger.info(f"Running operators - {_op_desc}")

    bar = Bar("Processing", max=len(revisions) * len(operators))
    state.operators = operators
    try:
        with multiprocessing.Pool(processes=len(operators)) as pool:
            for revision in revisions:
                # Checkout target revision
                archiver.checkout(revision, config.checkout_options)
                stats = {"operator_data": {}}

                # Run each operator as a separate process
                data = pool.starmap(
                    run_operator,
                    [(operator, revision, config) for operator in operators],
                )

                # Map the data back into a dictionary
                for operator_name, result in data:
                    # aggregate values to directories
                    roots = []

                    # find all unique directories in the results
                    for entry in result.keys():
                        parent = pathlib.Path(entry).parents[0]
                        if parent not in roots:
                            roots.append(parent)

                    for root in roots:
                        # find all matching entries recursively
                        aggregates = [
                            path
                            for path in result.keys()
                            if root in pathlib.Path(path).parents
                        ]
                        result[str(root)] = {"total": {}}
                        # aggregate values
                        for metric in resolve_operator(operator_name).cls.metrics:
                            func = metric.aggregate
                            values = [
                                result[aggregate]["total"][metric.name]
                                for aggregate in aggregates
                                if aggregate in result
                                and metric.name in result[aggregate]["total"]
                            ]
                            if len(values) > 0:
                                result[str(root)]["total"][metric.name] = func(values)

                    stats["operator_data"][operator_name] = result
                    bar.next()

                ir = index.add(revision, operators=operators)
                ir.store(config, archiver, stats)
        index.save()
        bar.finish()
    except Exception as e:
        logger.error(f"Failed to build cache: '{e}'")
        raise e
    finally:
        # Reset the archive after every run back to the head of the branch
        archiver.finish()
def report(
    config: WilyConfig,
    path: Path,
    metrics: str,
    n: int,
    output: Path,
    include_message: bool = False,
    format: ReportFormat = ReportFormat.CONSOLE,
    console_format: str = None,
) -> None:
    """
    Show information about the cache and runtime.

    Collects the metric history for *path* and renders it either via a
    format handler from ``FORMAT_MAP`` or as a console table.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :param path: The path to the file
    :type path: ``str``

    :param metrics: Name of the metric to report on
    :type metrics: ``str``

    :param n: Number of items to list
    :type n: ``int``

    :param output: Output path
    :type output: ``Path``

    :param include_message: Include revision messages
    :type include_message: ``bool``

    :param format: Output format
    :type format: ``ReportFormat``

    :param console_format: Grid format style for tabulate
    :type console_format: ``str``
    """
    logger.debug("Running report command")
    logger.info(f"-----------History for {metrics}------------")

    data = []
    metric_metas = []

    for metric in metrics:
        operator, metric = resolve_metric_as_tuple(metric)
        # Set the delta colors depending on the metric type
        metric_meta = {
            "key": metric.name,
            "operator": operator.name,
            "title": metric.description,
            "type": metric.type,
            "measure": metric.measure,
        }
        metric_metas.append(metric_meta)

    state = State(config)
    for archiver in state.archivers:
        # We have to do it backwards to get the deltas between releases
        history = state.index[archiver].revisions[:n][::-1]
        last = {}
        for rev in history:
            vals = []
            for meta in metric_metas:
                try:
                    logger.debug(
                        f"Fetching metric {meta['key']} for {meta['operator']} in {path}"
                    )
                    val = rev.get(config, archiver, meta["operator"], path, meta["key"])

                    last_val = last.get(meta["key"], None)
                    # Measure the difference between this value and the last
                    if meta["type"] in (int, float):
                        delta = val - last_val if last_val else 0
                        change = delta
                    elif last_val:
                        # NOTE(review): for non-numeric metrics this compares
                        # the first character's code point only, and maps an
                        # unchanged value to delta == 1 -- presumably just a
                        # "changed/unchanged" signal; confirm intent.
                        delta = ord(last_val) - ord(val) if last_val != val else 1
                        change = last_val
                    else:
                        delta = 1
                        change = val
                    last[meta["key"]] = val

                    if delta == 0:
                        delta_col = delta
                    elif delta < 0:
                        delta_col = _plant_delta_color(
                            BAD_COLORS[meta["measure"]], change
                        )
                    else:
                        delta_col = _plant_delta_color(
                            GOOD_COLORS[meta["measure"]], change
                        )

                    k = _plant_delta(val, delta_col)
                except KeyError as e:
                    # Metric missing for this revision/path.
                    k = f"Not found {e}"
                vals.append(k)
            if include_message:
                data.append(
                    (
                        format_revision(rev.revision.key),
                        rev.revision.message[:MAX_MESSAGE_WIDTH],
                        rev.revision.author_name,
                        format_date(rev.revision.date),
                        *vals,
                    )
                )
            else:
                data.append(
                    (
                        format_revision(rev.revision.key),
                        rev.revision.author_name,
                        format_date(rev.revision.date),
                        *vals,
                    )
                )

    descriptions = [meta["title"] for meta in metric_metas]
    if include_message:
        headers = (_("Revision"), _("Message"), _("Author"), _("Date"), *descriptions)
    else:
        headers = (_("Revision"), _("Author"), _("Date"), *descriptions)

    # Delegate to a registered format handler (e.g. HTML) when available.
    if format in FORMAT_MAP:
        FORMAT_MAP[format](path, output, data, headers)
        return

    print(
        # But it still makes more sense to show the newest at the top, so reverse again
        tabulate.tabulate(
            headers=headers, tabular_data=data[::-1], tablefmt=console_format
        )
    )