def run(self, module, options):
    """
    Run the operator.

    :param module: The target module path.
    :type module: ``str``

    :param options: Any runtime options.
    :type options: ``dict``

    :return: The operator results.
    :rtype: ``dict``
    """
    logger.debug("Running halstead harvester")
    collected = {}
    for filename, details in dict(self.harvester.results).items():
        collected[filename] = {}
        for entry in details:
            if not isinstance(entry, list):
                # A bare report is treated as the module-level result and
                # replaces whatever per-function data was gathered so far.
                collected[filename] = self._report_to_dict(entry)
                continue
            # A list entry holds (function name, report) pairs.
            for function, report in entry:
                collected[filename][function] = self._report_to_dict(report)
    return collected
def run(self, module, options):
    """
    Run the operator.

    :param module: The target module path.
    :type module: ``str``

    :param options: Any runtime options.
    :type options: ``dict``

    :return: The operator results.
    :rtype: ``dict``
    """
    logger.debug("Running halstead harvester")
    results = {}
    for filename, details in dict(self.harvester.results).items():
        results[filename] = {"detailed": {}, "total": {}}
        for instance in details:
            if isinstance(instance, list):
                # Function-level reports arrive as a list of
                # (function name, report) pairs.
                for item in instance:
                    function, report = item
                    results[filename]["detailed"][function] = self._report_to_dict(
                        report
                    )
            else:
                if isinstance(instance, str) and instance == "error":
                    # Harvester failures surface as the string "error" with
                    # the message stored under details["error"].
                    # Fix: the message previously never interpolated the
                    # filename.
                    logger.debug(
                        f"Failed to run Halstead harvester on {filename} : {details['error']}"
                    )
                    continue
                results[filename]["total"] = self._report_to_dict(instance)
    return results
def report(ctx, file, metrics, number, message, format, console_format, output):
    """Show metrics for a given file."""
    config = ctx.obj["CONFIG"]

    # A report needs an existing wily cache to read from.
    if not exists(config):
        handle_no_cache(ctx)

    if not metrics:
        metrics = get_default_metrics(config)
        logger.info(f"Using default metrics {metrics}")

    # Resolve the output target: an explicit path, or the default
    # wily_report/index.html under the current working directory.
    if output:
        new_output = Path().cwd() / Path(output)
    else:
        new_output = Path().cwd() / "wily_report" / "index.html"

    # Imported lazily so the CLI starts quickly.
    from wily.commands.report import report

    logger.debug(f"Running report on {file} for metric {metrics}")
    logger.debug(f"Output format is {format}")

    report(
        config=config,
        path=file,
        metrics=metrics,
        n=number,
        output=new_output,
        include_message=message,
        format=ReportFormat[format],
        console_format=console_format,
    )
def rank(ctx, path, metric, revision, limit, desc):
    """
    Rank files, methods and functions in order of any metrics, e.g. complexity.

    Some common examples:

    Rank all .py files within src/ for the maintainability.mi metric

        $ wily rank src/ maintainability.mi

    Rank all .py files in the index for the default metrics across all archivers

        $ wily rank
    """
    config = ctx.obj["CONFIG"]

    # Ranking requires an existing wily cache.
    if not exists(config):
        handle_no_cache(ctx)

    # Imported lazily so the CLI starts quickly.
    from wily.commands.rank import rank

    logger.debug(
        f"Running rank on {path} for metric {metric} and revision {revision}")

    rank(
        config=config,
        path=path,
        metric=metric,
        revision_index=revision,
        limit=limit,
        descending=desc,
    )
def get(self, config, archiver, operator, path, key):
    """
    Get the metric data for this indexed revision.

    :param config: The wily config.
    :type config: :class:`wily.config.WilyConfig`

    :param archiver: The archiver.
    :type archiver: :class:`wily.archivers.Archiver`

    :param operator: The operator to find
    :type operator: ``str``

    :param path: The path to find
    :type path: ``str``

    :param key: The metric key
    :type key: ``str``
    """
    # Lazily load and memoise this revision's operator data from the
    # cache; subsequent calls reuse self._data.
    if not self._data:
        self._data = cache.get(
            config=config, archiver=archiver, revision=self.revision.key
        )["operator_data"]
    logger.debug(f"Fetching metric {path} - {key} for operator {operator}")
    return get_metric(self._data, operator, path, key)
def diff(ctx, files, metrics, all, detail, revision):
    """Show the differences in metrics for each file."""
    config = ctx.obj["CONFIG"]

    # Diffing requires an existing wily cache.
    if not exists(config):
        handle_no_cache(ctx)

    if not metrics:
        # No metrics supplied: fall back to the configured defaults.
        metrics = get_default_metrics(config)
        logger.info(f"Using default metrics {metrics}")
    else:
        # Metrics arrive as a single comma-separated string.
        metrics = metrics.split(",")
        logger.info(f"Using specified metrics {metrics}")

    # Imported lazily so the CLI starts quickly.
    from wily.commands.diff import diff

    logger.debug(f"Running diff on {files} for metric {metrics}")

    diff(
        config=config,
        files=files,
        metrics=metrics,
        changes_only=not all,
        detail=detail,
        revision=revision,
    )
def run_operator(operator, revision, config, targets):
    """
    Run an operator for the multiprocessing pool.

    :param operator: The operator name
    :type operator: ``str``

    :param revision: The revision index
    :type revision: :class:`Revision`

    :param config: The runtime configuration
    :type config: :class:`WilyConfig`

    :param targets: Files/paths to scan
    :type targets: ``list`` of ``str``

    :rtype: ``tuple``
    :returns: A tuple of operator name (``str``), and data (``dict``)
    """
    instance = operator.cls(config, targets)
    logger.debug(f"Running {operator.name} operator on {revision}")

    data = instance.run(revision, config)

    # Normalize paths for non-seed passes: any absolute key is rewritten
    # relative to the configured project path.
    absolute_keys = [key for key in data if os.path.isabs(key)]
    for key in absolute_keys:
        data[os.path.relpath(key, config.path)] = data.pop(key)

    return operator.name, data
def graph(ctx, path, metrics, output, x_axis, changes):
    """
    Graph a specific metric for a given file, if a path is given, all files within path will be graphed.

    Some common examples:

    Graph all .py files within src/ for the raw.loc metric

        $ wily graph src/ raw.loc

    Graph test.py against raw.loc and cyclomatic.complexity metrics

        $ wily graph src/test.py raw.loc cyclomatic.complexity

    Graph test.py against raw.loc and raw.sloc on the x-axis

        $ wily graph src/test.py raw.loc --x-axis raw.sloc
    """
    config = ctx.obj["CONFIG"]

    # Graphing requires an existing wily cache.
    if not exists(config):
        handle_no_cache(ctx)

    # Imported lazily so the CLI starts quickly.
    from wily.commands.graph import graph

    # Fix: this previously logged "Running report", copied from the report
    # command; it is the graph command that is running.
    logger.debug(f"Running graph on {path} for metrics {metrics}")

    graph(
        config=config,
        path=path,
        metrics=metrics,
        output=output,
        x_axis=x_axis,
        changes=changes,
    )
def store_archiver_index(config, archiver, index):
    """
    Store an archiver's index record for faster search.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :param archiver: The name of the archiver type (e.g. 'git')
    :type archiver: ``str``

    :param index: The archiver index record
    :type index: ``dict``

    :return: The path to the written index file.
    :rtype: `pathlib.Path`
    """
    root = pathlib.Path(config.cache_path) / archiver.name

    if not root.exists():
        root.mkdir()
        logger.debug("Created archiver directory")

    # Newest revisions first, for fast "most recent" lookups.
    index = sorted(index, key=lambda k: k["date"], reverse=True)

    filename = root / "index.json"
    with open(filename, "w") as out:
        out.write(json.dumps(index, indent=2))
    # Fix: was an f-string with no placeholders (ruff F541).
    logger.debug("Created index output")
    return filename
def cli(ctx, debug, config, path):
    """
    \U0001F98A Inspect and search through the complexity of your source code.

    To get started, run setup:

        $ wily setup

    To reindex any changes in your source code:

        $ wily build <src>

    Then explore basic metrics with:

        $ wily report <file>

    You can also graph specific metrics in a browser with:

        $ wily graph <file> <metric>
    """
    ctx.ensure_object(dict)
    ctx.obj["DEBUG"] = debug

    # Verbosity is controlled purely through the logger level.
    logger.setLevel("DEBUG" if debug else "INFO")

    ctx.obj["CONFIG"] = load_config(config)

    if path:
        # An explicit path overrides both the target and cache locations.
        logger.debug(f"Fixing path to {path}")
        ctx.obj["CONFIG"].path = path
        ctx.obj["CONFIG"].cache_path = os.path.join(path, DEFAULT_CACHE_PATH)

    logger.debug(f"Loaded configuration from {config}")
def diff(ctx, files, metrics, all, detail, revision, format, output):
    """Show the differences in metrics for each file."""
    config = ctx.obj["CONFIG"]

    # Diffing requires an existing wily cache.
    if not exists(config):
        handle_no_cache(ctx)

    if metrics:
        # Metrics arrive as a single comma-separated string.
        metrics = metrics.split(",")
        logger.info(f"Using specified metrics {metrics}")
    else:
        metrics = get_default_metrics(config)
        logger.info(f"Using default metrics {metrics}")

    # Resolve an explicit output target against the working directory;
    # None means "write to the console".
    if output is None:
        new_output = None
    else:
        new_output = Path().cwd() / Path(output)

    # Imported lazily so the CLI starts quickly.
    from wily.commands.diff import diff

    logger.debug(f"Running diff on {files} for metric {metrics}")
    logger.debug(f"Output format is {format}")

    diff(
        config=config,
        files=files,
        metrics=metrics,
        changes_only=not all,
        detail=detail,
        output=new_output,
        revision=revision,
        format=ReportFormat[format],
    )
def __init__(self, config, targets): """ Instantiate a new Cyclomatic Complexity operator. :param config: The wily configuration. :type config: :class:`WilyConfig` """ # TODO: Import config for harvester from .wily.cfg logger.debug(f"Using {targets} with {self.defaults} for CC metrics") self.harvester = harvesters.CCHarvester(targets, config=Config(**self.defaults))
def __init__(self, config, targets): """ Instantiate a new raw operator. :param config: The wily configuration. :type config: :class:`WilyConfig` """ # TODO: Use config from wily.cfg for harvester logger.debug(f"Using {targets} with {self.defaults} for Raw metrics") self.harvester = harvesters.RawHarvester( targets, config=Config(**self.defaults))
def __init__(self, config): """ Instantiate a new HC operator. :param config: The wily configuration. :type config: :class:`WilyConfig` """ # TODO : Import config from wily.cfg logger.debug( f"Using {config.targets} with {self.defaults} for HC metrics") self.harvester = harvesters.HCHarvester(config.targets, config=Config(**self.defaults))
def clean(config):
    """
    Delete a wily cache.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`
    """
    if not exists(config):
        # Nothing to delete; treat as a no-op.
        logger.debug("Wily cache does not exist, skipping")
        return
    shutil.rmtree(config.cache_path)
    logger.debug("Deleted wily cache")
def store(config, archiver, revision, stats):
    """
    Store a revision record within an archiver folder.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :param archiver: The name of the archiver type (e.g. 'git')
    :type archiver: ``str``

    :param revision: The revision ID
    :type revision: ``str``

    :param stats: The collected data
    :type stats: ``dict``

    :return: The absolute path to the created file
    :rtype: `pathlib.Path`

    :raises RuntimeError: If the revision file already exists.
    """
    root = pathlib.Path(config.cache_path) / archiver.name

    if not root.exists():
        logger.debug("Creating wily cache")
        root.mkdir()

    # Fix absolute path references: when wily runs against a path other
    # than ".", operator data may be keyed on absolute paths; rewrite those
    # keys relative to the configured project path so the cache is portable.
    if config.path != ".":
        for operator, operator_data in list(stats["operator_data"].items()):
            if operator_data:
                new_operator_data = operator_data.copy()
                for k, v in list(operator_data.items()):
                    if os.path.isabs(k):
                        new_key = os.path.relpath(str(k), str(config.path))
                    else:
                        new_key = str(k)
                    del new_operator_data[k]
                    new_operator_data[new_key] = v
                del stats["operator_data"][operator]
                stats["operator_data"][operator] = new_operator_data

    logger.debug(f"Creating {revision.key} output")
    filename = root / (revision.key + ".json")
    if filename.exists():
        # Fix: the message previously never interpolated the filename,
        # making the error useless for diagnosing a corrupt index.
        raise RuntimeError(
            f"File {filename} already exists, index may be corrupt.")
    with open(filename, "w") as out:
        out.write(json.dumps(stats, indent=2))
    return filename
def build(ctx, max_revisions, targets, operators, archiver):
    """Build the wily cache."""
    config = ctx.obj["CONFIG"]

    # Imported lazily so the CLI starts quickly.
    from wily.commands.build import build

    # Command-line options override the loaded configuration.
    if max_revisions:
        logger.debug(f"Fixing revisions to {max_revisions}")
        config.max_revisions = max_revisions

    if operators:
        # Operators arrive as one comma-separated string.
        logger.debug(f"Fixing operators to {operators}")
        config.operators = operators.strip().split(",")

    if archiver:
        logger.debug(f"Fixing archiver to {archiver}")
        config.archiver = archiver

    if targets:
        logger.debug(f"Fixing targets to {targets}")
        config.targets = targets

    build(
        config=config,
        archiver=resolve_archiver(config.archiver),
        operators=resolve_operators(config.operators),
    )
    logger.info(
        "Completed building wily history, run `wily report <file>` or `wily index` to see more."
    )
def index(config, include_message=False):
    """
    Show information about the cache and runtime.

    :param config: The wily configuration
    :type config: :namedtuple:`wily.config.WilyConfig`

    :param include_message: Include revision messages
    :type include_message: ``bool``
    """
    state = State(config=config)
    logger.debug("Running show command")

    logger.info("--------Configuration---------")
    logger.info(f"Path: {config.path}")
    logger.info(f"Archiver: {config.archiver}")
    logger.info(f"Operators: {config.operators}")
    logger.info("")
    logger.info("-----------History------------")

    data = []
    for archiver in state.archivers:
        for rev in state.index[archiver].revisions:
            # Build each row incrementally; the message column is optional.
            row = [
                format_revision(rev.revision.key),
                rev.revision.author_name,
            ]
            if include_message:
                # Truncate long commit messages so the table stays readable.
                row.append(rev.revision.message[:MAX_MESSAGE_WIDTH])
            row.append(format_date(rev.revision.date))
            data.append(tuple(row))

    if include_message:
        headers = ("Revision", "Author", "Message", "Date")
    else:
        headers = ("Revision", "Author", "Date")

    print(
        tabulate.tabulate(
            headers=headers, tabular_data=data, tablefmt=DEFAULT_GRID_STYLE
        )
    )
def run(self, module, options):
    """
    Run the operator.

    :param module: The target module path.
    :type module: ``str``

    :param options: Any runtime options.
    :type options: ``dict``

    :return: The operator results.
    :rtype: ``dict``
    """
    logger.debug("Running raw harvester")
    # The radon harvester already yields {filename: metrics}; a shallow
    # dict() copy detaches the result from the harvester's internal state.
    return dict(self.harvester.results)
def create(config):
    """
    Create a wily cache.

    :param config: The configuration
    :type config: :class:`wily.config.WilyConfig`

    :return: The path to the cache
    :rtype: ``str``
    """
    if exists(config):
        # Idempotent: an existing cache is left untouched.
        logger.debug("Wily cache exists, skipping")
        return config.cache_path

    logger.debug(f"Creating wily cache {config.cache_path}")
    pathlib.Path(config.cache_path).mkdir(parents=True, exist_ok=True)

    create_index(config)

    return config.cache_path
def run(self, module, options):
    """
    Run the operator.

    :param module: The target module path.
    :type module: ``str``

    :param options: Any runtime options.
    :type options: ``dict``

    :return: The operator results.
    :rtype: ``dict``
    """
    logger.debug("Running maintainability harvester")
    # Wrap each file's maintainability data under a "total" key so the
    # result shape matches the other operators.
    return {
        filename: {"total": metrics}
        for filename, metrics in dict(self.harvester.results).items()
    }
def graph(ctx, path, metrics, output, x_axis, changes):
    """Output report to specified HTML path, e.g. reports/out.html."""
    config = ctx.obj["CONFIG"]

    # Graphing requires an existing wily cache.
    if not exists(config):
        handle_no_cache(ctx)

    # Imported lazily so the CLI starts quickly.
    from wily.commands.graph import graph

    # Fix: this previously logged "Running report", copied from the report
    # command; it is the graph command that is running.
    logger.debug(f"Running graph on {path} for metrics {metrics}")

    graph(
        config=config,
        path=path,
        metrics=metrics,
        output=output,
        x_axis=x_axis,
        changes=changes,
    )
def rank(ctx, path, metric, revision, limit, desc, threshold):
    """Rank files, methods and functions in order of any metrics, e.g. complexity."""
    config = ctx.obj["CONFIG"]

    # Ranking requires an existing wily cache.
    if not exists(config):
        handle_no_cache(ctx)

    # Imported lazily so the CLI starts quickly.
    from wily.commands.rank import rank

    logger.debug(f"Running rank on {path} for metric {metric} and revision {revision}")

    rank(
        config=config,
        path=path,
        metric=metric,
        revision_index=revision,
        limit=limit,
        threshold=threshold,
        descending=desc,
    )
def get_paths(self, config, archiver, operator):
    """
    Get the indexed paths for this indexed revision.

    :param config: The wily config.
    :type config: :class:`wily.config.WilyConfig`

    :param archiver: The archiver.
    :type archiver: :class:`wily.archivers.Archiver`

    :param operator: The operator to find
    :type operator: ``str``

    :return: The paths indexed for the operator.
    :rtype: ``list`` of ``str``
    """
    # Lazily load and memoise this revision's operator data from the
    # cache; subsequent calls reuse self._data.
    if not self._data:
        self._data = cache.get(
            config=config, archiver=archiver, revision=self.revision.key
        )["operator_data"]
    # Fix: was an f-string with no placeholders (ruff F541).
    logger.debug("Fetching keys")
    return list(self._data[operator].keys())
def report(ctx, file, metrics, number, message):
    """Show metrics for a given file."""
    config = ctx.obj["CONFIG"]

    # Reporting requires an existing wily cache.
    if not exists(config):
        handle_no_cache(ctx)

    if not metrics:
        # No metrics supplied: fall back to the configured defaults.
        metrics = get_default_metrics(config)
        logger.info(f"Using default metrics {metrics}")

    # Imported lazily so the CLI starts quickly.
    from wily.commands.report import report

    logger.debug(f"Running report on {file} for metric {metrics}")

    report(config=config, path=file, metrics=metrics, n=number, include_message=message)
def run(self, module, options):
    """
    Run the operator.

    :param module: The target module path.
    :type module: ``str``

    :param options: Any runtime options.
    :type options: ``dict``

    :return: The operator results.
    :rtype: ``dict``
    """
    logger.debug("Running CC harvester")
    results = {}
    for filename, details in dict(self.harvester.results).items():
        results[filename] = {}
        total = 0  # running CC total for the whole file
        for instance in details:
            if isinstance(instance, Class):
                i = self._dict_from_class(instance)
            elif isinstance(instance, Function):
                i = self._dict_from_function(instance)
            else:
                if isinstance(instance, str) and instance == "error":
                    # Harvester failures surface as the string "error" with
                    # the message stored under details["error"].
                    # Fix: the message previously never interpolated the
                    # filename.
                    logger.warning(
                        f"Failed to run CC harvester on {filename} : {details['error']}"
                    )
                    continue
                else:
                    logger.warning(
                        f"Unexpected result from Radon : {instance} of {type(instance)}. Please report on Github."
                    )
                    continue
            # Store under the fully-qualified name, then strip the key from
            # the stored dict itself (i is the same object).
            results[filename][i["fullname"]] = i
            del i["fullname"]
            total += i["complexity"]
        results[filename]["complexity"] = total
    return results
def __init__(self, config, archiver=None):
    """
    Instantiate a new process state.

    :param config: The wily configuration.
    :type config: :class:`WilyConfig`

    :param archiver: The archiver (optional).
    :type archiver: :class:`wily.archivers.Archiver`
    """
    # A single explicit archiver narrows the state to just that one;
    # otherwise every archiver present in the cache is loaded.
    self.archivers = [archiver.name] if archiver else cache.list_archivers(config)
    logger.debug(
        f"Initialised state indexes for archivers {self.archivers}")
    self.config = config
    self.index = {
        name: Index(self.config, resolve_archiver(name))
        for name in self.archivers
    }
    # The first archiver acts as the default for lookups.
    self.default_archiver = self.archivers[0]
def ensure_exists(self):
    """Ensure that cache directory exists."""
    if cache.exists(self.config):
        logger.debug(f"Cache {self.config.cache_path} exists")
        return
    # First run against this configuration: build the cache directory.
    logger.debug("Wily cache not found, creating.")
    cache.create(self.config)
    logger.debug("Created wily cache")
def cli(ctx, debug, config, path, cache):
    """CLI entry point."""
    ctx.ensure_object(dict)
    ctx.obj["DEBUG"] = debug

    # Verbosity is controlled purely through the logger level.
    logger.setLevel("DEBUG" if debug else "INFO")

    ctx.obj["CONFIG"] = load_config(config)

    if path:
        logger.debug(f"Fixing path to {path}")
        ctx.obj["CONFIG"].path = path

    if cache:
        logger.debug(f"Fixing cache to {cache}")
        ctx.obj["CONFIG"].cache_path = cache

    logger.debug(f"Loaded configuration from {config}")
    logger.debug(f"Capturing logs to {WILY_LOG_NAME}")
def diff(config, files, metrics, changes_only=True, detail=True):
    """
    Show the differences in metrics for each of the files.

    :param config: The wily configuration
    :type config: :namedtuple:`wily.config.WilyConfig`

    :param files: The files to compare.
    :type files: ``list`` of ``str``

    :param metrics: The metrics to measure.
    :type metrics: ``list`` of ``str``

    :param changes_only: Only include changes files in output.
    :type changes_only: ``bool``

    :param detail: Show details (function-level)
    :type detail: ``bool``
    """
    config.targets = files
    files = list(files)
    state = State(config)
    # The most recent indexed revision is the baseline to diff against.
    last_revision = state.index[state.default_archiver].revisions[0]

    # Convert the list of metrics to a list of metric instances
    operators = {resolve_operator(metric.split(".")[0]) for metric in metrics}
    metrics = [(metric.split(".")[0], resolve_metric(metric)) for metric in metrics]
    data = {}
    results = []

    # Build a set of operators
    _operators = [operator.cls(config) for operator in operators]

    # Operators expect to run from within the project path; restore the
    # original working directory afterwards.
    cwd = os.getcwd()
    os.chdir(config.path)
    for operator in _operators:
        logger.debug(f"Running {operator.name} operator")
        data[operator.name] = operator.run(None, config)
    os.chdir(cwd)

    # Write a summary table..
    extra = []
    for operator, metric in metrics:
        if detail and resolve_operator(operator).level == OperatorLevel.Object:
            # Object-level operators also report per-function/class entries;
            # expand each file into extra "<file>:<entity>" rows.
            for file in files:
                try:
                    extra.extend(
                        [
                            f"{file}:{k}"
                            for k in data[operator][file]["detailed"].keys()
                            if k != metric.name
                            and isinstance(data[operator][file]["detailed"][k], dict)
                        ]
                    )
                except KeyError:
                    logger.debug(f"File {file} not in cache")
                    logger.debug("Cache follows -- ")
                    logger.debug(data[operator])
    files.extend(extra)
    logger.debug(files)
    for file in files:
        metrics_data = []
        has_changes = False
        for operator, metric in metrics:
            # "-" marks a metric missing on either side of the diff.
            try:
                current = last_revision.get(
                    config, state.default_archiver, operator, file, metric.name
                )
            except KeyError as e:
                current = "-"
            try:
                new = get_metric(data, operator, file, metric.name)
            except KeyError as e:
                new = "-"
            if new != current:
                has_changes = True
            if metric.type in (int, float) and new != "-" and current != "-":
                # Colour numeric deltas via ANSI escapes according to
                # whether the movement is good or bad for this measure.
                if current > new:
                    metrics_data.append(
                        "{0:n} -> \u001b[{2}m{1:n}\u001b[0m".format(
                            current, new, BAD_COLORS[metric.measure]
                        )
                    )
                elif current < new:
                    metrics_data.append(
                        "{0:n} -> \u001b[{2}m{1:n}\u001b[0m".format(
                            current, new, GOOD_COLORS[metric.measure]
                        )
                    )
                else:
                    metrics_data.append("{0:n} -> {1:n}".format(current, new))
            else:
                if current == "-" and new == "-":
                    metrics_data.append("-")
                else:
                    metrics_data.append("{0} -> {1}".format(current, new))
        if has_changes or not changes_only:
            results.append((file, *metrics_data))
        else:
            logger.debug(metrics_data)

    descriptions = [metric.description for operator, metric in metrics]
    headers = ("File", *descriptions)
    if len(results) > 0:
        print(
            # But it still makes more sense to show the newest at the top, so reverse again
            tabulate.tabulate(
                headers=headers, tabular_data=results, tablefmt=DEFAULT_GRID_STYLE
            )
        )