def data():
    """Render the widget metric deltas for a posted diff as a JSON payload.

    Reads the diff from the POSTed form, computes metrics for it with the
    configured parsers, and returns rendered HTML under the 'metrics' key.
    """
    metric_names = set(widget_metrics)
    diff = flask.request.form['diff'].encode('UTF-8')
    # BUG FIX: yaml.load without a safe loader can construct arbitrary
    # python objects from the config file, and the io.open(...).read()
    # call leaked the file handle.  safe_load + a context manager fixes
    # both without changing the parsed result for plain-data YAML.
    with io.open('generate_config.yaml') as config_file:
        metric_config = GenerateOptions.from_yaml(
            yaml.safe_load(config_file.read()),
        )
    parsers = get_metric_parsers_from_args(
        metric_config.metric_package_names, skip_defaults=False,
    )
    metrics = get_metrics(diff, parsers)
    # Only keep metrics that actually changed and are whitelisted for the
    # widget display.
    metrics = [
        metric for metric in metrics
        if metric.value and metric.name in metric_names
    ]
    commit_deltas = sorted(
        CommitDeltaPresenter.from_data(
            metric.name,
            DeltaPresenter('javascript:;', metric.value),
        )
        for metric in metrics
    )
    return simplejson.dumps({
        'metrics': render_template('widget.mako', commit_deltas=commit_deltas),
    })
def data() -> str:
    """Render the widget metric deltas for a posted diff as a JSON payload.

    Reads the diff from the POSTed form, computes metrics for it with the
    configured parsers, and returns rendered HTML under the 'metrics' key.
    """
    metric_names = frozenset(flask.g.config.widget_metrics)
    diff = flask.request.form['diff'].encode('UTF-8')
    # BUG FIX: yaml.load without a safe loader can construct arbitrary
    # python objects from the config file, and open(...).read() leaked the
    # file handle.  safe_load + a context manager fixes both without
    # changing the parsed result for plain-data YAML.
    with open('generate_config.yaml') as config_file:
        metric_config = GenerateOptions.from_yaml(
            yaml.safe_load(config_file.read()),
        )
    parsers = get_metric_parsers_from_args(
        metric_config.metric_package_names, skip_defaults=False,
    )
    metrics = get_metrics(BLANK_COMMIT, diff, parsers, metric_config.exclude)
    # Only keep metrics that actually changed and are whitelisted for the
    # widget display.
    metrics = tuple(
        metric for metric in metrics
        if metric.value and metric.name in metric_names
    )
    commit_deltas = sorted(
        CommitDelta.from_data(
            metric.name,
            Delta('javascript:;', metric.value),
            color_overrides=flask.g.config.color_overrides,
        )
        for metric in metrics
    )
    return json.dumps({
        'metrics': render_template('widget.mako', commit_deltas=commit_deltas),
    })
def main(argv: Optional[Sequence[str]] = None) -> int:
    """List every metric parser discovered from config, with its metrics."""
    parser = argparse.ArgumentParser(description='List metric parsers')
    # optional
    options.add_color(parser)
    options.add_generate_config_filename(parser)
    cli_args = parser.parse_args(argv)
    use_color = cli_args.color in ('always', 'auto')
    args = get_options_from_config(cli_args.config_filename)
    discovered = get_metric_parsers_from_args(
        args.metric_package_names, args.skip_default_metrics,
    )

    def _sort_key(cls):
        return cls.__module__ + cls.__name__

    for parser_cls in sorted(discovered, key=_sort_key):
        module_part = color(parser_cls.__module__, CYAN, use_color)
        print('{} {}'.format(module_part, parser_cls.__name__))
        for name, description in parser_cls().get_metrics_info():
            suffix = f': {description}' if description else ''
            print(f' {name}{suffix}')
    return 0
def data():
    """Render the widget metric deltas for a posted diff as a JSON payload.

    Reads the diff from the POSTed form, computes metrics for it with the
    configured parsers, and returns rendered HTML under the 'metrics' key.
    """
    metric_names = frozenset(flask.g.config.widget_metrics)
    diff = flask.request.form['diff'].encode('UTF-8')
    # BUG FIX: yaml.load without a safe loader can construct arbitrary
    # python objects from the config file, and io.open(...).read() leaked
    # the file handle.  safe_load + a context manager fixes both without
    # changing the parsed result for plain-data YAML.
    with io.open('generate_config.yaml') as config_file:
        metric_config = GenerateOptions.from_yaml(
            yaml.safe_load(config_file.read()),
        )
    parsers = get_metric_parsers_from_args(
        metric_config.metric_package_names, skip_defaults=False,
    )
    metrics = get_metrics(Commit.blank, diff, parsers, metric_config.exclude)
    # Only keep metrics that actually changed and are whitelisted for the
    # widget display.
    metrics = [
        metric for metric in metrics
        if metric.value and metric.name in metric_names
    ]
    commit_deltas = sorted(
        CommitDelta.from_data(
            metric.name,
            Delta('javascript:;', metric.value),
            color_overrides=flask.g.config.color_overrides,
        )
        for metric in metrics
    )
    return json.dumps({
        'metrics': render_template('widget.mako', commit_deltas=commit_deltas),
    })
def main(argv=None):
    """Print each metric parser and the metric ids it can produce."""
    parser = argparse.ArgumentParser(description='List metric parsers')
    # optional
    options.add_color(parser)
    options.add_skip_default_metrics(parser)
    options.add_metric_package_names(parser)
    parsed = parser.parse_args(argv)
    use_color = parsed.color in ('always', 'auto')
    discovered = get_metric_parsers_from_args(
        parsed.metric_package_names, parsed.skip_default_metrics,
    )

    def _sort_key(cls):
        return cls.__module__ + cls.__name__

    for cls in sorted(discovered, key=_sort_key):
        print('{0} {1} {2!r}'.format(
            color(cls.__module__, CYAN, use_color),
            cls.__name__,
            sorted(cls().get_possible_metric_ids()),
        ))
def main(argv=None):
    """Print each metric parser and the metric ids it can produce."""
    # Default to the process arguments when none are passed explicitly.
    argv = sys.argv[1:] if argv is None else argv
    parser = argparse.ArgumentParser(description='List metric parsers')
    # optional
    options.add_color(parser)
    options.add_skip_default_metrics(parser)
    options.add_metric_package_names(parser)
    parsed = parser.parse_args(argv)
    use_color = parsed.color in ('always', 'auto')
    discovered = get_metric_parsers_from_args(
        parsed.metric_package_names, parsed.skip_default_metrics,
    )

    def _sort_key(cls):
        return cls.__module__ + cls.__name__

    for cls in sorted(discovered, key=_sort_key):
        print('{0} {1} {2!r}'.format(
            color(cls.__module__, CYAN, use_color),
            cls.__name__,
            sorted(cls().get_possible_metric_ids()),
        ))
def main(argv=None):
    """List every metric parser discovered from config, with its metrics."""
    parser = argparse.ArgumentParser(description='List metric parsers')
    # optional
    options.add_color(parser)
    options.add_generate_config_filename(parser)
    cli_args = parser.parse_args(argv)
    use_color = cli_args.color in ('always', 'auto')
    args = get_options_from_config(cli_args.config_filename)
    discovered = get_metric_parsers_from_args(
        args.metric_package_names, args.skip_default_metrics,
    )

    def _sort_key(cls):
        return cls.__module__ + cls.__name__

    for cls in sorted(discovered, key=_sort_key):
        print('{} {}'.format(
            color(cls.__module__, CYAN, use_color),
            cls.__name__,
        ))
        for name, description in cls().get_metrics_info():
            suffix = ': {}'.format(description) if description else ''
            print(' {}{}'.format(name, suffix))
def populate_metric_ids(db, package_names, skip_defaults):
    """Insert all discoverable metric ids into the ``metric_names`` table.

    :param db: an open sqlite3 connection with the schema already created.
    :param package_names: metric package names to search for parsers.
    :param skip_defaults: whether to omit the built-in default metrics.
    """
    metric_parsers = get_metric_parsers_from_args(package_names, skip_defaults)
    metric_ids = get_metric_ids(metric_parsers)
    # executemany batches the inserts into one call instead of one
    # round-trip per row; "name" uses standard identifier quoting instead
    # of SQLite's nonstandard single-quoted-identifier fallback.
    db.executemany(
        'INSERT INTO metric_names ("name") VALUES (?)',
        [(metric_id,) for metric_id in metric_ids],
    )
def populate_metric_ids(
        db: sqlite3.Connection,
        package_names: List[str],
        skip_defaults: bool,
) -> None:
    """Discover metric parsers and record their metrics info in the db."""
    parsers = get_metric_parsers_from_args(package_names, skip_defaults)
    insert_metrics_info(db, get_metrics_info(parsers))
def test_populate_metric_ids(tmpdir):
    """populate_metric_ids inserts one row per discoverable metric id."""
    db_path = os.path.join(tmpdir.strpath, 'db.db')
    with sqlite3.connect(db_path) as db:
        create_schema(db)
        populate_metric_ids(db, tuple(), False)
        rows = db.execute('SELECT * FROM metric_names').fetchall()
        # Smoke test assertion
        known_ids = get_metric_ids(get_metric_parsers_from_args(tuple(), False))
        assert len(rows) == len(known_ids)
def load_data(
        database_file,
        repo,
        package_names,
        skip_defaults,
):
    """Compute metrics for each new commit in ``repo`` and store them.

    :param database_file: path to the sqlite database.
    :param repo: repository to analyze.
    :param package_names: metric package names to search for parsers.
    :param skip_defaults: whether to omit the built-in default metrics.
    """
    metric_parsers = get_metric_parsers_from_args(package_names, skip_defaults)
    with sqlite3.connect(database_file) as db:
        metric_mapping = get_metric_mapping(db)
        repo_parser = RepoParser(repo)
        with repo_parser.repo_checked_out():
            previous_sha = get_previous_sha(db)
            commits = repo_parser.get_commits(since_sha=previous_sha)
            # If there is nothing to check gtfo
            if len(commits) == 1 and previous_sha is not None:
                return
            # Maps metric_name to a running value
            metric_values = collections.defaultdict(int)
            # Grab the state of our metrics at the last place
            compare_commit = None
            if previous_sha is not None:
                compare_commit = commits[0]
                metric_values.update(
                    get_metric_values(db, compare_commit.sha),
                )
                commits = commits[1:]
            # Pair each commit with its predecessor for diffing.
            mp_args = six.moves.zip(
                [compare_commit] + commits,
                commits,
                itertools.repeat(repo_parser),
                itertools.repeat(metric_parsers),
            )
            pool = multiprocessing.pool.Pool(15)
            # BUG FIX: the pool was previously never closed, leaking the 15
            # worker processes on every invocation.
            try:
                for commit, metrics in six.moves.zip(
                        commits, pool.imap(_get_metrics_inner, mp_args),
                ):
                    increment_metric_values(metric_values, metrics)
                    insert_metric_values(
                        db, metric_values, metric_mapping, commit,
                    )
                    insert_metric_changes(db, metrics, metric_mapping, commit)
            finally:
                pool.close()
                pool.join()
def load_data(
        database_file: str,
        repo: str,
        repo_type: str,
        package_names: List[str],
        skip_defaults: bool,
        exclude: Pattern[bytes],
        jobs: int,
) -> None:
    """Compute metrics for each new commit in ``repo`` and store them.

    :param database_file: path to the sqlite database.
    :param repo: repository to analyze.
    :param repo_type: either ``'git'`` or ``'hg'``.
    :param package_names: metric package names to search for parsers.
    :param skip_defaults: whether to omit the built-in default metrics.
    :param exclude: pattern of paths to exclude from diffs.
    :param jobs: number of parallel workers.
    :raises ValueError: if ``repo_type`` is not a known repository type.
    """
    metric_parsers = get_metric_parsers_from_args(package_names, skip_defaults)
    with sqlite3.connect(database_file) as db:
        metric_mapping = get_metric_mapping(db)
        has_data = get_metric_has_data(db)

        if repo_type == 'git':
            repo_parser = GitRepoParser(repo)
        elif repo_type == 'hg':
            repo_parser = HgRepoParser(repo)
        else:
            # BUG FIX: an unknown repo_type previously fell through and
            # crashed later with UnboundLocalError on repo_parser; fail
            # fast with a clear message instead.
            raise ValueError(f'unknown repo_type: {repo_type!r}')

        with repo_parser.repo_checked_out():
            previous_sha = get_previous_sha(db)
            commits = repo_parser.get_commits(since_sha=previous_sha)
            # If there is nothing to check gtfo
            if len(commits) == 1 and previous_sha is not None:
                return
            # Maps metric_id to a running value
            metric_values: Counter[int] = collections.Counter()
            # Grab the state of our metrics at the last place
            compare_commit = None
            if previous_sha is not None:
                compare_commit = commits.pop(0)
                metric_values.update(get_metric_values(db, compare_commit.sha))
            # Pair each commit with its predecessor for diffing.
            mp_args = zip(
                [compare_commit, *commits],
                commits,
                itertools.repeat(repo_parser),
                itertools.repeat(metric_parsers),
                itertools.repeat(exclude),
            )
            with mapper(jobs) as do_map:
                for commit, metrics in zip(
                        commits, do_map(_get_metrics_inner, mp_args),
                ):
                    update_has_data(db, metrics, metric_mapping, has_data)
                    increment_metrics(metric_values, metric_mapping, metrics)
                    insert_metric_values(db, metric_values, has_data, commit)
                    insert_metric_changes(db, metrics, metric_mapping, commit)
def load_data(
        database_file,
        repo,
        package_names,
        skip_defaults,
        exclude,
        jobs,
):
    """Compute metrics for each new commit in ``repo`` and store them.

    :param database_file: path to the sqlite database.
    :param repo: repository to analyze.
    :param package_names: metric package names to search for parsers.
    :param skip_defaults: whether to omit the built-in default metrics.
    :param exclude: pattern of paths to exclude (passed to the workers).
    :param jobs: parallelism passed to ``mapper``.
    """
    metric_parsers = get_metric_parsers_from_args(package_names, skip_defaults)
    with WriteableDatabaseLogic.for_sqlite(database_file) as db_logic:
        metric_mapping = db_logic.get_metric_mapping()
        has_data = db_logic.get_metric_has_data()
        repo_parser = RepoParser(repo)
        with repo_parser.repo_checked_out():
            previous_sha = db_logic.get_previous_sha()
            commits = repo_parser.get_commits(since_sha=previous_sha)
            # If there is nothing to check gtfo
            if len(commits) == 1 and previous_sha is not None:
                return
            # Maps metric_id to a running value
            metric_values = collections.Counter()
            # Grab the state of our metrics at the last place
            compare_commit = None
            if previous_sha is not None:
                # The first commit is the already-processed baseline: seed
                # the running totals from it, then drop it from the list.
                compare_commit = commits.pop(0)
                metric_values.update(
                    db_logic.get_metric_values(compare_commit.sha))
            # Pair each commit with its predecessor (baseline first) so
            # each worker diffs exactly one adjacent commit pair.
            mp_args = six.moves.zip(
                [compare_commit] + commits,
                commits,
                itertools.repeat(repo_parser),
                itertools.repeat(metric_parsers),
                itertools.repeat(exclude),
            )
            with mapper(jobs) as do_map:
                for commit, metrics in six.moves.zip(
                        commits, do_map(_get_metrics_inner, mp_args),
                ):
                    db_logic.update_has_data(metrics, metric_mapping, has_data)
                    increment_metrics(metric_values, metric_mapping, metrics)
                    db_logic.insert_metric_values(metric_values, has_data, commit)
                    db_logic.insert_metric_changes(metrics, metric_mapping, commit)
def test_populate_metric_ids(tmpdir):
    """populate_metric_ids inserts one row per discoverable metric id."""
    path = os.path.join(tmpdir.strpath, 'db.db')
    with sqlite3.connect(path) as db:
        create_schema(db)
        populate_metric_ids(db, tuple(), False)
        rows = db.execute('SELECT * FROM metric_names').fetchall()
        # Smoke test assertion
        expected = get_metric_ids(get_metric_parsers_from_args(tuple(), False))
        assert len(rows) == len(expected)
def load_data(
        database_file,
        repo,
        package_names,
        skip_defaults,
        exclude,
        jobs,
):
    """Compute metrics for each new commit in ``repo`` and store them.

    :param database_file: path to the sqlite database.
    :param repo: repository to analyze.
    :param package_names: metric package names to search for parsers.
    :param skip_defaults: whether to omit the built-in default metrics.
    :param exclude: pattern of paths to exclude (passed to the workers).
    :param jobs: parallelism passed to ``mapper``.
    """
    metric_parsers = get_metric_parsers_from_args(package_names, skip_defaults)
    with sqlite3.connect(database_file) as db:
        metric_mapping = get_metric_mapping(db)  # type: Dict[str, int]
        has_data = get_metric_has_data(db)  # type: Dict[int, bool]
        repo_parser = RepoParser(repo)
        with repo_parser.repo_checked_out():
            previous_sha = get_previous_sha(db)
            commits = repo_parser.get_commits(since_sha=previous_sha)
            # If there is nothing to check gtfo
            if len(commits) == 1 and previous_sha is not None:
                return
            # Maps metric_id to a running value
            metric_values = collections.Counter()  # type: Counter[int]
            # Grab the state of our metrics at the last place
            compare_commit = None
            if previous_sha is not None:
                # The first commit is the already-processed baseline: seed
                # the running totals from it, then drop it from the list.
                compare_commit = commits.pop(0)
                metric_values.update(get_metric_values(db, compare_commit.sha))
            # Pair each commit with its predecessor (baseline first) so
            # each worker diffs exactly one adjacent commit pair.
            mp_args = six.moves.zip(
                [compare_commit] + commits,
                commits,
                itertools.repeat(repo_parser),
                itertools.repeat(metric_parsers),
                itertools.repeat(exclude),
            )
            # NOTE(review): other revisions of this function use
            # ``with mapper(jobs) as do_map`` -- here mapper's return value
            # is used directly; confirm this version of mapper is a plain
            # callable and needs no cleanup.
            do_map = mapper(jobs)
            for commit, metrics in six.moves.zip(
                    commits, do_map(_get_metrics_inner, mp_args),
            ):
                update_has_data(db, metrics, metric_mapping, has_data)
                increment_metric_values(metric_values, metric_mapping, metrics)
                insert_metric_values(db, metric_values, has_data, commit)
                insert_metric_changes(db, metrics, metric_mapping, commit)
def load_data(
        database_file,
        repo,
        package_names,
        skip_defaults,
):
    """Compute metrics for each new commit in ``repo`` and store them.

    :param database_file: path to the sqlite database.
    :param repo: repository to analyze.
    :param package_names: metric package names to search for parsers.
    :param skip_defaults: whether to omit the built-in default metrics.
    """
    metric_parsers = get_metric_parsers_from_args(package_names, skip_defaults)
    with sqlite3.connect(database_file) as db:
        metric_mapping = get_metric_mapping(db)
        repo_parser = RepoParser(repo)
        with repo_parser.repo_checked_out():
            previous_sha = get_previous_sha(db)
            commits = repo_parser.get_commits(since_sha=previous_sha)
            # If there is nothing to check gtfo
            if len(commits) == 1 and previous_sha is not None:
                return
            # Maps metric_name to a running value
            metric_values = collections.defaultdict(int)
            # Grab the state of our metrics at the last place
            compare_commit = None
            if previous_sha is not None:
                compare_commit = commits[0]
                metric_values.update(get_metric_values(
                    db, compare_commit.sha,
                ))
                commits = commits[1:]
            # Pair each commit with its predecessor for diffing.
            mp_args = six.moves.zip(
                [compare_commit] + commits,
                commits,
                itertools.repeat(repo_parser),
                itertools.repeat(metric_parsers),
            )
            pool = multiprocessing.pool.Pool(15)
            # BUG FIX: the pool was previously never closed, leaking the 15
            # worker processes on every invocation.
            try:
                for commit, metrics in six.moves.zip(
                        commits, pool.imap(_get_metrics_inner, mp_args),
                ):
                    increment_metric_values(metric_values, metrics)
                    insert_metric_values(
                        db, metric_values, metric_mapping, commit,
                    )
                    insert_metric_changes(db, metrics, metric_mapping, commit)
            finally:
                pool.close()
                pool.join()
def load_data(
        database_file,
        repo,
        package_names,
        skip_defaults,
        tempdir_location,
):
    """Compute metrics for each new commit in ``repo`` and store them.

    Processes commits sequentially: each commit is diffed against the one
    before it and the running metric totals are written per commit.

    :param database_file: path to the sqlite database.
    :param repo: repository to analyze.
    :param package_names: metric package names to search for parsers.
    :param skip_defaults: whether to omit the built-in default metrics.
    :param tempdir_location: where RepoParser may place its checkout.
    """
    metric_parsers = get_metric_parsers_from_args(package_names, skip_defaults)
    with sqlite3.connect(database_file) as db:
        metric_mapping = get_metric_mapping(db)
        repo_parser = RepoParser(repo, tempdir_location=tempdir_location)
        with repo_parser.repo_checked_out():
            previous_sha = get_previous_sha(db)
            commits = repo_parser.get_commits(since_sha=previous_sha)
            # If there is nothing to check gtfo
            if len(commits) == 1 and previous_sha is not None:
                return
            # Maps metric_name to a running value
            metric_values = collections.defaultdict(int)
            # Grab the state of our metrics at the last place
            compare_commit = None
            if previous_sha is not None:
                # The first commit is the already-processed baseline: seed
                # the running totals from it, then drop it from the list.
                compare_commit = commits[0]
                metric_values.update(get_metric_values(
                    db, compare_commit.sha,
                ))
                commits = commits[1:]
            for commit in commits:
                if compare_commit is None:
                    # First-ever commit: diff against the empty tree.
                    diff = repo_parser.get_original_commit(commit.sha)
                else:
                    diff = repo_parser.get_commit_diff(
                        compare_commit.sha, commit.sha,
                    )
                metrics = get_metrics(diff, metric_parsers)
                increment_metric_values(metric_values, metrics)
                insert_metric_values(db, metric_values, metric_mapping, commit)
                insert_metric_changes(db, metrics, metric_mapping, commit)
                # This commit becomes the baseline for the next diff.
                compare_commit = commit
def load_data(
        database_file,
        repo,
        package_names,
        skip_defaults,
        exclude,
        jobs,
):
    """Compute metrics for each new commit in ``repo`` and store them.

    :param database_file: path to the sqlite database.
    :param repo: repository to analyze.
    :param package_names: metric package names to search for parsers.
    :param skip_defaults: whether to omit the built-in default metrics.
    :param exclude: pattern of paths to exclude (passed to the workers).
    :param jobs: parallelism passed to ``mapper``.
    """
    metric_parsers = get_metric_parsers_from_args(package_names, skip_defaults)
    with sqlite3.connect(database_file) as db:
        metric_mapping = get_metric_mapping(db)
        has_data = get_metric_has_data(db)
        repo_parser = RepoParser(repo)
        with repo_parser.repo_checked_out():
            previous_sha = get_previous_sha(db)
            commits = repo_parser.get_commits(since_sha=previous_sha)
            # If there is nothing to check gtfo
            if len(commits) == 1 and previous_sha is not None:
                return
            # Maps metric_id to a running value
            metric_values = collections.Counter()
            # Grab the state of our metrics at the last place
            compare_commit = None
            if previous_sha is not None:
                # The first commit is the already-processed baseline: seed
                # the running totals from it, then drop it from the list.
                compare_commit = commits.pop(0)
                metric_values.update(get_metric_values(db, compare_commit.sha))
            # Pair each commit with its predecessor (baseline first) so
            # each worker diffs exactly one adjacent commit pair.
            mp_args = six.moves.zip(
                [compare_commit] + commits,
                commits,
                itertools.repeat(repo_parser),
                itertools.repeat(metric_parsers),
                itertools.repeat(exclude),
            )
            with mapper(jobs) as do_map:
                for commit, metrics in six.moves.zip(
                        commits, do_map(_get_metrics_inner, mp_args),
                ):
                    update_has_data(db, metrics, metric_mapping, has_data)
                    increment_metrics(metric_values, metric_mapping, metrics)
                    insert_metric_values(db, metric_values, has_data, commit)
                    insert_metric_changes(db, metrics, metric_mapping, commit)
def main(argv):
    """Print each metric parser and the metric ids it can produce."""
    parser = argparse.ArgumentParser(description='List metric parsers')
    parser.add_argument(
        '--skip-default-metrics',
        default=False,
        action='store_true',
        help='Whether to skip default metrics',
    )
    parser.add_argument(
        '--color', default='auto', choices=['always', 'never', 'auto'],
    )
    parser.add_argument(
        'metric_package_names',
        type=str,
        nargs='*',
        help='Metric Package Names (such as foo.metrics bar.metrics)',
    )
    parsed = parser.parse_args(argv)
    use_color = parsed.color in ('always', 'auto')
    discovered = get_metric_parsers_from_args(
        parsed.metric_package_names, parsed.skip_default_metrics,
    )

    def _sort_key(cls):
        return cls.__module__ + cls.__name__

    for cls in sorted(discovered, key=_sort_key):
        line = '{0} {1} {2!r}'.format(
            color(cls.__module__, CYAN, use_color),
            cls.__name__,
            sorted(cls().get_possible_metric_ids()),
        )
        print(line)
def populate_metric_ids(db, package_names, skip_defaults):
    """Discover metric ids from the configured parsers and persist them."""
    parsers = get_metric_parsers_from_args(package_names, skip_defaults)
    write_logic.insert_metric_ids(db, get_metric_ids(parsers))
def populate_metric_ids(db, package_names, skip_defaults):
    """Discover metric parsers and record their metrics info in the db."""
    parsers = get_metric_parsers_from_args(package_names, skip_defaults)
    insert_metrics_info(db, get_metrics_info(parsers))
def populate_metric_ids(db_logic, package_names, skip_defaults):
    """Discover metric parsers and persist their metrics info via db_logic."""
    parsers = get_metric_parsers_from_args(package_names, skip_defaults)
    db_logic.insert_metrics_info(get_metrics_info(parsers))