def get_contributions(self, parsed_args):

    def rows():
        for filename in parsed_args.contribution_list:
            LOG.debug('reading %s', filename)
            with open(filename, 'r', encoding='utf-8') as f:
                reader = csv.DictReader(f)
                yield from reader

    data = rows()

    roles = parsed_args.role
    if roles:
        data = (d for d in data if d['Role'] in roles)

    ignore_teams = set(t.lower() for t in parsed_args.ignore_team)
    if ignore_teams:
        data = (d for d in data if d['Team'].lower() not in ignore_teams)

    only_teams = set(t.lower() for t in parsed_args.only_team)
    if only_teams:
        data = (d for d in data if d['Team'].lower() in only_teams)

    if parsed_args.only_sponsors:
        sponsor_map = sponsors.Sponsors(parsed_args.sponsor_level)
        data = (d for d in data if d['Organization'] in sponsor_map)

    ignore_tags = set(parsed_args.ignore_tag)
    if ignore_tags:
        team_data = governance.Governance(
            url=parsed_args.governance_project_list)
        data = (d for d in data
                if not team_data.get_repo_tags(
                    d['Project']).intersection(ignore_tags))

    only_tags = set(parsed_args.only_tag)
    if only_tags:
        team_data = governance.Governance(
            url=parsed_args.governance_project_list)
        data = (
            d for d in data
            if only_tags.issubset(team_data.get_repo_tags(d['Project'])))

    if parsed_args.highlight_sponsors:
        sponsor_map = sponsors.Sponsors(parsed_args.sponsor_level)

        def filter_sponsors(row):
            row['Organization'] = sponsor_map[row['Organization']]
            return row

        data = (filter_sponsors(d) for d in data)

    return data

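# A minimal, self-contained sketch (not part of the command above) of the
# lazy generator-pipeline pattern get_contributions() relies on: each
# filter wraps the previous iterable, and no CSV row is read until the
# final consumer iterates. The column names and filter values here are
# hypothetical examples, not the real report schema.
import csv
import io


def _demo_pipeline():
    source = io.StringIO(
        'Role,Team,Organization\n'
        'committer,nova,ExampleCorp\n'
        'reviewer,swift,OtherOrg\n'
    )
    data = csv.DictReader(source)          # lazy: rows are read on demand
    data = (d for d in data if d['Role'] == 'committer')
    data = (d for d in data if d['Team'].lower() != 'swift')
    return list(data)                      # the whole pipeline runs here

# _demo_pipeline() returns only the single nova committer row.
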
def take_action(self, parsed_args):
    columns = ('Team', 'Repo', 'Env', 'Status')
    gov_dat = governance.Governance(url=parsed_args.project_list)
    if parsed_args.team:
        repos = gov_dat.get_repos_for_team(parsed_args.team)
    else:
        repos = gov_dat.get_repos()
    teams_and_repos = sorted(
        (gov_dat.get_repo_owner(r), r)
        for r in repos
    )
    data = [
        (team, r, env, status)
        for team, r in teams_and_repos
        for env, status in check_one(parsed_args.repo_base_dir, r)
        if team != 'Infrastructure'
    ]
    if parsed_args.errors_only:
        data = [
            r
            for r in data
            if r[-1] != 'OK'
        ]
    return (columns, data)

def take_action(self, parsed_args):
    team_data = governance.Governance(
        url=parsed_args.governance_project_list)
    review_ids = []
    cache = self.app._load_cache_file(preload=False)
    factory = gerrit.ReviewFactory(cache)
    review_source = factory.query(parsed_args.query_string)
    for review in review_source:
        team_name = team_data.get_repo_owner(review.project)
        if not parsed_args.include_unofficial and not team_name:
            LOG.debug(
                'filtered out %s based on repo governance status',
                review.project,
            )
            continue
        review_ids.append(review.id)
    with open(parsed_args.review_list, 'w', encoding='utf-8') as f:
        f.write('# QUERY: {}\n'.format(
            parsed_args.query_string.replace('\n', ' ')))
        for rid in sorted(review_ids):
            f.write('{}\n'.format(rid))

def take_action(self, parsed_args):
    gov_dat = governance.Governance(url=parsed_args.project_list)
    try:
        for repo in gov_dat.get_repos_for_team(parsed_args.team):
            print(repo)
    except ValueError as err:
        print(err)
        return 1

def take_action(self, parsed_args):
    gov_dat = governance.Governance(url=parsed_args.project_list)
    repos = gov_dat.get_repos()
    teams_and_repos = sorted(
        (gov_dat.get_repo_owner(r), r)
        for r in repos
    )
    workdir = os.path.realpath(parsed_args.workdir)
    for team, r in teams_and_repos:
        if team == 'Infrastructure':
            LOG.info('skipping %s', r)
            continue
        team_dir = os.path.join(workdir, team).replace(' ', '-')
        if not os.path.exists(team_dir):
            LOG.info('creating %s', team_dir)
            os.mkdir(team_dir)
        tracking_file = os.path.join(team_dir, 'master')
        gitutils.clone_repo(team_dir, r)
        bad_envs = [
            env
            for env, status in check_one(team_dir, r)
            if status != 'OK'
        ]
        if not bad_envs:
            LOG.info('nothing to change for %s', r)
            shutil.rmtree(os.path.join(team_dir, r))
            continue
        try:
            fix_one(team_dir, r, bad_envs)
        except Exception:
            LOG.exception('failed to update %s', r)
            continue
        LOG.info('adding %s to %s', r, tracking_file)
        with open(tracking_file, 'a', encoding='utf-8') as f:
            f.write('{}\n'.format(r))

def take_action(self, parsed_args):
    raise RuntimeError('Do not use this tool. '
                       'See https://review.openstack.org/607902')
    # NOTE: Everything below the raise is intentionally unreachable;
    # the tool has been disabled but the code is kept for reference.
    gov_dat = governance.Governance(url=parsed_args.project_list)
    repos = gov_dat.get_repos()
    teams_and_repos = sorted(
        (gov_dat.get_repo_owner(r), r)
        for r in repos
    )
    workdir = os.path.realpath(parsed_args.workdir)
    for team, r in teams_and_repos:
        if team == 'Infrastructure':
            LOG.info('skipping %s', r)
            continue
        if not applies_to_repo(r):
            LOG.info('skipping %s', r)
            continue
        team_dir = os.path.join(workdir, team).replace(' ', '-')
        if not os.path.exists(team_dir):
            LOG.info('creating %s', team_dir)
            os.mkdir(team_dir)
        tracking_file = os.path.join(team_dir, 'master')
        gitutils.clone_repo(team_dir, r)
        status = check_one(team_dir, r)
        if status in ('OK', 'not needed'):
            LOG.info('nothing to change for %s', r)
            shutil.rmtree(os.path.join(team_dir, r))
            continue
        try:
            fix_one(team_dir, r)
        except Exception:
            LOG.exception('failed to update %s', r)
            continue
        LOG.info('adding %s to %s', r, tracking_file)
        with open(tracking_file, 'a', encoding='utf-8') as f:
            f.write('{}\n'.format(r))

def take_action(self, parsed_args):
    clone_script = os.path.join(_TOOLS_DIR, 'clone_repo.sh')
    if not os.path.exists(parsed_args.workdir):
        LOG.info('creating working directory %s', parsed_args.workdir)
        os.makedirs(parsed_args.workdir)
    repos = parsed_args.repos
    if not repos:
        gov_dat = governance.Governance(url=parsed_args.project_list)
        repos = gov_dat.get_repos_for_team(parsed_args.team)
    try:
        for repo in repos:
            if os.path.exists(os.path.join(parsed_args.workdir, repo)):
                LOG.info('\n%s exists, skipping', repo)
                continue
            LOG.info('\n%s cloning', repo)
            subprocess.run(
                [clone_script, '--workspace', parsed_args.workdir, repo],
                check=True,
            )
    except ValueError as err:
        print(err)
        return 1

def take_action(self, parsed_args):
    yaml = projectconfig_ruamellib.YAML()
    gov_dat = governance.Governance(url=parsed_args.project_list)
    all_repos = set(gov_dat.get_repos())
    if not all_repos:
        raise ValueError('found no governed repositories')
    project_filename = os.path.join(
        parsed_args.project_config_dir,
        'zuul.d',
        'projects.yaml',
    )
    LOG.debug('loading project settings from %s', project_filename)
    with open(project_filename, 'r', encoding='utf-8') as f:
        project_settings = yaml.load(f)
    for entry in project_settings:
        if 'project' not in entry:
            continue
        project = entry['project']
        if project['name'] not in all_repos:
            continue
        if 'templates' not in project:
            continue
        templates = project['templates']
        for candidate in self.CANDIDATES:
            try:
                # list.index() raises only ValueError when the item is
                # missing, so there is no IndexError to catch here.
                idx = templates.index(candidate)
            except ValueError:
                pass
            else:
                LOG.info('updating %s', project['name'])
                templates[idx] = 'publish-to-pypi-python3'
    with open(project_filename, 'w', encoding='utf-8') as f:
        yaml.dump(project_settings, f)

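# The try/except/else idiom above in isolation (toy template names, not
# the real CANDIDATES list): list.index() raises ValueError when the
# candidate is absent, so the else branch runs only for projects that
# actually use one of the old templates.
def _demo_template_swap():
    templates = ['check-requirements', 'publish-to-pypi']
    try:
        idx = templates.index('publish-to-pypi')
    except ValueError:
        pass  # template not present; nothing to update
    else:
        templates[idx] = 'publish-to-pypi-python3'
    return templates

# _demo_template_swap() -> ['check-requirements', 'publish-to-pypi-python3']
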
def setUp(self):
    super().setUp()
    self.gov = governance.Governance(TEAM_DATA)

def take_action(self, parsed_args):
    team_data = governance.Governance(
        url=parsed_args.governance_project_list)
    cache = self.app._load_cache_file(preload=False)
    factory = gerrit.ReviewFactory(cache)
    member_factory = foundation.MemberFactory(cache)
    canonical_orgs = organizations.Organizations()

    if os.path.exists(parsed_args.db_file):
        if not parsed_args.force:
            print('ERROR: {} already exists. '
                  'Use the --force flag to overwrite.'.format(
                      parsed_args.db_file))
            return 1
        else:
            os.unlink(parsed_args.db_file)

    db = sqlite3.connect(parsed_args.db_file)
    db.execute(SQL_CREATE)

    def get_data():
        review_source = factory.query(parsed_args.query_string)
        for review in review_source:
            team_name = team_data.get_repo_owner(review.project)
            if not parsed_args.include_unofficial and not team_name:
                LOG.debug(
                    'filtered out %s based on repo governance status',
                    review.project,
                )
                continue
            if parsed_args.include_plus_one:
                participants = itertools.chain(
                    review.participants,
                    review.plus_ones,
                )
            else:
                participants = review.participants
            for participant in participants:
                # Figure out which organization the user was
                # affiliated with at the time of the work.
                organization = None
                member = member_factory.fetch(participant.email)
                if member:
                    affiliation = member.find_affiliation(participant.date)
                    if affiliation and affiliation.organization:
                        organization = canonical_orgs[
                            affiliation.organization]
                else:
                    organization = canonical_orgs.from_email(
                        participant.email)
                if not organization:
                    organization = "*unknown"
                yield (review.id, review.url, review.branch,
                       review.project, team_name,
                       participant.role, participant.name,
                       participant.email, participant.date,
                       organization)

    cursor = db.cursor()
    data = get_data()
    while True:
        chunk = list(itertools.islice(data, 100))
        if not chunk:
            break
        LOG.debug('inserting %d', len(chunk))
        cursor.executemany(SQL_INSERT, chunk)
        db.commit()

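# A standalone sketch (in-memory database, made-up table) of the chunked
# insert loop above: itertools.islice() drains a generator 100 rows at a
# time, so an arbitrarily large review stream never has to fit in memory,
# and each chunk becomes one executemany() batch followed by a commit.
import itertools
import sqlite3


def _demo_chunked_insert(rows, chunk_size=100):
    db = sqlite3.connect(':memory:')
    db.execute('CREATE TABLE demo (id INTEGER, name TEXT)')
    cursor = db.cursor()
    data = iter(rows)
    while True:
        chunk = list(itertools.islice(data, chunk_size))
        if not chunk:
            break  # the source generator is exhausted
        cursor.executemany('INSERT INTO demo VALUES (?, ?)', chunk)
        db.commit()
    return db.execute('SELECT COUNT(*) FROM demo').fetchone()[0]

# _demo_chunked_insert((i, 'row-{}'.format(i)) for i in range(250)) -> 250
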
def take_action(self, parsed_args):
    gov_dat = governance.Governance(url=parsed_args.project_list)

    sb_config = storyboard.get_config(parsed_args.config_file)
    LOG.debug('finding champion assignments')
    sbc = storyboard.get_client(sb_config)
    story = sbc.stories.get(id='2002586')
    assignments = {}
    for task in story.tasks.get_all():
        if task.assignee_id:
            user = sbc.users.get(id=task.assignee_id)
            assignments[task.title] = user.full_name
        else:
            assignments[task.title] = ''

    cleanup_changes = get_cleanup_changes_by_team()

    changes = all_changes(False)

    # We aren't going to migrate the settings for the infra team.
    interesting_teams = gov_dat.get_teams()
    interesting_teams.remove('Infrastructure')
    # The loci team had no work to do.
    interesting_teams.remove('loci')

    count_init = {
        team: 0
        for team in interesting_teams
    }
    team_counts = {
        title: collections.Counter(count_init)
        for title, subject in self._subjects
    }
    open_counts = {
        title: collections.Counter(count_init)
        for title, subject in self._subjects
    }
    unreviewed_counts = collections.Counter(count_init)
    fail_counts = collections.Counter(count_init)

    subject_lookup = {
        subject: title
        for title, subject_list in self._subjects
        for subject in subject_list
    }
    all_titles = tuple(t for t, s in self._subjects)

    LOG.debug('counting in-tree changes')
    for c in changes:
        status = c.get('status')
        if status == 'ABANDONED':
            continue
        item = {gov_dat.get_repo_owner(c.get('project')) or 'other': 1}
        title = subject_lookup.get(c.get('subject'))
        if not title:
            continue
        team_counts[title].update(item)
        if status != 'MERGED':
            open_counts[title].update(item)
        verified_votes = count_votes(c, 'Verified')
        if verified_votes.get(-1) or verified_votes.get(-2):
            fail_counts.update(item)
        # We count reviewers as anyone posting +/- 1 or +/- 2
        # votes on a patch.
        reviewed_votes = count_votes(c, 'Code-Review')
        reviewers = (
            sum(reviewed_votes.values()) - reviewed_votes.get(0, 0)
        )
        if not reviewers:
            unreviewed_counts.update(item)

    columns = (
        ('Team',) + all_titles +
        ('Failing', 'Unreviewed', 'Total', 'Champion')
    )

    def get_done_value(title, team, done_msg='+'):
        if title != 'zuul':
            return done_msg
        if not team_counts['zuul'][team]:
            n_repos = len(list(gov_dat.get_repos_for_team(team)))
            return 'not started, {} repos'.format(n_repos)
        cleanup = cleanup_changes.get(team.lower())
        if not cleanup:
            return 'cleanup patch not found'
        workflow_votes = count_votes(cleanup, 'Workflow')
        if cleanup.get('status') == 'MERGED':
            return done_msg
        if open_counts['zuul'][team]:
            return 'in progress'
        if workflow_votes.get(-1):
            if parsed_args.minimal:
                return 'ready for cleanup'
            return 'need to remove WIP from {}{}'.format(
                self._url_base, cleanup.get('_number'))
        if parsed_args.minimal:
            return 'waiting for cleanup'
        return 'waiting for cleanup {}{}'.format(
            self._url_base, cleanup.get('_number'))

    def format_count(title, team, done_msg='+'):
        oc = open_counts[title].get(team, 0)
        tc = team_counts[title].get(team, 0)
        if tc:
            if oc:
                return '{:3}/{:3}'.format(oc, tc)
            return get_done_value(title, team, done_msg)
        return '-'

    data = [
        (team,) +
        tuple(format_count(t, team) for t in all_titles) +
        (fail_counts.get(team, 0),
         unreviewed_counts.get(team, 0),
         sum(v.get(team, 0) for v in team_counts.values()),
         assignments.get(team, ''))
        for team in sorted(interesting_teams, key=lambda x: x.lower())
    ]

    # How many projects needed changes of this type?
    needed_counts = {
        title: 0
        for title in all_titles
    }
    # How many projects have completed the changes of this type?
    done_counts = {
        title: 0
        for title in all_titles
    }
    for row in data:
        for i, t in enumerate(all_titles, 1):
            if row[i] == '-':
                # ignore this row for this column
                continue
            needed_counts[t] += 1
            if row[i] == '+':
                done_counts[t] += 1

    summary_lines = {}
    for title, count in done_counts.items():
        summary_lines[title] = '{:3}/{:3}'.format(
            count, needed_counts[title])

    total_fail = sum(fail_counts.values())
    total_unreviewed = sum(unreviewed_counts.values())
    total_all = sum(sum(v.values()) for v in team_counts.values())

    data.append(
        ('',) +
        tuple(summary_lines.get(t, '') for t in all_titles) +
        (total_fail, total_unreviewed, total_all, '')
    )

    if parsed_args.only_open:
        data = [
            row
            for row in data
            if ''.join(row[1:4]).strip('+-')
        ]

    return (columns, data)

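# A quick illustration (toy team names) of the Counter bookkeeping above:
# Counter.update() with a {key: 1} mapping increments a single bucket, and
# pre-seeding the counter from count_init keeps teams with zero matching
# changes visible in the report instead of silently missing.
import collections

_count_init = {'nova': 0, 'swift': 0}
_team_counter = collections.Counter(_count_init)
_team_counter.update({'nova': 1})   # one change attributed to nova
_team_counter.update({'nova': 1})
# _team_counter -> Counter({'nova': 2, 'swift': 0})
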
def take_action(self, parsed_args):
    gov_dat = governance.Governance(url=parsed_args.project_list)

    only_open = not parsed_args.all
    LOG.debug('only_open %s', only_open)

    changes = all_changes(only_open)

    if parsed_args.team:
        repos = set(gov_dat.get_repos_for_team(parsed_args.team))
        LOG.debug('filtering on %s', repos)
        changes = (
            c for c in changes
            if c.get('project') in repos
        )

    if parsed_args.repo:
        changes = (
            c for c in changes
            if c.get('project') == parsed_args.repo
        )

    if parsed_args.imports:
        changes = (
            c for c in changes
            if c.get('subject') == self._import_subject
        )

    rows = list(get_one_row(c, gov_dat) for c in changes)
    LOG.debug('rows: %s', len(rows))

    if not parsed_args.repo and not parsed_args.imports:
        LOG.debug('looking for cleanup changes')
        cleanup_changes = get_cleanup_changes_by_team()
        to_add = []
        if parsed_args.team:
            if parsed_args.team.lower() in cleanup_changes:
                to_add.append(cleanup_changes[parsed_args.team.lower()])
        else:
            for team, change in cleanup_changes.items():
                change['_TEAM'] = team
                to_add.append(change)
        if to_add:
            if only_open:
                to_add = (
                    c for c in to_add
                    if c.get('status') not in ('MERGED', 'ABANDONED')
                )
            extra_rows = (
                get_one_row(c, gov_dat)
                for c in to_add
            )
            rows.extend(extra_rows)

    rows = sorted(rows, key=lambda r: (r[1], r[5], r[4]))

    if parsed_args.team:
        columns = ('Subject', 'Repo',
                   'Tests', 'Workflow', 'URL', 'Branch', 'Owner')
        data = (
            r[:2] + r[3:]
            for r in rows
        )
    else:
        columns = ('Subject', 'Repo', 'Team',
                   'Tests', 'Workflow', 'URL', 'Branch', 'Owner')
        data = rows
    return (columns, data)

def take_action(self, parsed_args):
    yaml = projectconfig_ruamellib.YAML()

    project_filename = os.path.join(
        parsed_args.project_config_dir,
        'zuul.d',
        'projects.yaml',
    )
    LOG.debug('loading project settings from %s', project_filename)
    with open(project_filename, 'r', encoding='utf-8') as f:
        project_settings = yaml.load(f)

    zuul_templates_filename = os.path.join(
        parsed_args.openstack_zuul_jobs_dir,
        'zuul.d',
        'project-templates.yaml',
    )
    LOG.debug('loading project templates from %s', zuul_templates_filename)
    with open(zuul_templates_filename, 'r', encoding='utf-8') as f:
        zuul_templates_raw = yaml.load(f)
    zuul_templates = {
        pt['project-template']['name']: pt['project-template']
        for pt in zuul_templates_raw
        if 'project-template' in pt
    }

    zuul_jobs_filename = os.path.join(
        parsed_args.openstack_zuul_jobs_dir,
        'zuul.d',
        'jobs.yaml',
    )
    LOG.debug('loading jobs from %s', zuul_jobs_filename)
    with open(zuul_jobs_filename, 'r', encoding='utf-8') as f:
        zuul_jobs_raw = yaml.load(f)
    zuul_jobs = {
        job['job']['name']: job['job']
        for job in zuul_jobs_raw
        if 'job' in job
    }

    repos = parsed_args.repos
    if not repos:
        gov_dat = governance.Governance(url=parsed_args.project_list)
        repos = gov_dat.get_repos_for_team(parsed_args.team)

    for repo in repos:
        LOG.debug('looking for settings for %s', repo)
        for idx, entry in enumerate(project_settings):
            if 'project' not in entry:
                continue
            if entry['project'].get('name') == repo:
                break
        else:
            LOG.warning('Could not find %s in %s',
                        repo, project_filename)
            continue

        find_templates_to_retain(
            entry['project'],
            zuul_templates,
            zuul_jobs,
        )

        find_jobs_to_retain(entry['project'])

        print()
        if need_to_keep(entry):
            yaml.dump([entry], self.app.stdout)
        else:
            print('# No settings to retain for {}.\n'.format(repo))
            del project_settings[idx]

    if parsed_args.dry_run:
        LOG.debug('not writing project settings to %s', project_filename)
        return 0

    LOG.debug('writing project settings to %s', project_filename)
    # The YAML representation removes existing blank lines between
    # the "- project:" blocks. This code reformats the YAML output
    # to restore the blank lines and ensure that the file always
    # ends in a newline.
    buffer = io.StringIO()
    yaml.dump(project_settings, buffer)
    body = buffer.getvalue()
    parts = body.split('- project:')
    body = '\n\n- project:'.join(p.rstrip() for p in parts) + '\n'
    with open(project_filename, 'w', encoding='utf-8') as f:
        f.write(body)

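# A small sketch of the blank-line restoration used above, applied to a
# literal string instead of real ruamel output: splitting on '- project:'
# and re-joining with a blank line re-inserts the separators the YAML
# dumper dropped, and the trailing '\n' guarantees the file ends with a
# newline.
def _demo_restore_blank_lines(body):
    parts = body.split('- project:')
    return '\n\n- project:'.join(p.rstrip() for p in parts) + '\n'

# _demo_restore_blank_lines(
#     '- project:\n    name: a\n- project:\n    name: b\n')
# -> '\n\n- project:\n    name: a\n\n- project:\n    name: b\n'
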