Ejemplo n.º 1
0
def cache_worklogs_and_issues(required_worklogs: Set[str],
                              long_term: bool) -> Dict[str, Dict[str, str]]:
    """
    Workaround for missing Tempo API data. It retrieves `required_worklogs` and caches them along with issues.

    It's possible to regenerate long-term cache by specifying `long_term` argument.
    """
    # Determine whether we'll be using the long-term cache or the short-term one. Set keys and timeout accordingly.
    worklogs_key = settings.CACHE_WORKLOGS_KEY_LONG_TERM if long_term else settings.CACHE_WORKLOGS_KEY
    issues_key = settings.CACHE_ISSUES_KEY_LONG_TERM if long_term else settings.CACHE_ISSUES_KEY
    timeout = settings.CACHE_WORKLOG_TIMEOUT_LONG_TERM if long_term else settings.CACHE_ISSUES_TIMEOUT_SHOT_TERM

    # Check if worklogs are missing from cache.
    required_issues: Set[str] = set()
    worklogs: Dict[str, Dict[str, str]] = _get_cached_dicts_from_keys(
        (settings.CACHE_WORKLOGS_KEY,
         settings.CACHE_WORKLOGS_KEY_LONG_TERM)) if not long_term else {}

    if missing_worklogs := required_worklogs - worklogs.keys():
        with connect_to_jira() as conn:
            retrieved_worklogs: List[Worklog] = conn.worklog_list(
                list(missing_worklogs))  # type: ignore
            for worklog in retrieved_worklogs:
                required_issues.add(worklog.issueId)

        # Check if issues are missing from cache.
        issues: Dict[str, Dict[str, str]] = _get_cached_dicts_from_keys(
            (settings.CACHE_ISSUES_KEY,
             settings.CACHE_ISSUES_KEY_LONG_TERM)) if not long_term else {}
        new_issues: Dict[str, Dict[str, str]] = {}

        if missing_issues := required_issues - issues.keys():
            with connect_to_jira() as conn:
                try:
                    retrieved_issues = conn.search_issues(
                        f'id in ({",".join(missing_issues)})',
                        fields='project',
                        maxResults=0)
                except JIRAError:
                    # We can hit this for the long-term cache, as Jira has limits for the header size.
                    # Fall back to querying the issues in small chunks.
                    retrieved_issues = []
                    for chunk in chunks(list(missing_issues), 12):
                        # BUGFIX: each chunk's result previously OVERWROTE `retrieved_issues`,
                        # so only the issues from the last chunk were retained. Accumulate instead.
                        retrieved_issues.extend(
                            conn.search_issues(
                                f'id in ({",".join(chunk)})',
                                fields='project',
                                maxResults=0))
            # Map issue IDs to the minimal dict structure kept in the cache.
            new_issues = {
                issue.id: {
                    'key': issue.key,
                    'project': issue.fields.project.name
                }
                for issue in retrieved_issues
            }
Ejemplo n.º 2
0
def add_spillover_reminder_comment_task(issue_key: str,
                                        assignee_key: str,
                                        clean_sprint: bool = False) -> None:
    """A task for posting the spillover reason reminder on the issue."""
    if clean_sprint:
        message = settings.SPILLOVER_CLEAN_HINTS_MESSAGE
    else:
        message = settings.SPILLOVER_REMINDER_MESSAGE
    # Mention the assignee so they get notified about the comment.
    with connect_to_jira() as conn:
        conn.add_comment(issue_key, f"[~{assignee_key}], {message}")
Ejemplo n.º 3
0
def create_estimation_session_task() -> None:
    """
    Create a fresh, empty estimation session for each cell.

    The new session contains no issues and no participants. Only the current user is
    registered as its scrum master, since without that permission no further
    modifications of the session would be possible.

    WARNING
    Creating a session without issues causes some chaos in Jira, as the `/session/async/{sessionId}/rounds/` endpoint
    returns HTTP 500 in such case. It does not break other API calls, so operations like updating, closing, and deleting
    the session (via the API) work correctly. It makes the session unusable via the browser by breaking two views:
        - estimation,
        - configuration.
    Therefore, the decision is to avoid adding the participants to the session until there are issues that can be added
    too. Assuming that the sessions are fully automated, and don't require any manual interventions in the beginning,
    this should not cause any troubles.
    """
    with connect_to_jira() as conn:
        name = get_next_poker_session_name(conn)
        for cell in get_cells(conn):
            if settings.DEBUG:  # We really don't want to trigger this in the dev environment.
                continue
            conn.create_poker_session(
                board_id=cell.board_id,
                name=name,
                issues=[],
                participants=[],
                scrum_masters=[conn.myself()['key']],
                send_invitations=False,
            )
Ejemplo n.º 4
0
    def _fetch_accounts_chunk(
            from_: str,
            to: str,
            force_regenerate_worklogs=False) -> Dict[str, Dict]:
        """Fetches worklogs by a month, which is much faster."""
        with connect_to_jira() as conn:
            reports = conn.report(from_, to)

        # HACK: Ugly workaround - the Tempo team utilization report exposes neither the
        # ticket's key nor its ID, so collect the worklog IDs first and resolve them via the cache.
        worklog_ids: Set[str] = {
            str(report.typeId)
            for weekly_report in reports.reports
            for account_type in weekly_report.reports
            for account_category in account_type.reports
            for account_reports in account_category.reports
            for report in account_reports.reports
        }

        worklogs = cache_worklogs_and_issues(worklog_ids,
                                             force_regenerate_worklogs)

        # Aggregate the reports per category, then per account.
        categories: Dict[str, Dict[str, SustainabilityAccount]] = {}
        for weekly_report in reports.reports:
            for account_type in weekly_report.reports:
                for account_category in account_type.reports:
                    category = categories.setdefault(account_category.name, {})
                    for account_reports in account_category.reports:
                        account = category.setdefault(
                            account_reports.name,
                            SustainabilityAccount(account_reports.name),
                        )
                        account.add_reports(account_reports.reports, worklogs)

        return categories
Ejemplo n.º 5
0
def unflag_tickets_task() -> None:
    """Unflag all tickets from the next sprint."""
    with connect_to_jira() as conn:
        next_sprint_issues = get_next_sprint_issues(conn)
        if settings.DEBUG:  # We really don't want to trigger this in the dev environment.
            return
        for ticket in next_sprint_issues:
            unflag_issue(conn, ticket)
Ejemplo n.º 6
0
def create_next_sprint_task(board_id: int) -> int:
    """A task for creating the next sprint for the specified cell; returns the new sprint's number."""
    with connect_to_jira() as conn:
        # Locate the cell that owns the given board.
        cell = next(c for c in get_cells(conn) if c.board_id == board_id)
        cell_sprints: List[Sprint] = get_sprints(conn, cell.board_id)
        new_sprint = create_next_sprint(conn, cell_sprints, cell.key, board_id)
    return get_sprint_number(new_sprint)
Ejemplo n.º 7
0
def ping_overcommitted_users_task() -> None:
    """Notify team members about their overcommitment."""
    with connect_to_jira() as conn:
        for channel, members in get_overcommitted_users(conn).items():
            # TODO: Ping sprint managers too. Use the approach from https://github.com/open-craft/sprints/pull/63.
            #  Add more flexibility to the Mattermost library to handle this.
            recipient_emails = [member.emailAddress for member in members]

            if not settings.DEBUG:  # We really don't want to trigger this in the dev environment.
                create_mattermost_post(
                    settings.SPRINT_ASYNC_OVERCOMMITMENT_MESSAGE,
                    emails=recipient_emails,
                    channel=channel,
                )
Ejemplo n.º 8
0
    def retrieve(self, request, pk=None):
        """Generates a specified cell's board."""
        # Any non-empty `cache` query parameter enables reading from the cache.
        use_cache = bool(request.query_params.get('cache', False))
        data = None
        if use_cache:
            data = cache.get(pk)

        # Regenerate (and re-cache) the dashboard on a cache miss or when caching is disabled.
        if not data:
            with connect_to_jira() as conn:
                board = Dashboard(int(pk), conn)
                data = DashboardSerializer(board).data
                cache.set(pk, data, settings.CACHE_SPRINT_TIMEOUT_ONE_TIME)
        return Response(data)
Ejemplo n.º 9
0
def upload_commitments_task(board_id: int, cell_name: str) -> None:
    """A task for uploading commitments in the Google Spreadsheet."""
    with connect_to_jira() as conn:
        board = Dashboard(board_id, conn)

    board.delete_mock_users()
    sheet = get_commitments_spreadsheet(cell_name)
    cell_users, commitment_column = prepare_commitment_spreadsheet(board, sheet)
    cell_range = get_commitment_range(sheet, cell_name)

    upload_commitments(cell_users, commitment_column, cell_range)
Ejemplo n.º 10
0
def move_estimates_to_tickets_task() -> None:
    """
    Applies the average vote results from the closed estimation session to the tickets for every cell.

    If there were no votes for a specific issue, its assignee (or another responsible person) is notified.
    """
    with connect_to_jira() as conn:
        session_name = get_next_poker_session_name(conn)

        for cell in get_cells(conn):
            vote_values = conn.poker_session_vote_values(cell.board_id)
            poker_sessions = conn.poker_sessions(cell.board_id,
                                                 state="CLOSED",
                                                 name=session_name)

            if not poker_sessions and not settings.DEBUG:
                # This can happen if a new cell has been added, then its session was created manually, and it either:
                # - does not have a correct name,
                # - does not have the Jira bot added as its scrum master.
                # noinspection PyUnresolvedReferences
                from sentry_sdk import capture_message

                capture_message(
                    f"Could not find a session called {session_name} in {cell.name}. If this is a new cell, please "
                    f"make sure that an estimation session with this name exists and {settings.JIRA_BOT_USERNAME} "
                    f"has been added as a scrum master there.")

            # Handle applying the results from multiple sessions with the same name (though it should not happen).
            for session in poker_sessions:
                for issue, results in conn.poker_session_results(
                        session.sessionId).items():
                    votes = []
                    for result in results.values():
                        vote = result.get("selectedVote")
                        try:
                            votes.append(float(vote))  # type: ignore
                        except (TypeError, ValueError):
                            # BUGFIX: `vote` is None when a participant did not select anything,
                            # and `float(None)` raises TypeError (not ValueError), which previously
                            # crashed the task. Ignore missing and non-numeric answers alike.
                            pass

                    try:
                        final_vote = get_poker_session_final_vote(
                            votes, vote_values)
                    except AttributeError:  # No votes.
                        ping_users_on_ticket(
                            conn, conn.issue(issue),
                            settings.SPRINT_ASYNC_POKER_NO_ESTIMATES_MESSAGE)
                    else:
                        if not settings.DEBUG:  # We really don't want to trigger this in the dev environment.
                            conn.update_issue(
                                issue, conn.issue_fields[
                                    settings.JIRA_FIELDS_STORY_POINTS],
                                str(final_vote))
Ejemplo n.º 11
0
def create_role_issues_task(cell: Dict[str, str], sprint_id: int,
                            sprint_number: int) -> None:
    """
    A task for creating the recurring cell-role issues for the given sprint.

    For every role in the cell's rotations schedule, one story per subrole
    (taken from `settings.JIRA_CELL_ROLES`) is created for each part of the
    sprint and assigned to the rotated user.
    """
    rotations = get_rotations_users(str(sprint_number), cell['name'])
    with connect_to_jira() as conn:
        jira_fields = get_issue_fields(conn, settings.JIRA_REQUIRED_FIELDS)
        # Find the cell's role epic; only its `key` is needed for the Epic Link.
        epic = conn.search_issues(
            **prepare_jql_query_cell_role_epic(
                [
                    'None'
                ],  # We don't need any fields here. The `key` attribute will be sufficient.
                project=cell['name'],
            ),
            maxResults=1,
        )[0]

        # Base fields shared by every created issue; role-specific fields are merged in below.
        fields = {
            'project': cell['key'],
            jira_fields['Issue Type']: 'Story',
            jira_fields['Sprint']: sprint_id,
            jira_fields['Epic Link']: epic.key,
        }

        for role, users in rotations.items():
            # `sprint_part` maps to a letter suffix in the summary (0 -> 'a', 1 -> 'b', ...).
            for sprint_part, user in enumerate(users):
                user_name = conn.search_users(user)[0].name
                fields.update({
                    jira_fields['Assignee']: {
                        'name': user_name
                    },
                    jira_fields['Reviewer 1']: {
                        'name': user_name
                    },
                })
                for subrole in settings.JIRA_CELL_ROLES.get(role, []):
                    fields.update({
                        jira_fields['Summary']:
                        f"Sprint {sprint_number}{string.ascii_lowercase[sprint_part]} {subrole['name']}",
                        jira_fields['Story Points']:
                        subrole['story_points'],
                        # This needs to be string.
                        jira_fields['Account']:
                        str(
                            subrole.get('account',
                                        settings.JIRA_CELL_ROLE_ACCOUNT)),
                        # This requires special dict structure.
                        'timetracking': {
                            'originalEstimate': f"{subrole['hours']}h"
                        },
                    })

                    conn.create_issue(fields)
Ejemplo n.º 12
0
def trigger_new_sprint_webhooks_task(cell_name: str, sprint_name: str,
                                     sprint_number: int, board_id: int):
    """
    Collect the sprint rotations, the cell members, and their roles, assemble
    the webhook payload, and trigger every active 'new sprint' webhook with it.
    """
    with connect_to_jira() as conn:
        # Rotations for the upcoming sprint: {'FF': ['John Doe', ...], ...}
        rotations = get_rotations_users(str(sprint_number), cell_name)

        # Rotations for the sprint after that (current sprint number + 1),
        # merged in under "FS"-prefixed keys.
        next_sprint_rotations = get_rotations_users(str(sprint_number + 1),
                                                    cell_name)
        rotations.update({
            f"FS{role}": assignees
            for role, assignees in next_sprint_rotations.items()
        })

        # Jira user objects for every board member
        # (usernames look like ['johndoe1', 'jane_doe_22', ...]).
        members = [
            conn.user(username)
            for username in get_cell_members(conn.quickfilters(board_id))
        ]

        # Member roles: {'John Doe': ['Sprint Planning Manager', ...], ...}
        cell_member_roles = get_cell_member_roles()

        payload = {
            'board_id': board_id,
            'cell': cell_name,
            'sprint_number': sprint_number,
            'sprint_name': sprint_name,
            'participants': compile_participants_roles(members, rotations,
                                                       cell_member_roles),
            'event_name': "new sprint",
        }

        for webhook in Webhook.objects.filter(events__name="new sprint",
                                              active=True):
            webhook.trigger(payload=payload)
Ejemplo n.º 13
0
def upload_spillovers_task(board_id: int, cell_name: str) -> None:
    """A task for documenting spillovers in the Google Spreadsheet."""
    with connect_to_jira() as conn:
        issue_fields = get_issue_fields(conn,
                                        settings.SPILLOVER_REQUIRED_FIELDS)
        spillover_issues = get_spillover_issues(conn, issue_fields, cell_name)
        active_sprints = get_all_sprints(conn)['active']
        meetings = get_meetings_issue(conn, cell_name, issue_fields)
        member_names = get_cell_member_names(
            conn, get_cell_members(conn.quickfilters(board_id)))

    # Index the active sprints by their numeric ID for quick lookups.
    sprints_by_id = {int(sprint.id): sprint for sprint in active_sprints}
    rows = prepare_spillover_rows(spillover_issues, issue_fields, sprints_by_id)
    prepare_clean_sprint_rows(rows, member_names, meetings, issue_fields,
                              sprints_by_id)
    upload_spillovers(rows)
Ejemplo n.º 14
0
def check_tickets_ready_for_sprint_task() -> None:
    """Notify team members about incomplete tickets."""
    with connect_to_jira() as conn:
        cell_membership = get_cell_membership(conn)
        next_sprint_issues = get_next_sprint_issues(conn)

        grouped = group_incomplete_issues(conn, next_sprint_issues)
        for user, incomplete_issues in grouped.items():
            # TODO: Add more flexibility to the Mattermost library, to support a nicer format here.
            message = f"{settings.SPRINT_ASYNC_INCOMPLETE_TICKET_MESSAGE}{incomplete_issues}"
            # If user is not a member of any cell, then use a default Mattermost channel.
            channel = cell_membership.get(user.name,
                                          settings.MATTERMOST_CHANNEL)

            if not settings.DEBUG:  # We really don't want to trigger this in the dev environment.
                create_mattermost_post(message,
                                       emails=[user.emailAddress],
                                       channel=channel)
Ejemplo n.º 15
0
def close_estimation_session_task() -> None:
    """
    Close every open "next-sprint" estimation session in each cell.

    A session qualifies when:
    - it is open,
    - its name equals the result of the `get_next_poker_session_name` function.
    """
    with connect_to_jira() as conn:
        session_name = get_next_poker_session_name(conn)

        for cell in get_cells(conn):
            open_sessions = conn.poker_sessions(cell.board_id,
                                                state="OPEN",
                                                name=session_name)
            if settings.DEBUG:  # We really don't want to trigger this in the dev environment.
                continue
            # Handle closing multiple sessions with the same name (though it should not happen).
            for session in open_sessions:
                conn.close_poker_session(session.sessionId,
                                         send_notifications=True)

    move_estimates_to_tickets_task.delay()
Ejemplo n.º 16
0
def move_out_injections_task() -> None:
    """
    Move injected tickets out of the next sprint into the `SPRINT_ASYNC_INJECTION_SPRINT`,
    then ping assignees and epic owners (when they exist) about the move.
    """
    with connect_to_jira() as conn:
        injection_sprint = get_sprint_by_name(
            conn, settings.SPRINT_ASYNC_INJECTION_SPRINT)
        candidates = get_next_sprint_issues(conn, changelog=True)

        injected = [
            issue for issue in candidates if check_issue_injected(conn, issue)
        ]
        if not settings.DEBUG:  # We should not trigger this in the dev environment.
            conn.add_issues_to_sprint(injection_sprint.id,
                                      [issue.key for issue in injected])
        for issue in injected:
            ping_users_on_ticket(
                conn,
                issue,
                f"{settings.SPRINT_ASYNC_INJECTION_MESSAGE}{injection_sprint.name}.",
                epic_owner=True,
            )
Ejemplo n.º 17
0
def update_estimation_session_task() -> None:
    """
    Update estimation session's issues and participants.

    If no issues exist for the session, then it will not be updated. The reasoning behind this has been described in the
    `create_estimation_session_task` function.

    This does not override the manual additions to the session - i.e. if an issue or user has been added manually to the
    session, then it will be retained, as it merges available issues and participants with the applied ones.
    However, any removed items (e.g. an issue scheduled for the next sprint, or a user who is a member of the cell)
    will be added back automatically.

    FIXME: This adds all cell members as scrum masters to the session, because at the moment we do not have a way to
           determine whether the member is a part of the core team. We can restrict this in the future, if needed.
    """
    with connect_to_jira() as conn:
        session_name = get_next_poker_session_name(conn)
        issues = get_unestimated_next_sprint_issues(conn)
        # FIXME: This will not work for more than 1000 users. To support it, add `startAt` to handle the pagination.
        all_users = conn.search_users(
            "''",
            maxResults=1000)  # Searching for the "quotes" returns all users.

        for cell in get_cells(conn):
            try:
                # Only the first matching open session is updated.
                poker_session = conn.poker_sessions(cell.board_id,
                                                    state="OPEN",
                                                    name=session_name)[0]
            except IndexError:
                if not settings.DEBUG:
                    # It can happen:
                    # 1. When this runs before the `create_estimation_session_task`, e.g. when the sprint is created
                    #    manually a moment before the full hour.
                    # 2. If a new cell has been added, then its session was created manually, and it either:
                    #    - does not have a correct name,
                    #    - does not have the Jira bot added as its scrum master.
                    # noinspection PyUnresolvedReferences
                    from sentry_sdk import capture_message

                    capture_message(
                        f"Could not find a session called {session_name} in {cell.name}. If you haven't completed the "
                        f"sprint yet, then you should consider adjusting the start time of this task, so it's started "
                        f"only once the new sprint has been started. If this is a new cell, please make sure that an "
                        f"estimation session with this name exists and {settings.JIRA_BOT_USERNAME} has been added as "
                        f"a scrum master there.")
                continue

            # Get IDs of issues belonging only to a specific cell.
            cell_issue_ids = set(issue.id for issue in issues
                                 if issue.key.startswith(cell.key))
            # Issues already present in the session (including manual additions).
            current_issue_ids = set(
                conn.poker_session_results(poker_session.sessionId).keys())
            # Union, so manual additions are retained and missing issues are re-added.
            all_issue_ids = list(current_issue_ids | cell_issue_ids)
            if all_issue_ids:
                # User's `name` and `key` are not always the same.
                members = get_cell_member_names(
                    conn, get_cell_members(conn.quickfilters(cell.board_id)))
                member_keys = set(user.key for user in all_users
                                  if user.displayName in members)

                # Preserve manually-added participants and scrum masters by merging
                # the current session membership with the cell membership.
                current_member_keys = set(
                    user.userKey for user in poker_session.participants)
                current_scrum_master_keys = set(
                    user.userKey for user in poker_session.scrumMasters)

                all_member_keys = list(current_member_keys | member_keys)
                all_scrum_master_keys = list(current_scrum_master_keys
                                             | member_keys)

                if not settings.DEBUG:  # We don't want to trigger this in the dev environment.
                    # TODO: Handle 403 response when the bot is not added as a participant.
                    poker_session.update({
                        'issuesIds': all_issue_ids,
                        'participants': all_member_keys,
                        'scrumMasters': all_scrum_master_keys,
                        'sendInvitations':
                        False,  # TODO: This should notify participants in case of any changes.
                    })
Ejemplo n.º 18
0
def complete_sprint_task(board_id: int) -> None:
    """
    1. Upload spillovers.
    2. Upload commitments.
    3. Move archived issues out of the active sprint.
    4. Close the active sprint.
    5. Move issues from the closed sprint to the next one.
    6. Open the next sprint.
    7. Create role tickets.
    8. Trigger the `new sprint` webhooks.
    9. Release the sprint completion lock and clear the cache related to end of sprint date.
    """
    with connect_to_jira() as conn:
        cells = get_cells(conn)
        cell = next(c for c in cells if c.board_id == board_id)
        spreadsheet_tasks = [
            upload_spillovers_task.s(cell.board_id, cell.name),
            upload_commitments_task.s(cell.board_id, cell.name),
        ]

        # Run the spreadsheet tasks asynchronously and wait for the results before proceeding with ending the sprint.
        with allow_join_result():
            # FIXME: Use `apply_async`. Currently blocked because of `https://github.com/celery/celery/issues/4925`.
            #   CAUTION: if you change it, ensure that all tasks have finished successfully.
            group(spreadsheet_tasks).apply().join()

        sprints: List[Sprint] = get_sprints(conn, cell.board_id)
        sprints = filter_sprints_by_cell(sprints, cell.key)

        # NOTE(review): if no sprint is in the 'active' state, `active_sprint` is never bound
        # and the `get_next_sprint` call below raises NameError — confirm an active sprint is
        # guaranteed at this point (e.g. by the sprint completion lock).
        for sprint in sprints:
            if sprint.state == 'active':
                active_sprint = sprint
                break

        next_sprint = get_next_sprint(sprints, active_sprint)

        archived_issues: List[Issue] = conn.search_issues(
            **prepare_jql_query_active_sprint_tickets(
                [
                    'None'
                ],  # We don't need any fields here. The `key` attribute will be sufficient.
                {settings.SPRINT_STATUS_ARCHIVED},
                project=cell.name,
            ),
            maxResults=0,
        )
        archived_issue_keys = [issue.key for issue in archived_issues]

        # Unfinished (active or deployed-and-delivered) issues to carry over to the next sprint.
        issues: List[Issue] = conn.search_issues(
            **prepare_jql_query_active_sprint_tickets(
                [
                    'None'
                ],  # We don't need any fields here. The `key` attribute will be sufficient.
                settings.SPRINT_STATUS_ACTIVE
                | {settings.SPRINT_STATUS_DEPLOYED_AND_DELIVERED},
                project=cell.name,
            ),
            maxResults=0,
        )
        issue_keys = [issue.key for issue in issues]

        if not settings.DEBUG:  # We really don't want to trigger this in the dev environment.
            if settings.FEATURE_CELL_ROLES:
                # Raise error if we can't read roles from the handbook
                get_cell_member_roles()

            # Remove archived tickets from the active sprint. Leaving them might interrupt closing the sprint.
            conn.move_to_backlog(archived_issue_keys)

            # Close the active sprint.
            conn.update_sprint(
                active_sprint.id,
                name=active_sprint.name,
                startDate=active_sprint.startDate,
                endDate=active_sprint.endDate,
                state='closed',
            )

            # Move issues to the next sprint from the closed one.
            conn.add_issues_to_sprint(next_sprint.id, issue_keys)

            # Open the next sprint.
            conn.update_sprint(
                next_sprint.id,
                name=next_sprint.name,
                startDate=next_sprint.startDate,
                endDate=next_sprint.endDate,
                state='active',
            )

            # Ensure that the next sprint exists. If it doesn't exist, create it.
            # Get next sprint number for creating role tasks there.
            if future_next_sprint := get_next_sprint(sprints, next_sprint):
                future_next_sprint_number = get_sprint_number(
                    future_next_sprint)
            else:
                future_next_sprint_number = create_next_sprint_task(board_id)
                # NOTE(review): `sprints` was fetched before the new sprint was created, so this
                # lookup may still return None, making `future_next_sprint.id` below fail — verify
                # whether the sprint list should be re-fetched here.
                future_next_sprint = get_next_sprint(sprints, next_sprint)

            cell_dict = {
                'key': cell.key,
                'name': cell.name,
                'board_id': cell.board_id,
            }

            create_role_issues_task.delay(cell_dict, future_next_sprint.id,
                                          future_next_sprint_number)

            trigger_new_sprint_webhooks_task.delay(
                cell.name, next_sprint.name, get_sprint_number(next_sprint),
                board_id)
Ejemplo n.º 19
0
def _get_current_sprint(type_: str, board_id: int = None) -> Sprint:
    """Get the current sprint. `type_` can be set to `active` or `future`."""
    with connect_to_jira() as conn:
        matching_sprints = get_all_sprints(conn, board_id)[type_]
    # The first sprint of the requested type is treated as the current one.
    return matching_sprints[0]
Ejemplo n.º 20
0
 def list(self, _request):
     """Lists all available cells."""
     with connect_to_jira() as conn:
         available_cells = get_cells(conn)
     return Response(CellSerializer(available_cells, many=True).data)