Example #1
    def create_pr_proc(self):
        api = GhApi(owner='Azure', repo='azure-sdk-for-python', token=self.usr_token)
        pr_title = "[AutoRelease] {}(Do not merge)".format(self.new_branch)
        pr_head = "{}:{}".format(os.getenv('USR_NAME'), self.new_branch)
        pr_base = 'main'
        pr_body = "{} \n{} \n{}".format(self.issue_link, self.test_result, self.pipeline_link)
        res_create = api.pulls.create(pr_title, pr_head, pr_base, pr_body)

        # Add issue link on PR
        api = GhApi(owner='Azure', repo='azure-sdk-for-python', token=self.bot_token)
        api.issues.create_comment(issue_number=res_create.number, body='issue link:{}'.format(self.issue_link))
        self.pr_number = res_create.number
Example #2
def main():
    # Generate PR for auto release SDK
    api = GhApi(owner='Azure', repo='azure-sdk-for-python', token=os.getenv('USR_TOKEN'))
    pr_title = "[AutoRelease] {}(Do not merge)".format(os.getenv('NEW_BRANCH'))
    pr_head = "{}:{}".format(os.getenv('USR_NAME'), os.getenv('NEW_BRANCH'))
    pr_base = os.getenv('TARGET_BRANCH')
    pr_body = "{} \n{} \n{}".format(os.getenv('ISSUE_LINK'), os.getenv('TEST_RESULT'), os.getenv('PIPELINE_LINK'))
    res_create = api.pulls.create(pr_title, pr_head, pr_base, pr_body)
    pr_number = res_create.number

    # Add issue link on PR
    api = GhApi(owner='Azure', repo='azure-sdk-for-python', token=os.getenv('UPDATE_TOKEN'))
    api.issues.create_comment(issue_number=pr_number, body='issue link:{}'.format(os.getenv('ISSUE_LINK')))
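ghapi exposes the GitHub REST parameters by name, so the positional call above can also be written with keywords; a minimal equivalent sketch (same variables as Example #2):

res_create = api.pulls.create(
    title=pr_title,  # PR title
    head=pr_head,    # source, in "user:branch" form
    base=pr_base,    # target branch
    body=pr_body,    # PR description
)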
Example #3
def _get_github():
    try:
        from ghapi.all import GhApi
    except ImportError:
        return None

    return GhApi()
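Since `_get_github` returns `None` when `ghapi` is not installed, callers need a guard; a minimal usage sketch (the rate-limit call is just an illustration):

api = _get_github()
if api is None:
    print("ghapi not installed; skipping GitHub lookups")
else:
    print(api.rate_limit.get().rate.remaining)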
Example #4
    def ask_check_policy(self):
        changelog = self.get_changelog()
        if changelog == '':
            changelog = 'no new content found by changelog tools!'

        # comment to ask for check from users
        issue_number = int(self.issue_link.split('/')[-1])
        api = GhApi(owner='Azure',
                    repo='sdk-release-request',
                    token=self.bot_token)
        author = api.issues.get(issue_number=issue_number).user.login
        body = f'Hi @{author}, Please check whether the package works well and the CHANGELOG info is as below:\n' \
               f'{self.get_private_package_link()}' \
               f'```\n' \
               f'CHANGELOG:\n' \
               f'{changelog}\n' \
               f'```\n' \
               f'* (If you are not a Python User, you can mainly check whether the changelog meets your requirements)\n\n' \
               f'* (The version of the package is only a temporary version for testing)\n\n' \
               f'https://github.com/Azure/azure-sdk-for-python/pull/{self.pr_number}'
        api.issues.create_comment(issue_number=issue_number, body=body)

        # comment for hint
        body = 'Tips: If you have special needs for release date or other things, please let us know. ' \
               'Otherwise we will release it ASAP after your check.'
        api.issues.create_comment(issue_number=issue_number, body=body)
Example #5
def _get_info(owner, repo, default_branch='main', default_kw='nbdev'):
    try:
        from ghapi.all import GhApi
    except ImportError:
        print(
            '''Could not get information from GitHub automatically because `ghapi` is not installed.
Edit `settings.ini` to verify all information is correct.
''')
        return (default_branch, default_kw, '')

    api = GhApi(owner=owner, repo=repo, token=os.getenv('GITHUB_TOKEN'))

    try:
        r = api.repos.get()
    except HTTPError:
        msg = [
            f"""Could not access repo: {owner}/{repo} to find your default branch - `{default} assumed.
Edit `settings.ini` if this is incorrect.
In the future, you can allow nbdev to see private repos by setting the environment variable GITHUB_TOKEN as described here:
https://nbdev.fast.ai/cli.html#Using-nbdev_new-with-private-repos
"""
        ]
        print(''.join(msg))
        return (default_branch, default_kw, '')

    return r.default_branch, default_kw if not r.topics else ' '.join(
        r.topics), r.description
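A minimal usage sketch, assuming the defaults suffice (the owner/repo values here are placeholders):

branch, keywords, description = _get_info('fastai', 'nbdev')
print(branch, keywords, description)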
Example #6
    def __init__(self, path, token, app_name, reconcile_interval):
        http_proxy = os.environ.get("JUJU_CHARM_HTTP_PROXY", None)
        https_proxy = os.environ.get("JUJU_CHARM_HTTPS_PROXY", None)
        no_proxy = os.environ.get("JUJU_CHARM_NO_PROXY", None)
        self.proxies = {}
        if http_proxy:
            self.proxies["http"] = http_proxy
        if https_proxy:
            self.proxies["https"] = https_proxy
        if no_proxy:
            self.proxies["no_proxy"] = no_proxy
        self.session = requests.Session()
        if self.proxies:
            # setup proxy for requests
            self.session.proxies.update(self.proxies)
            # add proxy to fastcore which ghapi uses
            proxy = urllib.request.ProxyHandler(self.proxies)
            opener = urllib.request.build_opener(proxy)
            fastcore.net._opener = opener
        self.jinja = Environment(loader=FileSystemLoader("templates"))
        self.lxd = pylxd.Client()
        self.path = path
        self.api = GhApi(token=token)
        self.app_name = app_name
        self.reconcile_interval = reconcile_interval
Example #7
def cli(debug: bool, update: bool):
    """ do the needful """
    setup_logging(debug, logger)

    try:
        with open('hangnail_data.json', 'r') as file_handle:
            data = json.load(file_handle)
    except Exception as error_message:
        logger.error(error_message)
        sys.exit(1)

    try:
        from config import GITHUB_TOKEN
    except ImportError:
        GITHUB_TOKEN = os.getenv('GITHUB_TOKEN')
    if not GITHUB_TOKEN:
        logger.error("Couldn't load github token")
        sys.exit(1)

    api = GhApi(token=GITHUB_TOKEN)
    logger.debug("Current user: {}", api.users.get_authenticated().login)

    currently_blocked = api.users.list_blocked_by_authenticated()  # this doesn't work, wtf?
    for user in currently_blocked:
        logger.debug("B: {}", user)
Example #8
def update_feature_votes(app: Sphinx):
    # Only create a new file if none exists (so this will only run if you delete the output file)
    path_output = Path(app.srcdir).joinpath("issue-votes.txt")
    if path_output.exists():
        LOGGER.info(f"Found existing feature votes markdown, to re-download, delete {path_output} first.\n")
        return

    # Pull latest issues data
    # If `None`, ghapi will default to GITHUB_TOKEN
    api = GhApi(token=None)
    repos = api.repos.list_for_org("executablebooks")
    issues = []
    LOGGER.info("Retrieving feature voting issue data...")
    for repo in repos:
        for kind in ["enhancement", "type/enhancement", "type/documentation"]:
            issues += api.issues.list_for_repo("executablebooks", repo['name'], labels=kind, per_page=100, state="open")

    # Extract the metadata that we want
    df = pd.DataFrame(issues)
    df['👍'] = df['reactions'].map(lambda a: a['+1'])
    df['Repository'] = df['html_url'].map(lambda a: f"[{a.rsplit('/')[4]}]({a.rsplit('/', 2)[0]})")
    df['Author'] = df['user'].map(lambda a: f"[@{a['login']}](https://github.com/{a['login']})")
    df['Issue'] = df['html_url'].map(lambda a: f"[#{a.rsplit('/')[-1]}]({a})")
    df = df.rename(columns={"title": "Title"})

    # Sort and remove issues with a very small # of votes
    df = df.sort_values("👍", ascending=False)
    df = df[df['👍'] > 1]

    # Write to markdown
    LOGGER.info("Writing feature voting issues to markdown...")
    df[['👍', 'Repository', "Issue", 'Title', 'Author']].to_markdown(path_output, index=False)
Example #9
def edit_version(add_content):
    global VERSION_NEW

    preview_tag = judge_tag()
    preview_version = 'rc' in VERSION_LAST_RELEASE or 'b' in VERSION_LAST_RELEASE
    #                                           |   preview tag                     | stable tag
    # preview version(1.0.0rc1/1.0.0b1)         | 1.0.0rc2(track1)/1.0.0b2(track2)  |  1.0.0
    # stable  version (1.0.0) (breaking change) | 2.0.0rc1(track1)/2.0.0b1(track2)  |  2.0.0
    # stable  version (1.0.0) (feature)         | 1.1.0rc1(track1)/1.1.0b1(track2)  |  1.1.0
    # stable  version (1.0.0) (bugfix)          | 1.0.1rc1(track1)/1.0.1b1(track2)  |  1.0.1
    preview_label = 'rc' if TRACK == '1' else 'b'
    if preview_version and preview_tag:
        VERSION_NEW = preview_version_plus(preview_label)
    elif preview_version and not preview_tag:
        VERSION_NEW = VERSION_LAST_RELEASE.split(preview_label)[0]
    elif not preview_version and preview_tag:
        VERSION_NEW = stable_version_plus(add_content) + preview_label + '1'
    else:
        VERSION_NEW = stable_version_plus(add_content)

    # additional rule for track1: if version is 0.x.x, next version is 0.x+1.0
    if TRACK == '1' and VERSION_LAST_RELEASE[0] == '0':
        num = VERSION_LAST_RELEASE.split('.')
        VERSION_NEW = f'{num[0]}.{int(num[1]) + 1}.0'
    # '0.0.0' means there must be abnormal situation
    if VERSION_NEW == '0.0.0':
        api_request = GhApi(owner='Azure',
                            repo='sdk-release-request',
                            token=os.getenv('UPDATE_TOKEN'))
        link = os.getenv('ISSUE_LINK')
        issue_number = link.split('/')[-1]
        api_request.issues.add_labels(issue_number=int(issue_number),
                                      labels=['base-branch-attention'])
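`judge_tag`, `preview_version_plus` and `stable_version_plus` are defined elsewhere in that project. A sketch of what the two bump helpers might look like, assuming versions follow the `X.Y.Z[rcN|bN]` scheme from the comment table above (hypothetical implementations, not the project's code):

def preview_version_plus(preview_label):
    # e.g. 1.0.0rc1 -> 1.0.0rc2: bump the trailing preview number
    base, num = VERSION_LAST_RELEASE.split(preview_label)
    return base + preview_label + str(int(num) + 1)

def stable_version_plus(add_content):
    # bump major/minor/patch depending on what the changelog contains
    major, minor, patch = (int(x) for x in VERSION_LAST_RELEASE.split('.'))
    if 'breaking change' in add_content:
        return f'{major + 1}.0.0'
    if 'feature' in add_content:
        return f'{major}.{minor + 1}.0'
    return f'{major}.{minor}.{patch + 1}'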
Example #10
    def zero_version_policy(self):
        if self.next_version == '0.0.0':
            api_request = GhApi(owner='Azure',
                                repo='sdk-release-request',
                                token=self.bot_token)
            issue_number = int(self.issue_link.split('/')[-1])
            api_request.issues.add_labels(issue_number=issue_number,
                                          labels=['base-branch-attention'])
Example #11
    def create_pr_proc(self):
        api = GhApi(owner='Azure',
                    repo='azure-sdk-for-python',
                    token=self.bot_token)
        pr_title = "[AutoRelease] {}(Do not merge)".format(self.new_branch)
        pr_head = "{}:{}".format(os.getenv('USR_NAME'), self.new_branch)
        pr_base = 'main'
        pr_body = "{} \n{} \n{}".format(self.issue_link, self.test_result,
                                        self.pipeline_link)
        if not self.is_single_path:
            pr_body += f'\nBuildTargetingString\n  azure-mgmt-{self.package_name}\nSkip.CreateApiReview\ntrue'
        res_create = api.pulls.create(pr_title, pr_head, pr_base, pr_body)

        # Add issue link on PR
        api = GhApi(owner='Azure',
                    repo='azure-sdk-for-python',
                    token=self.bot_token)
        api.issues.create_comment(issue_number=res_create.number,
                                  body='issue link:{}'.format(self.issue_link))
        self.pr_number = res_create.number
Example #12
def update_feature_votes(app: Sphinx):
    """Update the +1 votes for features.

    This will only run if `issue-votes.txt` does not exist and if a GITHUB_TOKEN
    environment variable is present.
    """
    # Only create a new file if none exists (so this will only run if you delete the output file)
    path_output = Path(app.srcdir).joinpath("issue-votes.txt")
    if path_output.exists():
        LOGGER.info(
            f"Found existing feature votes markdown, to re-download, delete {path_output} first.\n"
        )
        return

    # Pull latest issues data
    token = os.environ.get("GITHUB_TOKEN")
    if not token:
        LOGGER.info(
            f"No token found at {os.environ.get('GITHUB_TOKEN')}, GitHub "
            "issue information will not be used. "
            "Create a GitHub Personal Access Token and assign it to GITHUB_TOKEN"
        )
        return
    api = GhApi(token=token)
    repos = api.repos.list_for_org("executablebooks")
    issues = []
    LOGGER.info("Retrieving feature voting issue data...")
    for repo in repos:
        for kind in ["enhancement", "type/enhancement", "type/documentation"]:
            issues += api.issues.list_for_repo("executablebooks",
                                               repo["name"],
                                               labels=kind,
                                               per_page=100,
                                               state="open")

    # Extract the metadata that we want
    df = pd.DataFrame(issues)
    df["👍"] = df["reactions"].map(lambda a: a["+1"])
    df["Repository"] = df["html_url"].map(
        lambda a: f"[{a.rsplit('/')[4]}]({a.rsplit('/', 2)[0]})")
    df["Author"] = df["user"].map(
        lambda a: f"[@{a['login']}](https://github.com/{a['login']})")
    df["Issue"] = df["html_url"].map(lambda a: f"[#{a.rsplit('/')[-1]}]({a})")
    df = df.rename(columns={"title": "Title"})

    # Sort and remove issues with a very small # of votes
    df = df.sort_values("👍", ascending=False)
    df = df[df["👍"] > 1]

    # Write to markdown
    LOGGER.info("Writing feature voting issues to markdown...")
    df[["👍", "Repository", "Issue", "Title",
        "Author"]].to_markdown(path_output, index=False)
Example #13
def _get_branch(owner, repo, default='main'):
    api = GhApi(owner=owner, repo=repo, token=os.getenv('GITHUB_TOKEN'))
    try:
        return api.repos.get().default_branch
    except HTTPError:
        msg = [
            f"Could not access repo: {owner}/{repo} to find your default branch - `{default} assumed.\n",
            "Edit `settings.ini` if this is incorrect.\n"
            "In the future, you can allow nbdev to see private repos by setting the environment variable GITHUB_TOKEN as described here: https://nbdev.fast.ai/cli.html#Using-nbdev_new-with-private-repos \n",
        ]
        print(''.join(msg))
        return default
Example #14
    def __init__(self):
        self.ghapi = GhApi(token=cfg.pat)
        self.lxd = lxd.LXDRunner(connect=False)
        self.runnermap = {item.labels: item for item in cfg.runnermap}
        self.queues = {item.labels: queue.Queue() for item in cfg.runnermap}
        # For testing
        # self.activecfg = cfg.activecfg

        # Cache for various github resources
        self.reg_tokens = {}
        self.orgs = []
        self.repos = []
        self.pkgs = []
Example #15
def main():
    # Process command line arguments
    parser = argparse.ArgumentParser(
        "Check if any opened issues have been closed, run pylint, and open an issue if pylint complains"
    )
    parser.add_argument("--repo",
                        help="The owner and repository we are operating on")
    parser.add_argument(
        "--label", help="The name of the GitHub label for generated issues")
    parser.add_argument("--src", help="The source directory to run pylint on")
    args = parser.parse_args()
    # Connect to GitHub REST API
    api = GhApi()
    # Determine if we should run pylint
    open_issues = api.search.issues_and_pull_requests(
        f"repo:{args.repo} label:{args.label} is:issue is:open")
    if open_issues.total_count != 0:
        print(
            f"Skipping pylint run due to existing issue {open_issues['items'][0]['html_url']}."
        )
        sys.exit(0)
    # Now run pylint
    (output, pylint_exitcode) = run_pylint(args.src)
    if pylint_exitcode == 0:
        sys.exit(0)
    # File an issue
    issue_body = (
        f"A version of `pylint` is available in the Python package repositories that identifies issues "
        f"with the `spotfire` package.  Since we attempt to keep all pylint issues out of the source "
        f"code (either by fixing the issue identified or by disabling that message with a localized "
        f"comment), this is indicative of a new check in this new version of `pylint`.\n\n"
        f"Please investigate these issues, and either fix the source or disable the check with a "
        f"comment.  Further checks by this automation will be held until this issue is closed.  Make "
        f"sure that the fix updates the `pylint` requirement in `requirements_dev.txt` to the version "
        f"identified here ({pl_version}).\n\n"
        f"For reference, here is the output of this version of `pylint`:\n\n"
        f"```\n"
        f"$ pylint {args.src}\n"
        f"{output}\n"
        f"```\n\n"
        f"*This issue was automatically opened by the `pylint.yaml` workflow.*\n"
    )
    owner_repo = args.repo.split("/")
    new_issue = api.issues.create(
        owner=owner_repo[0],
        repo=owner_repo[1],
        title=f"New version of pylint ({pl_version}) identifies new issues",
        body=issue_body,
        labels=[args.label])
    print(f"Opened issue {new_issue['html_url']}")
Example #16
def write_issues(output_file=config["issues"]):
    issues = {}
    api = GhApi()
    with jsonlines.open(config["projects"], mode="r") as reader:
        for proj in reader:
            r = api.issues.list_for_repo(
                state="open", owner=proj["project"], repo=proj["repo"]
            )
            issues[f'{proj["project"]}/{proj["repo"]}'] = [
                serialize_issue(i) for i in r
            ]
    with jsonlines.open(output_file, "a") as writer:
        writer.write(issues)
    return issues
Example #17
def gh_issues():
    res = {}
    if 'CI' not in os.environ or ('GITHUB_ACTIONS' in os.environ
                                  and sys.version_info.minor >= 8):
        try:
            api = GhApi(owner='atmos-cloud-sim-uj', repo='PySDM')
            pages = paged(api.issues.list_for_repo,
                          owner='atmos-cloud-sim-uj',
                          repo='PySDM',
                          state='all',
                          per_page=100)
            for page in pages:
                for item in page.items:
                    res[item.number] = item.state
        except ExceptionsHTTP[403]:
            pass
    return res
Example #18
def gh_issues():
    """ pytest fixture providing a dictionary with github issue ids as keys
        and their state as value """
    res = {}
    if 'CI' not in os.environ or ('GITHUB_ACTIONS' in os.environ
                                  and sys.version_info.minor >= 8):
        try:
            api = GhApi(owner='atmos-cloud-sim-uj', repo='PyMPDATA')
            pages = paged(api.issues.list_for_repo,
                          owner='atmos-cloud-sim-uj',
                          repo='PyMPDATA',
                          state='all',
                          per_page=100)
            for page in pages:
                for item in page.items:
                    res[item.number] = item.state
        except ExceptionsHTTP[403]:
            pass
    return res
Example #19
def gh_issues():
    res = {}
    if "CI" not in os.environ or ("GITHUB_ACTIONS" in os.environ
                                  and sys.version_info.minor >= 8):
        try:
            api = GhApi(owner="atmos-cloud-sim-uj", repo="PySDM")
            pages = paged(
                api.issues.list_for_repo,
                owner="atmos-cloud-sim-uj",
                repo="PySDM",
                state="all",
                per_page=100,
            )
            for page in pages:
                for item in page.items:
                    res[item.number] = item.state
        except ExceptionsHTTP[403]:
            pass
    return res
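A minimal sketch of how such a fixture might be consumed in a test, assuming it is registered via `@pytest.fixture` (the issue number is a placeholder):

def test_issue_is_closed(gh_issues):
    # the fixture returns {} when offline or rate-limited, so guard first
    if 42 in gh_issues:
        assert gh_issues[42] == 'closed'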
Example #20
    def __init__(
        self,
        owner: str = DEFAULT_OWNER,
        purge_cache: bool = False,
        repo: str = DEFAULT_REPO,
        token: Optional[str] = DEFAULT_TOKEN,
        verbose: bool = False,
    ):
        if not token:
            raise ValueError("GitHub token was not provided.")

        if verbose:
            logger.setLevel(logging.DEBUG)
        else:
            logger.setLevel(logging.INFO)

        if purge_cache:
            shutil.rmtree(ASSET_CACHE_DIR)

        self.gh_api = GhApi(owner=owner, repo=repo, token=token)
        self.session = requests.Session()
        self.session.headers["Authorization"] = f"token {token}"
Example #21
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--token",
        help="github oauth token",
    )
    parser.add_argument(
        "--exclude",
        default=DEFAULT_EXCLUDES,
        action="append",
        help="repos to exclude",
    )
    parser.add_argument(
        "--include",
        default=[],
        action="append",
        help="repos to include",
    )
    parser.add_argument(
        "--org",
        default="linux-system-roles",
        help="github organization",
    )
    parser.add_argument(
        "--state",
        action="append",
        help="only show statuses that have these states",
    )
    parser.add_argument(
        "--env",
        action="append",
        help="only show statuses from these environments",
    )
    parser.add_argument(
        "--ansible",
        action="append",
        help="only show statuses from tests with these versions of ansible",
    )
    parser.add_argument(
        "--platform",
        action="append",
        help="only show statuses from tests with these platforms",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="-v -v to increase verbosity",
    )
    parser.add_argument(
        "--user",
        action="append",
        help="only show statuses from PRs submitted by these users",
    )
    parser.add_argument(
        "--updated-since",
        action="store",
        default=0,
        type=int,
        help="only show statuses updated since this many hours ago",
    )
    parser.add_argument(
        "--sort-spec",
        action="append",
        default=[],
        help="sort PRs by these criteria",
    )
    parser.add_argument(
        "--stat-type",
        choices=["status", "check", "both"],
        default="both",
        help="Return only statuses or checks",
    )

    args = parser.parse_args()
    if not args.state:
        args.state = DEFAULT_STATES
    if not args.env:
        args.env = DEFAULT_ENVIRONMENTS
    if not args.ansible:
        args.ansible = DEFAULT_ANSIBLES
    if not args.platform:
        args.platform = DEFAULT_PLATFORMS
    if not args.token:
        with open(f"{os.environ['HOME']}/.config/hub") as hub:
            hsh = yaml.safe_load(hub)
            args.token = hsh["github.com"][0]["oauth_token"]
    no_status_error_time = datetime.timedelta(hours=8)
    pending_error_time = datetime.timedelta(hours=8)
    status_env = [
        "" if xx == "production" else " (staging)" for xx in args.env
    ]
    required_statuses = set([
        f"{platform}/{ansible}{env}"
        for platform, ansible, env in itertools.product(
            args.platform, args.ansible, status_env)
        if not (platform.startswith("fedora-") and ansible.endswith("-2.8"))
    ])

    prs = []
    gh = GhApi(token=args.token)
    rate_limit = gh.rate_limit.get()
    print(f"github limit remaining is {rate_limit.rate.remaining}")
    now = datetime.datetime.now(tz=datetime.timezone.utc)
    logging.debug(f"time now is {now}")

    # gather the data
    for repo in gh_iter(gh.repos.list_for_org, "", args.org, per_page=100):
        if repo.name in args.exclude:
            logging.debug(f"Excluding repo {repo.name}")
            continue
        if args.include and repo.name not in args.include:
            logging.debug(f"repo not included {repo.name}")
            continue
        for pr in gh_iter(
                gh.pulls.list,
                "",
                owner=args.org,
                repo=repo.name,
                state="open",
                sort="created",
                per_page=100,
        ):
            if not match_user(pr.user.login, args):
                continue
            pr_updated_at = conv_to_aware(pr.updated_at)
            if args.updated_since:
                updated_since = now - datetime.timedelta(
                    hours=args.updated_since)
            else:
                updated_since = None
            if args.stat_type in ["both", "status"]:
                statuses, excluded_statuses = get_statuses(
                    gh, args, repo, pr, updated_since)
                setattr(pr, "statuses", statuses)
                setattr(pr, "excluded_statuses", excluded_statuses)
                if args.stat_type == "status":
                    setattr(pr, "checks", [])
            if args.stat_type in ["both", "check"]:
                checks = get_checks(gh, args, repo, pr)
                setattr(pr, "checks", checks)
                if args.stat_type == "check":
                    setattr(pr, "statuses", [])
                    setattr(pr, "excluded_statuses", [])
            setattr(pr, "updated_at_conv", pr_updated_at)
            prs.append(pr)

    # sort the list by the field of pr
    # updated_at_conv
    # statuses.oldest
    # statuses.newest
    # checks.oldest_start
    # checks.newest_start
    # checks.oldest_finish
    # checks.newest_finish
    # statuses.failed
    # checks.failed
    # statuses.pending
    # checks.pending
    for raw_key in args.sort_spec:
        match = re.match(r"^(.+?)([-+])?$", raw_key)
        key = match.group(1)
        rev = match.group(2) == "-"
        prs.sort(key=attrgetter(key), reverse=rev)

    # report the data
    for pr in prs:
        indent = ""
        print(f"{indent}PR {pr.base.repo.name}#{pr.number} {pr.title}")
        indent += "  "
        print(f"{indent}{pr.user.login} Updated:{pr.updated_at} {pr.html_url}")
        checks = pr.checks
        statuses = pr.statuses
        excluded_statuses = pr.excluded_statuses
        if not checks:
            print(f"{indent}no checks for PR")
        elif checks.pending or checks.failed:
            print(
                f"{indent}checks: {checks.count} total - {checks.pending} pending - {checks.failed} failed"
            )
            if args.verbose:
                for name, check_run in checks.items():
                    print(
                        f"{indent}  {name} status {check_run.status} conclusion {check_run.conclusion} "
                        f"started_at {check_run.started_at} completed_at {check_run.completed_at}"
                    )
        else:
            print(f"{indent}checks: {checks.count} total all passed")

        if not statuses and now - pr.updated_at_conv > no_status_error_time:
            print(
                f"{indent}problem - PR has no status after {no_status_error_time}"
            )
        elif not statuses:
            print(f"{indent}no status for PR")
        else:
            for context in required_statuses:
                if context not in statuses and context not in excluded_statuses:
                    statuses.missing += 1
                    status = argparse.Namespace()
                    setattr(status, "state", "missing")
                    statuses[context] = status
            if statuses.pending or statuses.failed or statuses.missing:
                print(
                    f"{indent}statuses: matched {len(statuses)} out of {statuses.count} total "
                    f"- {statuses.pending} pending "
                    f"- {statuses.failed} failed - {statuses.missing} missing")
                oldest_pending_status = None
                oldest_pending_status_dt = None
                for context, status in statuses.items():
                    if args.verbose:
                        if status.state == "missing":
                            print(f"{indent}  {context} is missing")
                            continue
                        print(
                            f"{indent}  {context} status {status.state} updated {status.updated_at}"
                        )
                    if status.state == "pending" and (
                            not oldest_pending_status_dt or
                            status.updated_at_conv < oldest_pending_status_dt):
                        oldest_pending_status_dt = status.updated_at_conv
                        oldest_pending_status = status
                if oldest_pending_status:
                    status_age = now - oldest_pending_status_dt
                    print(
                        f"{indent}oldest pending status is {oldest_pending_status_dt} for "
                        f"status {oldest_pending_status.context} diff {status_age}"
                    )
                    if status_age > pending_error_time:
                        print(
                            f"{indent}possible hang - age of {oldest_pending_status_dt} for "
                            f"status {oldest_pending_status.context}")
            else:
                print(
                    f"{indent}statuses: matched {len(statuses)} out of {statuses.count} total - all passed"
                )
    rate_limit = gh.rate_limit.get()
    print(f"github limit remaining is {rate_limit.rate.remaining}")
Example #22
from ghapi.all import GhApi, paged

api = GhApi(owner='openfast', repo='openfast')

# tags = api.repos.list_tags()
# last_release_sha = tags[0].commit.sha
# start_commit = api.git.get_commit(last_release_sha)

# last_release_sha = "ff33ca1cf65f2e13c1de0ab78cc2396ec4a47ce0"

start_commit = api.git.get_commit("42a5a8196529ae0349eda6d797a79461c2c03ff0")
start_date = start_commit.committer.date
stop_commit = api.git.get_commit("a5d6262d62e573dbc8177f6934c336e86dcdba31")
stop_date = stop_commit.committer.date

# Find all merged pull requests in the date range
pulls = api.pulls.list(state="closed", per_page=100)
pulls_since = []
for pull in pulls:
    # print(pull.number)

    # Skip if the pull request was closed without merge
    if pull.merged_at is None:
        continue

    # Skip if this pull request is merged before the last release
    if pull.merged_at <= start_date:
        continue

    # Skip if this pull request is merged after the stop date
"""Labels PRs based on title. Must be run in a github action with the
pull_request_target event."""
from ghapi.all import context_github
from ghapi.all import GhApi
from ghapi.all import user_repo
from ghapi.all import github_token
import re

owner, repo = user_repo()
pull_request = context_github.event.pull_request
title = pull_request.title

regex_to_labels = [(r"\bDOC\b", "Documentation"), (r"\bCI\b", "Build / CI")]

labels_to_add = [
    label for regex, label in regex_to_labels if re.search(regex, title)
]

if labels_to_add:
    api = GhApi(owner=owner, repo=repo, token=github_token())
    api.issues.add_labels(pull_request.number, labels=labels_to_add)
Example #24
import argparse
import time
import sys
from ghapi.all import GhApi

timeout = 3 * 60 * 60  # wait for max 3 hours
interval = 5 * 60  # wait for 5 minutes between query status

parser = argparse.ArgumentParser(description="Re-run failed GitHub workflow")
parser.add_argument("owner", type=str, help="owner")
parser.add_argument("repo", type=str, help="repository")
parser.add_argument("run", type=str, help="run id")
args = parser.parse_args()

api = GhApi(args.owner, args.repo)
run_id = args.run

start = int(time.time())
while (int(time.time()) - start < timeout):
    status = api.actions.get_workflow_run(run_id).status
    print("Workflow Status:", status)
    if status == "completed":
        result = api.actions.get_workflow_run(run_id).conclusion
        print("Workflow Result:", result)
        if result != "success" and result != "cancelled":
            api.actions.re_run_workflow(run_id)
            print("Re-run workflow")
            sys.exit(0)
        else:
            print("No need to re-run a successful or cancelled workflow")
            break
    time.sleep(interval)
Example #25
    def construct_interface(self):
        if self.ghapi is None:
            self.ghapi = GhApi(token=self.github_token)
Example #26
def prepare_release(new_version_num):
    """Change version number in the cfg files."""
    api = GhApi(
        owner=os.environ["GITHUB_REPOSITORY_OWNER"],
        repo="sqlfluff",
        token=os.environ["GITHUB_TOKEN"],
    )
    releases = api.repos.list_releases()

    latest_draft_release = None
    for rel in releases:
        if rel["draft"]:
            latest_draft_release = rel
            break

    if not latest_draft_release:
        raise ValueError("No draft release found!")

    # Linkify the PRs and authors
    draft_body_parts = latest_draft_release["body"].split("\n")
    potential_new_contributors = []
    for i, p in enumerate(draft_body_parts):
        draft_body_parts[i] = re.sub(
            r"\(#([0-9]*)\) @([^ ]*)$",
            r"[#\1](https://github.com/sqlfluff/sqlfluff/pull/\1) [@\2](https://github.com/\2)",  # noqa E501
            p,
        )
        new_contrib_string = re.sub(
            r".*\(#([0-9]*)\) @([^ ]*)$",
            r"* [@\2](https://github.com/\2) made their first contribution in [#\1](https://github.com/sqlfluff/sqlfluff/pull/\1)",  # noqa E501
            p,
        )
        if new_contrib_string.startswith("* "):
            new_contrib_name = re.sub(r"\* \[(.*?)\].*", r"\1",
                                      new_contrib_string)
            potential_new_contributors.append({
                "name": new_contrib_name,
                "line": new_contrib_string
            })
    whats_changed_text = "\n".join(draft_body_parts)

    # Find the first commit for each contributor in this release
    potential_new_contributors.reverse()
    seen_contributors = set()
    deduped_potential_new_contributors = []
    for c in potential_new_contributors:
        if c["name"] not in seen_contributors:
            seen_contributors.add(c["name"])
            deduped_potential_new_contributors.append(c)

    input_changelog = open("CHANGELOG.md", encoding="utf8").readlines()
    write_changelog = open("CHANGELOG.md", "w", encoding="utf8")
    for i, line in enumerate(input_changelog):
        write_changelog.write(line)
        if "DO NOT DELETE THIS LINE" in line:
            existing_entry_start = i + 2
            # If the release is already in the changelog, update it
            if f"## [{new_version_num}]" in input_changelog[
                    existing_entry_start]:
                input_changelog[
                    existing_entry_start] = f"## [{new_version_num}] - {time.strftime('%Y-%m-%d')}\n"

                # Delete the existing What’s Changed and New Contributors sections
                remaining_changelog = input_changelog[existing_entry_start:]
                existing_whats_changed_start = (
                    next(j for j, line in enumerate(remaining_changelog)
                         if line.startswith("## What’s Changed")) +
                    existing_entry_start)
                existing_new_contributors_start = (
                    next(j for j, line in enumerate(remaining_changelog)
                         if line.startswith("## New Contributors")) +
                    existing_entry_start)
                existing_new_contributors_length = (
                    next(j for j, line in enumerate(
                        input_changelog[existing_new_contributors_start:])
                         if line.startswith("## [")) - 1)

                del input_changelog[existing_whats_changed_start:
                                    existing_new_contributors_start +
                                    existing_new_contributors_length]

                # Now that we've cleared the previous sections, we will accurately
                # find if contributors have been previously mentioned in the changelog
                new_contributor_lines = []
                input_changelog_str = "".join(
                    input_changelog[existing_whats_changed_start:])
                for c in deduped_potential_new_contributors:
                    if c["name"] not in input_changelog_str:
                        new_contributor_lines.append(c["line"])
                input_changelog[existing_whats_changed_start] = (
                    whats_changed_text + "\n\n## New Contributors\n" +
                    "\n".join(new_contributor_lines) + "\n\n")

            else:
                write_changelog.write(
                    f"\n## [{new_version_num}] - {time.strftime('%Y-%m-%d')}\n\n## Highlights\n\n"  # noqa E501
                )
                write_changelog.write(whats_changed_text)
                write_changelog.write("\n## New Contributors\n\n")
                # Ensure contributor names don't appear in input_changelog list
                new_contributor_lines = []
                input_changelog_str = "".join(input_changelog)
                for c in deduped_potential_new_contributors:
                    if c["name"] not in input_changelog_str:
                        new_contributor_lines.append(c["line"])
                write_changelog.write("\n".join(new_contributor_lines))
                write_changelog.write("\n")

    write_changelog.close()

    for filename in ["setup.cfg", "plugins/sqlfluff-templater-dbt/setup.cfg"]:
        input_file = open(filename, "r").readlines()
        write_file = open(filename, "w")
        for line in input_file:
            for key in ["stable_version", "version"]:
                if line.startswith(key):
                    line = f"{key} = {new_version_num}\n"
                    break
            if line.startswith("    sqlfluff=="):
                line = f"    sqlfluff=={new_version_num}\n"
            write_file.write(line)
        write_file.close()

    for filename in ["docs/source/gettingstarted.rst"]:
        input_file = open(filename, "r").readlines()
        write_file = open(filename, "w")
        change_next_line = False
        for line in input_file:
            if change_next_line:
                line = f"    {new_version_num}\n"
                change_next_line = False
            elif line.startswith("    $ sqlfluff version"):
                change_next_line = True
            write_file.write(line)
        write_file.close()
Example #27
def get_api():
    owner, repo = get_github_env('repository').split('/')
    token = get_github_env('token')
    return GhApi(owner=owner, repo=repo, token=token)
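`get_github_env` is not shown; in a GitHub Actions job it presumably reads the variables the runner exposes. A minimal sketch of such a helper (hypothetical):

import os

def get_github_env(name):
    # e.g. 'repository' -> GITHUB_REPOSITORY ('owner/repo'), 'token' -> GITHUB_TOKEN
    return os.environ[f"GITHUB_{name.upper()}"]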
Example #28
"""
This script helps remind our team when it's time to merge a PR. It does these things:

- Grab a list of all open pull requests in our infrastructure repository.
- For any that have a PR approval, check how long ago that approval was.
- If the approval was longer than 24 hours ago, add it to a list of PRs we should merge
- Ping our #hub-development channel with a list of the PRs that are ready to merge
"""
from ghapi.all import GhApi
import pandas as pd
from datetime import datetime

api = GhApi()

open_prs = api.pulls.list("2i2c-org", "infrastructure", state="open")

msg = ""
for pr in open_prs:
    print(f"Checking PR: {pr['title']} ({pr['html_url']})")
    reviews = api.pulls.list_reviews("2i2c-org", "infrastructure", pr["number"])
    for review in reviews:
        # Only care about reviews that caused approvals
        if review["state"] != "APPROVED":
            continue
        # Check whether we've had an approval for > 2 business days
        today = pd.to_datetime(datetime.today()).tz_localize("US/Pacific")
        review_time = pd.to_datetime(review["submitted_at"]).astimezone("US/Pacific")
        n_workdays = len(pd.bdate_range(review_time.date(), today.date()))
        if n_workdays > 2:
            msg += f"- {pr['title']} ({pr['html_url']}) - {n_workdays} workdays old.\n"
if len(msg) > 0:
Example #29
SMTP_SERVER = "smtp.gmail.com"
SENDER_EMAIL = "*****@*****.**"

if __name__ == "__main__":

    ### Get authentication info
    if os.getenv('CI'):
        TOKEN = os.getenv('CE_DATABASE_TOKEN')
        EMAIL_PW = os.getenv('CE_EMAIL_PW')
    else:
        data = json.load(open("auth.json", "r"))
        TOKEN = data["CE_DATABASE_TOKEN"]
        EMAIL_PW = data["CE_EMAIL_PW"]

    ### Get the database from GitHub
    api = GhApi(owner='rafmudaf', repo='CE-Hearing-Database', token=TOKEN)

    addresses, subscribers = [], []
    subscriber_file = api.get_content("subscribers.csv").decode("utf-8").split(
        '\n')
    # subscriber_file = open("../CE-HEARING-DATABASE/subscribers.csv")

    for entry in subscriber_file:
        # split the contents into the target address and all subscribers
        address_subscribers = entry.split(',')

        # save the address
        addresses.append(address_subscribers[0])

        # save subscribers
        subscribers += [s.strip() for s in address_subscribers[1:]]
Example #30
File: a.py Project: yamt/garbage
import os
import itertools

from ghapi.all import GhApi
from ghapi.all import paged
from ghapi.all import print_summary
import dateutil.parser
import matplotlib.pyplot as plot
from matplotlib.ticker import MultipleLocator
import numpy as np

owner = os.environ.get("OWNER", "yamt")
repo = os.environ.get("REPO", "garbage")

api = GhApi(owner=owner, repo=repo)
api.debug = print_summary

# default: sort=created, direction=desc
# XXX should filter on target branch
# XXX esp-idf often uses an awkward way to merge PRs.
#     how can i deal with it?
#     eg. https://github.com/espressif/esp-idf/pull/8248
# XXX for some reasons, state=closed often causes 502
#     for kubernetes/kubernetes.
pgs = paged(api.pulls.list, state="all", per_page=100)
l = itertools.chain.from_iterable(pgs)
l = filter(lambda p: p.merged_at is not None, l)
l = itertools.islice(l, 500)

day_in_sec = 24 * 60 * 60.0