# Django management command (the BaseCommand wrapper is assumed from the
# handle() signature) that prints the rate-limit status of every
# configured GitHub API token.
import datetime

import pandas as pd
from django.core.management.base import BaseCommand

import scraper


class Command(BaseCommand):
    help = "Report GitHub API rate limits for all configured tokens"

    def handle(self, *args, **options):
        api = scraper.GitHubAPI()
        now = datetime.datetime.now()
        df = pd.DataFrame(columns=(
            "core_limit", "core_remaining", "core_renews_in",
            "search_limit", "search_remaining", "search_renews_in", "key"))
        for token in api.tokens:
            # if the limit is exhausted there is no way to get the username
            user = token.user or "<unknown%d>" % len(df)
            values = {'key': token.token}
            token._check_limits()
            for api_class in token.limit:
                # format the reset timestamp as an "NmNs" countdown
                next_update = token.limit[api_class]['reset_time']
                if next_update is None:
                    renew = 'never'
                else:
                    tdiff = datetime.datetime.fromtimestamp(next_update) - now
                    renew = "%dm%ds" % divmod(tdiff.seconds, 60)
                values[api_class + '_renews_in'] = renew
                values[api_class + '_limit'] = token.limit[api_class]['limit']
                values[api_class + '_remaining'] = token.limit[api_class]['remaining']
            df.loc[user] = values
        print(df)
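# For reference: token._check_limits() above presumably refreshes token.limit
# from GitHub's public /rate_limit endpoint. A minimal standalone sketch of
# that call using requests; fetch_rate_limit is a hypothetical helper, not
# part of the project's scraper module:
import requests


def fetch_rate_limit(token_string):
    resp = requests.get("https://api.github.com/rate_limit",
                        headers={"Authorization": "token %s" % token_string})
    resp.raise_for_status()
    resources = resp.json()["resources"]
    # Each API class ("core", "search", ...) reports limit/remaining/reset,
    # matching the 'limit', 'remaining' and 'reset_time' fields read above.
    return {name: {"limit": r["limit"],
                   "remaining": r["remaining"],
                   "reset_time": r["reset"]}
            for name, r in resources.items()}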
#!/usr/bin/python
import argparse
import csv

import pandas as pd

from common import decorators as d
import scraper

api = scraper.GitHubAPI()


def get_userEmail(loginID):
    # print("get user %s email address " % (loginID))
    return api.userEmail(loginID)


def get_userInfo(loginID):
    print("get user %s info " % (loginID))
    return api.userInfo(loginID)


def get_isFork(repoUrl):
    # print(" check if repo %s a fork" % (repoUrl))
    return api.isFork(repoUrl)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="do the magic")
    parser.add_argument('-i',