def find_repo_latest_commit(commits_url):
    """Return the SHA of the most recent commit of a repository.

    :param commits_url: GitHub API commits URL template (contains "{/sha}").
    :return: SHA string of the newest commit.
    """
    commits_url = re.sub("{/sha}", "", commits_url)
    params = {'page': 1}
    headers = {'Authorization': get_auth_token()}
    # BUG FIX: `params` was built but never sent with the request;
    # pass it so the first page is requested explicitly.
    latest_commit_response = requests.get(commits_url, params=params, headers=headers)
    latest_commit_data = latest_commit_response.json()
    # The commits endpoint returns newest-first, so index 0 is the latest.
    latest_commit_sha = latest_commit_data[0]["sha"]
    return latest_commit_sha
def find_repo_latest_release(releases_url, tags_url):
    """Look up the latest release of a repository.

    :param releases_url: GitHub API releases URL template (contains "{/id}").
    :param tags_url: GitHub API tags URL, used to resolve the release SHA.
    :return: dict with 'tag_name' and 'release_commit_sha' keys, or an
        empty dict when the repository has no releases.
    """
    headers = {'Authorization': get_auth_token()}
    latest_release_url = re.sub("{/id}", "/latest", releases_url)
    response = requests.get(latest_release_url, headers=headers)
    data = response.json()
    latest_release = {}
    if "tag_name" in data:
        tag = data["tag_name"]
        latest_release["tag_name"] = tag
        # Resolve the tag to its commit SHA via the tags endpoint.
        latest_release["release_commit_sha"] = find_release_sha(tags_url, tag)
    return latest_release
def find_total_commits(url):
    """Count the commits of a repository via the pagination headers.

    With per_page=1, the last page number reported in the response's Link
    header equals the total number of commits.

    :param url: base GitHub API repository URL.
    :return: total number of commits as an int.
    """
    headers = {'Authorization': get_auth_token()}
    params = {'per_page': '1'}
    total_commits_url = url + "/commits"
    commits_response = requests.get(total_commits_url, params=params, headers=headers)
    pages = re.findall(r'&page=\d+', str(commits_response.headers))
    # BUG FIX: with no pagination (repo has a single commit) the original
    # indexed [-1] on an empty list and raised IndexError. Mirror the
    # fallback used by find_contributors().
    if not pages:
        return 1
    return int(re.findall(r'\d+', pages[-1])[0])
def find_release_sha(tags_url, tag_name):
    """Find the commit SHA of a tag by paging through the repo's tag list.

    :param tags_url: GitHub API tags URL of the repository.
    :param tag_name: name of the tag to look for.
    :return: the tag's commit SHA, or "" when the tag does not exist.
    """
    headers = {'Authorization': get_auth_token()}
    page = 0
    while True:
        page += 1
        params = {'page': page}
        tags_response = requests.get(tags_url, params=params, headers=headers)
        tags = tags_response.json()
        # BUG FIX: the original looped forever when the tag was absent;
        # an empty page means the tag list is exhausted.
        if not tags:
            return ""
        for tag in tags:
            if tag["name"] == tag_name:
                return tag["commit"]["sha"]
def find_contributors(contributors_url):
    """Return the number of contributors of a repository (anonymous included).

    With per_page=1, the last page number in the pagination headers equals
    the contributor count; a missing pagination header means exactly one.

    :param contributors_url: GitHub API contributors URL.
    :return: contributor count as an int.
    """
    headers = {'Authorization': get_auth_token()}
    params = {'per_page': 1, 'anon': '1'}
    response = requests.get(contributors_url, params=params, headers=headers)
    pages = re.findall(r'&page=\d+', str(response.headers))
    if not pages:
        # Single, un-paginated page: exactly one contributor.
        return 1
    return int(re.findall(r'\d+', pages[-1])[0])
def call_github_api():
    """Fetch the top 1000 Java repos mentioning docker in their README.

    Pages through the GitHub search API (10 pages x 100 results, sorted by
    stars) and writes the combined items to `query_results_file` as JSON.
    """
    all_search_items = {"items": []}
    # Hoisted: the auth header is loop-invariant.
    headers = {'Authorization': get_auth_token()}
    # BUG FIX: the original reused `i` for both the page counter and the
    # inner item index, shadowing the outer loop variable.
    for page in range(1, 11):
        params = {
            'q': 'docker in:readme language:Java',
            'sort': 'stars',
            'per_page': '100',
            'page': page,
        }
        print(params)
        response = requests.get('https://api.github.com/search/repositories',
                                params=params, headers=headers)
        data_from_page = response.json()
        all_search_items["items"].extend(data_from_page["items"])
    with open(query_results_file, 'w') as json_file:
        json.dump(all_search_items, json_file, indent=2)
def compile(self):
    """Compile the dataset by requesting a CSV export from the corpus API.

    Builds the export payload from self.options, POSTs it to the
    export-corpus endpoint, and returns the parsed CSV as a DataFrame.
    Exits the process on authentication failure or any non-200 response.
    """
    # Token for authenticating against the classifier API.
    auth_token = config.get_auth_token(config.umucorpusclassifier_api_endpoint, config.certificate)

    # Mandatory export parameters.
    request_payload = {
        'export-format': 'csv',
        'size': self.options['max'],
        'corpora[]': ','.join(str(x) for x in self.options['ids']),
        'preprocessing[]': self.options['preprocessing'],
        'fields[]': self.options['fields'],
    }

    # Optional parameters, forwarded only when configured.
    if 'strategy' in self.options:
        request_payload['strategy'] = self.options['strategy']
    if 'balanced' in self.options:
        request_payload['balanced'] = True
    for key in ('filter-date-start', 'filter-date-end'):
        if key in self.options:
            request_payload[key] = self.options[key]

    response = requests.post(
        config.umucorpusclassifier_api_endpoint + 'admin/export-corpus.csv',
        json=request_payload,
        verify=config.certificate,
        auth=config.PLNAuth(auth_token),
    )

    # Abort on authentication failure ...
    if response.status_code == 401:
        print("Authentication failed: " + str(response.status_code))
        print(response.text)
        print(request_payload)
        sys.exit()

    # ... or on any other unsuccessful response.
    if response.status_code != 200:
        print("Request failed: " + str(response.status_code))
        print(response.text)
        print(request_payload)
        sys.exit()

    # Parse the returned CSV payload into a DataFrame.
    return pd.read_csv(io.StringIO(response.content.decode('utf-8')))
def find_languages(languages_url):
    """Return the language breakdown of a repository as parsed JSON.

    :param languages_url: GitHub API languages URL of the repository.
    :return: dict mapping language name to byte count (API response body).
    """
    headers = {'Authorization': get_auth_token()}
    return requests.get(languages_url, headers=headers).json()
return None if __name__ == '__main__': # initialise logger logger = Logger("MAIN", log_folder=get_log_folder(), enable_logger_name=True, enabled_log_levels=get_enabled_log_levels()) logger.info("==========================================") logger.info(" Starting Log Server") logger.info("==========================================") # create flask app app = Flask(__name__) # load from config, if required auth_token = get_auth_token() # declare API handler for incoming log messages @app.route("/api/log", methods=["POST"]) def log_message(): if auth_token is not None and auth_token != extract_auth_token(): logger.warn("Incoming request with invalid authentication") abort(401) message = JSON(request.get_json()) msg = message.msg if "trace" in message and message.trace is not None and isinstance(message.trace, list): trace = "\n".join(message.trace) msg = f"{msg}\n{trace}"