def upload_to_github(file_path, nwjs_version):
    github = GitHub(auth_token())
    releases = github.repos(GITHUB_REPO).releases.get()
    release = create_or_get_release_draft(github, releases, nwjs_version)
    params = {'name': os.path.basename(file_path)}
    headers = {'Content-Type': 'application/zip'}
    with open(file_path, 'rb') as f:
        github.repos(GITHUB_REPO).releases(release['id']).assets.post(
            params=params, headers=headers, data=f, verify=False)
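# upload_to_github relies on two helpers that are not shown here. A minimal
# sketch of what they might look like (the env var name and the draft-reuse
# logic are assumptions, not the project's confirmed implementation):

import os

def auth_token():
    # Hypothetical: read the OAuth token from an environment variable.
    return os.environ['GITHUB_AUTH_TOKEN']

def create_or_get_release_draft(github, releases, nwjs_version):
    # Hypothetical: reuse an existing draft release for this version,
    # or create a new one via POST /repos/:owner/:repo/releases.
    name = 'NW.js v{}'.format(nwjs_version)
    for release in releases:
        if release['draft'] and release['name'] == name:
            return release
    return github.repos(GITHUB_REPO).releases.post(
        tag_name='v{}'.format(nwjs_version), name=name, draft=True)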
def get_issues_api():
    if app.config['REPORT_PARSING_ISSUES']:
        access_token = app.config['GITHUB_ACCESS_TOKEN']
        repo_owner = app.config['GITHUB_REPO_OWNER']
        repo_name = app.config['GITHUB_REPO_NAME']
        gh = GitHub(access_token=access_token)
        return gh.repos(repo_owner)(repo_name).issues
    else:
        return None
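# A minimal usage sketch (the issue title and body are made up for
# illustration; githubpy maps REST paths onto attribute access, so this
# POSTs to /repos/:owner/:repo/issues):
issues_api = get_issues_api()
if issues_api is not None:
    issues_api.post(title='Parsing failed', body='Details of the failure...')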
def cron():
    from mod_ci.controllers import start_platform
    from run import config, log
    from database import create_session
    from github import GitHub

    log.info('Run the cron for kicking off CI platform(s).')
    # Create session
    db = create_session(config['DATABASE_URI'])
    gh = GitHub(access_token=config['GITHUB_TOKEN'])
    repository = gh.repos(config['GITHUB_OWNER'])(config['GITHUB_REPOSITORY'])
    start_platform(db, repository)
def cron(testing=False):
    """Script to run from cron for Sampleplatform."""
    from mod_ci.controllers import start_platforms, kvm_processor, TestPlatform
    from flask import current_app
    from run import config, log
    from database import create_session
    from github import GitHub

    log.info('Run the cron for kicking off CI platform(s).')
    # Create session
    db = create_session(config['DATABASE_URI'])
    gh = GitHub(access_token=config['GITHUB_TOKEN'])
    repository = gh.repos(config['GITHUB_OWNER'])(config['GITHUB_REPOSITORY'])

    if testing is True:
        kvm_processor(current_app._get_current_object(), db,
                      config.get('KVM_LINUX_NAME', ''), TestPlatform.linux,
                      repository, None)
    else:
        start_platforms(db, repository)
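# To run this periodically, a crontab entry along these lines would work
# (path and interval are assumptions; adjust to the deployment):
#
#   */10 * * * * cd /var/www/sample-platform && python cron.py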
def get_comments(owner, repo, user, csvfile):
    gh = GitHub(access_token=GITHUB_ACCESS_TOKEN)
    page = 1
    writer = DictWriter(csvfile, fieldnames=CSV_FIELD_NAMES)
    writer.writeheader()
    while True:
        print "Getting page {}".format(page)
        new_comments = gh.repos(owner)(repo).pulls.comments.get(page=page)
        if len(new_comments) == 0:
            break
        else:
            page = page + 1
        for comment in new_comments:
            if comment['user']['login'] == user:
                row = {
                    'message': comment['body'].encode('utf8'),
                    'url': comment['html_url'],
                    'username': comment['user']['login'],
                }
                writer.writerow(row)
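# A hypothetical invocation (Python 2, matching the print statement above),
# assuming GITHUB_ACCESS_TOKEN and CSV_FIELD_NAMES (e.g.
# ['message', 'url', 'username']) are defined at module level:
with open('comments.csv', 'wb') as csvfile:
    get_comments('jquery', 'jquery', 'some-reviewer', csvfile)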
from github import GitHub

gh = GitHub()

user = gh.users('stevesun112').get()
# print(user)

commit = gh.repos('imsure', 'hello-antares').commits(
    '083a8604a73dcb5eda83a5bdd6638a93cfa60045').get()
# print(commit['html_url'])

search = gh.search.code.get(q="addClass in:file language:js repo:jquery/jquery")
# print(search)

search = gh.search.code.get(q="create_table in:file language:py repo:imsure/hello-antares")
# print(search['items'][0]['html_url'])

user = gh.users('AzNOAOTares').get()
print(user)

commit = gh.repos('AzNOAOTares', 'antares-docs').commits(
    'ee22aff520fba4e69971c9ac86a383e0b2374bb6').get()
print(commit['html_url'])

# search = gh.search.code.get(q="addClass in:file language:js repo:jquery/jquery")
# print(search)

search = gh.search.code.get(q="maketitle in:file language:tex repo:AzNOAOTares/antares-docs")
print(search['items'][0]['html_url'])

user = gh.users('AzNOAOTares').get()
print(user)

commit = gh.repos('AzNOAOTares', 'architecture').commits(
    '93d4c7d2e6d6950dbeebff0de9c33941ecf3d109').get()
print(commit['html_url'])

# search = gh.search.code.get(q="addClass in:file language:js repo:jquery/jquery")
# print(search)
# search = gh.search.code.get(q="maketitle in:file language:tex repo:AzNOAOTares/antares-docs")
# print(search['items'][0]['html_url'])
def progress_reporter(test_id, token):
    from run import config, log
    # Verify token
    test = Test.query.filter(Test.id == test_id).first()
    if test is not None and test.token == token:
        if 'type' in request.form:
            if request.form['type'] == 'progress':
                # Progress, log
                status = TestStatus.from_string(request.form['status'])
                progress = TestProgress(test.id, status,
                                        request.form['message'])
                g.db.add(progress)
                g.db.commit()
                gh = GitHub(access_token=g.github['bot_token'])
                repository = gh.repos(g.github['repository_owner'])(
                    g.github['repository'])
                # If status is complete, remove the Kvm entry
                if status in [TestStatus.completed, TestStatus.canceled]:
                    kvm = Kvm.query.filter(Kvm.test_id == test_id).first()
                    if kvm is not None:
                        g.db.delete(kvm)
                        g.db.commit()
                    # Start next test if necessary
                    start_ci_vm(g.db, repository, 60)
                # Post status update
                state = Status.PENDING
                message = 'Tests queued'
                target_url = url_for('test.by_id', test_id=test.id,
                                     _external=True)
                context = "CI - %s" % test.platform.value
                if status == TestStatus.canceled:
                    state = Status.ERROR
                    message = 'Tests aborted due to an error; please check'
                elif status == TestStatus.completed:
                    # Determine if success or failure
                    # It fails if any of these happen:
                    # - A crash (unexpected exit code)
                    # - A not None value on the "got" of a TestResultFile
                    #   (meaning the hashes do not match)
                    crashes = g.db.query(count(TestResult.exit_code)).filter(
                        and_(TestResult.test_id == test.id,
                             TestResult.exit_code != TestResult.expected_rc)
                    ).scalar()
                    results_zero_rc = g.db.query(RegressionTest.id).filter(
                        RegressionTest.expected_rc == 0).subquery()
                    results = g.db.query(count(TestResultFile.got)).filter(
                        and_(TestResultFile.test_id == test.id,
                             TestResultFile.regression_test_id.in_(
                                 results_zero_rc),
                             TestResultFile.got.isnot(None))
                    ).scalar()
                    log.debug('Test {id} completed: {crashes} crashes, '
                              '{results} results'.format(
                                  id=test.id, crashes=crashes,
                                  results=results))
                    if crashes > 0 or results > 0:
                        state = Status.FAILURE
                        message = 'Not all tests completed successfully, ' \
                                  'please check'
                    else:
                        state = Status.SUCCESS
                        message = 'Tests completed'
                else:
                    message = progress.message
                gh_commit = repository.statuses(test.commit)
                gh_commit.post(state=state, description=message,
                               context=context, target_url=target_url)
            elif request.form['type'] == 'equality':
                log.debug('Equality for {t}/{rt}/{rto}'.format(
                    t=test_id, rt=request.form['test_id'],
                    rto=request.form['test_file_id']))
                rto = RegressionTestOutput.query.filter(
                    RegressionTestOutput.id ==
                    request.form['test_file_id']).first()
                if rto is None:
                    # Equality posted on a file that's ignored presumably
                    log.info('No rto for {test_id}: {test}'.format(
                        test_id=test_id, test=request.form['test_id']))
                else:
                    result_file = TestResultFile(test.id,
                                                 request.form['test_id'],
                                                 rto.id, rto.correct)
                    g.db.add(result_file)
                    g.db.commit()
            elif request.form['type'] == 'logupload':
                # File upload, process
                if 'file' in request.files:
                    uploaded_file = request.files['file']
                    filename = secure_filename(uploaded_file.filename)
                    if filename == '':
                        return 'EMPTY'
                    temp_path = os.path.join(
                        config.get('SAMPLE_REPOSITORY', ''), 'TempFiles',
                        filename)
                    # Save to temporary location
                    uploaded_file.save(temp_path)
                    final_path = os.path.join(
                        config.get('SAMPLE_REPOSITORY', ''), 'LogFiles',
                        '{id}{ext}'.format(id=test.id, ext='.txt'))
                    os.rename(temp_path, final_path)
            elif request.form['type'] == 'upload':
                log.debug('Upload for {t}/{rt}/{rto}'.format(
                    t=test_id, rt=request.form['test_id'],
                    rto=request.form['test_file_id']))
                # File upload, process
                if 'file' in request.files:
                    uploaded_file = request.files['file']
                    filename = secure_filename(uploaded_file.filename)
                    if filename == '':
                        return 'EMPTY'
                    temp_path = os.path.join(
                        config.get('SAMPLE_REPOSITORY', ''), 'TempFiles',
                        filename)
                    # Save to temporary location
                    uploaded_file.save(temp_path)
                    # Get hash and check if it's already been submitted
                    hash_sha256 = hashlib.sha256()
                    with open(temp_path, "rb") as f:
                        for chunk in iter(lambda: f.read(4096), b""):
                            hash_sha256.update(chunk)
                    file_hash = hash_sha256.hexdigest()
                    filename, file_extension = os.path.splitext(filename)
                    final_path = os.path.join(
                        config.get('SAMPLE_REPOSITORY', ''), 'TestResults',
                        '{hash}{ext}'.format(hash=file_hash,
                                             ext=file_extension))
                    os.rename(temp_path, final_path)
                    rto = RegressionTestOutput.query.filter(
                        RegressionTestOutput.id ==
                        request.form['test_file_id']).first()
                    result_file = TestResultFile(test.id,
                                                 request.form['test_id'],
                                                 rto.id, rto.correct,
                                                 file_hash)
                    g.db.add(result_file)
                    g.db.commit()
            elif request.form['type'] == 'finish':
                log.debug('Finish for {t}/{rt}'.format(
                    t=test_id, rt=request.form['test_id']))
                # Test was done
                regression_test = RegressionTest.query.filter(
                    RegressionTest.id == request.form['test_id']).first()
                result = TestResult(test.id, regression_test.id,
                                    request.form['runTime'],
                                    request.form['exitCode'],
                                    regression_test.expected_rc)
                g.db.add(result)
                g.db.commit()
        return "OK"
    return "FAIL"
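# The endpoint above is driven by POSTs from inside the test VM. A minimal
# sketch of such a client, assuming the `requests` package and a report URL
# that was provisioned into the VM; the actual runner is not shown in this
# code, so this is illustrative only:
import requests

def report_progress(report_url, status, message):
    # POST a 'progress' update to progress_reporter
    requests.post(report_url, data={
        'type': 'progress',
        'status': status,       # e.g. 'preparation', 'completed'
        'message': message,
    })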
from github import GitHub
from pprint import pprint
from datetime import date, timedelta
from time import strptime, mktime
from os import getenv
from trello import TrelloClient  # missing import added (py-trello)

trello_client = TrelloClient(
    api_key=getenv("TRELLO_API_KEY"),
    api_secret=getenv("TRELLO_API_SECRET"),
    token=getenv("TRELLO_TOKEN")
)
github_client = GitHub(access_token=getenv("GITHUB_ACCESS_TOKEN"))

backlog = trello_client.get_list(list_id='5361b7091b0f3942310ab040')
backlog.client = trello_client

issues = github_client.repos('freedomofpress')('securedrop').issues.get(state='open')

pprint(issues[0])
pprint(backlog)

# a_month_ago = date.today() - timedelta(weeks=4)
#
# for issue in issues:
#     time_struct = strptime(issue['updated_at'], u"%Y-%m-%dT%H:%M:%SZ")
#     issue.date_time = mktime(time_struct)
#
# relevant_issues = [issue for issue in issues
#                    if date.fromtimestamp(issue.date_time) > a_month_ago]
#
# for issue in relevant_issues:
#     pprint(issue['updated_at'])
#     name = "%s - %s" % (issue['number'], issue['title'])
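# The commented block above stops short of creating cards. A hedged sketch
# of the likely next step, using py-trello's List.add_card (the description
# text is an assumption):
#
# for issue in relevant_issues:
#     name = "%s - %s" % (issue['number'], issue['title'])
#     backlog.add_card(name, desc=issue['html_url'])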
conn = sqlite3.connect(db_path)  # Connect to the database
c = conn.cursor()

# Create the SQLite3 table to store the info, if it's not already present
sql = ('CREATE TABLE IF NOT EXISTS social_stats (project TEXT, time_stamp TEXT, '
       'watchers INTEGER, stars INTEGER, forks INTEGER, commits INTEGER, downloads INTEGER)')
c.execute(sql)
conn.commit()

# Loop through the projects in the config file
for project in config.sections():
    # Extract the number of watchers, stars, and forks on GitHub
    repo_data = gh.repos(project).get()
    watchers = repo_data['subscribers_count']
    stars = repo_data['stargazers_count']
    forks = repo_data['forks_count']

    # Count the # of commits in the last 24 hours
    now = datetime.datetime.utcnow()
    yesterday = now + datetime.timedelta(days=-1)
    commits = gh.repos(project).commits.get(since=yesterday)
    commits_count = len(commits)

    # Count how many downloads have occurred (ever) for the project.
    # Note - For each project there is an outer loop of "releases" (eg v3.6.0),
    # with an inner loop of "assets" inside each release (with each asset
    # having its own download counter). eg: an .exe and a .dmg might be two
    # assets in the same v3.6.0 release. The .exe might have 10,000 downloads,
    # and the .dmg might have 3,000.
    download_counter = 0
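    # The snippet is truncated after download_counter = 0. A sketch of the
    # counting that the note above describes, assuming the GitHub releases
    # API shape (each release carries an 'assets' list whose entries have a
    # 'download_count'); this continuation is illustrative, not the
    # original code:
    for release in gh.repos(project).releases.get():
        for asset in release['assets']:
            download_counter += asset['download_count']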
class Processor:
    """
    This class holds all the logic to process GitHub notifications and
    comment on previous ones (to report a status, ...).
    """

    _conn = None

    def __init__(self, debug=False):
        """
        Constructor for the Processor class.

        :param debug: If set to True, the console will also log debug
            messages.
        :return:
        """
        # Init GitHub with the configured access token
        self.g = GitHub(access_token=Configuration.token)
        self.debug = debug
        loggers = LogConfiguration(self.debug)
        self.logger = loggers.create_logger("Processor")

    @staticmethod
    def generate_random_string(
            length=32, chars=string.ascii_uppercase + string.digits):
        """
        Generates a random string with a given length and character set.

        :param length: The length of the random string. 32 by default.
        :param chars: The characters that should be used. Uppercase +
            digits by default.
        :return: A randomly generated string of given length.
        """
        return ''.join(
            random.SystemRandom().choice(chars) for _ in range(length))

    def run(self):
        """
        Runs the script by fetching new notifications and running through
        them, as well as reporting back for all the messages in the
        database queue.

        :return:
        """
        self.logger.info("Start of bot")
        # Create connection to the DB
        self._conn = pymysql.connect(
            host=Configuration.database_host,
            user=Configuration.database_user,
            passwd=Configuration.database_password,
            db=Configuration.database_name,
            charset='latin1',
            cursorclass=pymysql.cursors.DictCursor)
        self.logger.debug("Fetching notifications")
        notifications = self.g.notifications.get()
        if len(notifications) > 0:
            self.logger.debug("We got {0} new notifications".format(
                len(notifications)))
            # Get valid forks
            open_forks = self.get_forks()
            # Run through notifications
            for notification in notifications:
                repo_name = notification.repository.full_name
                self.logger.info("Got a notification in {0}".format(
                    repo_name))
                if repo_name in open_forks:
                    url = notification.subject.url
                    parts = url.split('/')
                    not_id = parts[-1]
                    not_type = notification.subject.type
                    repo_owner = notification.repository.owner.login
                    self.logger.info("Valid notification: {0} #{1}".format(
                        not_type, not_id))
                    self.logger.debug("Repository owned by: {0}".format(
                        repo_owner))
                    if not_type == "Issue":
                        self.logger.debug("Fetching issue")
                        issue = self.g.repos(repo_owner)(
                            Configuration.repo_name).issues(not_id).get()
                        comments = self.g.repos(repo_owner)(
                            Configuration.repo_name).issues(
                            not_id).comments.get()
                        self.run_through_comments(
                            issue, comments, not_type, not_id, repo_owner,
                            open_forks[repo_name])
                    elif not_type == "PullRequest":
                        self.logger.debug("Fetching PR")
                        request = self.g.repos(repo_owner)(
                            Configuration.repo_name).pulls(not_id).get()
                        # For some reason, the comments for the PR are
                        # issues...
                        comments = self.g.repos(repo_owner)(
                            Configuration.repo_name).issues(
                            not_id).comments.get()
                        self.run_through_comments(
                            request, comments, not_type, not_id, repo_owner,
                            open_forks[repo_name])
                    elif not_type == "Commit":
                        self.logger.debug("Fetching Commit")
                        commit = self.g.repos(repo_owner)(
                            Configuration.repo_name).commits(not_id).get()
                        comments = self.g.repos(repo_owner)(
                            Configuration.repo_name).commits(
                            not_id).comments().get()
                        self.run_through_comments(
                            commit, comments, not_type, not_id, repo_owner,
                            open_forks[repo_name])
                    else:
                        self.logger.warn("Unknown type!")
                else:
                    self.logger.warn(
                        "Skipped notification because {0} is not the "
                        "correct repository (expected a fork of "
                        "{1})".format(
                            notification.repository.full_name,
                            Configuration.repo_owner + '/' +
                            Configuration.repo_name))
                # Unsubscribe from notification
                self.logger.debug(
                    "Unsubscribing from notification {0}".format(
                        notification.id))
                self.g.notifications().threads(
                    notification.id).subscription().delete()
            # Mark notifications as read
            self.logger.debug("Marking notifications as read")
            self.g.notifications().put()
        else:
            self.logger.info("No notifications for now")
        # Process pending GitHub queue for comments
        with self._conn.cursor() as c:
            if c.execute(
                    "SELECT g.id, g.message, t.* FROM github_queue g "
                    "JOIN test t ON g.test_id = t.id "
                    "ORDER BY g.id ASC") > 0:
                self.logger.info("Processing GitHub messages queue")
                row = c.fetchone()
                to_delete = []
                while row is not None:
                    # Process row
                    self.logger.debug("Processing row")
                    owner = row["repository"].replace(
                        "git://github.com/", "").replace(
                        "/" + Configuration.repo_name + ".git", "")
                    self.logger.debug("Owner of the repository to "
                                      "reply back to: {0}".format(owner))
                    if row["type"] == "Commit":
                        self.logger.info("Reporting back on Commit")
                        self.g.repos(owner)(
                            Configuration.repo_name).commits(
                            row["commit_hash"]).comments.post(
                            body=row["message"])
                    elif row["type"] == "PullRequest" \
                            or row["type"] == "Issue":
                        self.logger.info("Reporting back to PR or issue")
                        self.g.repos(owner)(
                            Configuration.repo_name).issues(
                            row["commit_hash"]).comments.post(
                            body=row["message"])
                    else:
                        self.logger.warn("Unknown test type!")
                    to_delete.append(str(row["id"]))
                    row = c.fetchone()
                # Delete processed rows
                c.execute("DELETE FROM github_queue WHERE id IN ("
                          "" + (",".join(to_delete)) + ")")
                self._conn.commit()
            else:
                self.logger.info("No GitHub items to process")
            # Calling VM part
            if c.execute("SELECT * FROM test_queue") >= 1:
                # Run main method of the Python VM script
                self.logger.info("Call VM script")
                p = multiprocessing.Process(target=run_vm.main,
                                            args=(self.debug,))
                p.start()
                self.logger.info("VM script launched")
        # Closing connection to DB
        self._conn.close()
        self._conn = None
        self.logger.info("End of bot")

    def get_forks(self):
        """
        Obtains all the forks of the original repository, to restrict the
        usage of the bot to a valid fork.

        :return: A dictionary with the full repository name as key and the
            git URL as value.
        """
        names = {}
        self.logger.info("Fetching GitHub forks of {0}".format(
            Configuration.repo_name))
        page = 1
        while True:
            forks = self.g.repos(Configuration.repo_owner)(
                Configuration.repo_name).forks().get(page=page)
            if len(forks) > 0:
                page = page + 1
                for fork in forks:
                    self.logger.debug("Processing fork: {0}".format(
                        fork.full_name))
                    if not fork.private:
                        self.logger.debug("Added fork to the list")
                        names[fork.full_name] = fork.git_url
                    else:
                        self.logger.warn(
                            "Skipped {0} because it's a private "
                            "fork".format(fork.full_name))
            else:
                break
        # Don't forget to add the original too
        repository = self.g.repos(Configuration.repo_owner)(
            Configuration.repo_name).get()
        names[repository.full_name] = repository.git_url
        # End of obtaining forks
        self.logger.info("Found {0} valid forks".format(len(names)))
        return names

    def run_through_comments(self, initial, comment_list, initial_type,
                             initial_id, repository_owner, fork):
        """
        Runs through a given comment list, and if nothing is found there,
        it checks the initial comment.

        :param initial: The initial commit/issue/PR.
        :param comment_list: A list of comments on the commit/issue/PR.
        :param initial_type: The type of the initial comment
            (commit/issue/PR).
        :param initial_id: The GitHub id of the initial comment.
        :param repository_owner: The owner of the repository that was
            mentioned.
        :param fork: Information about the fork.
        :return:
        """
        mentioned = False
        for idx, comment in reversed(list(enumerate(comment_list))):
            user = comment.user.login
            if user == Configuration.bot_name:
                self.logger.info("Comment {0} is from myself, handled "
                                 "comments above".format(idx))
                mentioned = True
                break
            message = comment.body
            if not self.contains_mention(message):
                self.logger.debug(u"Ignoring comment {0} from {1}, because "
                                  "the content ({2}) does not contain a "
                                  "mention".format(idx, user, message))
                continue
            self.logger.debug(
                u"Processing comment {0}, coming from {1} (content: "
                "{2})".format(idx, user, message))
            mentioned = True
            if not self.allowed_local(user, fork):
                self.g.repos(repository_owner)(
                    Configuration.repo_name).issues(
                    initial_id).comments.post(
                    body=BotMessages.untrustedUser)
                break
            if self.process_comment(message, initial_type, initial_id,
                                    repository_owner, fork, user,
                                    comment.html_url, comment.created_at):
                break
        if not mentioned:
            self.logger.debug("Parsing original comment")
            if initial_type == "Commit":
                message = initial.commit.message
            else:
                message = initial.body
            if not self.contains_mention(message):
                return
            user = initial.user.login
            if not self.allowed_local(user, fork):
                self.g.repos(repository_owner)(
                    Configuration.repo_name).issues(
                    initial_id).comments.post(
                    body=BotMessages.untrustedUser)
                return
            self.process_comment(message, initial_type, initial_id,
                                 repository_owner, fork, user,
                                 initial.html_url, initial.created_at)

    def contains_mention(self, message):
        """
        Validates the given comment, based on whether it contains a
        mention of the bot.

        :param message: The message to be checked.
        :return: True if a mention is found, False otherwise.
        """
        if "@" + Configuration.bot_name not in message:
            return False
        self.logger.debug("Found mention in message")
        return True

    def process_comment(self, message, original_type, original_id,
                        repository_owner, fork, user, comment_link,
                        timestamp):
        """
        Processes a comment and executes the given (valid) commands.

        :param message: The message to process.
        :param original_type: The type (issue, pull request) where the
            command is coming from.
        :param original_id: The original issue/PR id.
        :param repository_owner: The owner of the repository.
        :param fork: Information about the fork.
        :param user: The user that posted the comment.
        :param comment_link: The link to the comment.
        :param timestamp: The time of the comment.
        :return: True if the comment was processed.
        """
        body = message.lower()
        words = body.split()
        self.logger.debug("Found the next words: {0}".format(words))
        if "runtests" in words:
            self.store_command(timestamp, "runtests", user, comment_link)
            if original_type == "Commit":
                # We need to have a branch as well...
                try:
                    branch = words[words.index("runtests") + 1]
                except IndexError:
                    self.g.repos(repository_owner)(
                        Configuration.repo_name).commits(
                        original_id).comments.post(
                        body=BotMessages.branchMissing)
                    return True
                if not self.is_valid_branch(repository_owner, branch):
                    self.g.repos(repository_owner)(
                        Configuration.repo_name).commits(
                        original_id).comments.post(
                        body=BotMessages.branchInvalid.format(branch))
                    return True
                self.logger.info(
                    "Adding {0}, branch {1} and commit {2} to the test "
                    "queue".format(
                        repository_owner + '/' + Configuration.repo_name,
                        branch, original_id))
                queue_id = self.store_in_queue(fork, branch, original_id,
                                               original_type)
                self.g.repos(repository_owner)(
                    Configuration.repo_name).commits(
                    original_id).comments.post(
                    body=BotMessages.acknowledged.format(
                        queue_id,
                        Configuration.progress_url.format(queue_id)))
            elif original_type == "PullRequest":
                self.logger.info(
                    "Storing data in queue for id {0}".format(original_id))
                # A Pull Request has no branch, so we pass in a 'special'
                # name which the processing script will recognize
                queue_id = self.store_in_queue(fork, "-_-", original_id,
                                               original_type)
                self.g.repos(repository_owner)(
                    Configuration.repo_name).issues(
                    original_id).comments.post(
                    body=BotMessages.acknowledged.format(
                        queue_id,
                        Configuration.progress_url.format(queue_id)))
            else:
                self.logger.info("run tests command not supported for "
                                 "issue (# {0})".format(original_id))
                self.g.repos(repository_owner)(
                    Configuration.repo_name).issues(
                    original_id).comments.post(
                    body=BotMessages.invalidCommand)
        else:
            self.logger.warn("Body did not contain a valid command")
            if original_type == "Commit":
                self.g.repos(repository_owner)(
                    Configuration.repo_name).commits(
                    original_id).comments.post(
                    body=BotMessages.invalidCommand)
            else:
                self.g.repos(repository_owner)(
                    Configuration.repo_name).issues(
                    original_id).comments.post(
                    body=BotMessages.invalidCommand)
        return True

    def is_valid_branch(self, repository_owner, branch):
        """
        Validates a given branch on a given repository by checking the
        refs object on the GitHub API.

        :param repository_owner: The owner of the repository.
        :param branch: The branch we want to check the validity of.
        :return: True if the branch exists, False otherwise.
        """
        self.logger.debug("Checking if the branch ({0}) is "
                          "valid...".format(branch))
        try:
            number = self.g.repos(repository_owner)(
                Configuration.repo_name).git().refs.heads(branch).get()
            self.logger.debug("Type of return result: {0}".format(
                type(number)))
            # The API returns a list if there is no exact match, so we
            # need to filter that out too.
            if type(number) is list:
                return False
            return True
        except ApiNotFoundError:
            self.logger.warn("API error on checking branch!")
            return False

    def store_in_queue(self, fork, branch, original_id, original_type):
        """
        Adds an entry into the database so that it can be processed later.

        :param fork: The name of the fork/git location.
        :param branch: The branch we need to switch to.
        :param original_id: The commit_hash/PR/Issue nr.
        :param original_type: The type (Commit/PR/Issue).
        :return:
        """
        self.logger.info("Storing request in queue")
        with self._conn.cursor() as c:
            token = self.generate_random_string(32)
            self.logger.debug("Generated token: {0}".format(token))
            c.execute(
                "INSERT INTO `test`(`id`,`token`,`repository`,`branch`,"
                "`commit_hash`, `type`) VALUES (NULL,%s,%s,%s,%s,%s);",
                (token, fork, branch, original_id, original_type))
            self._conn.commit()
            # The trailing comma in the single-element parameter tuples
            # below is necessary, or Python will raise a ValueError.
            insert_id = c.lastrowid
            self.logger.debug("Inserted id: {0}".format(insert_id))
            if c.execute("SELECT id FROM local_repos WHERE github = %s "
                         "LIMIT 1;", (fork,)) == 1:
                # Local
                self.logger.info("Local request")
                c.execute(
                    "INSERT INTO `local_queue` (`test_id`) VALUES (%s);",
                    (insert_id,))
            else:
                # VM
                self.logger.info("VM request")
                c.execute(
                    "INSERT INTO `test_queue` (`test_id`) VALUES (%s);",
                    (insert_id,))
            self._conn.commit()
            # Check which queues have just a single item, and run the
            # appropriate script for those
            if c.execute("SELECT * FROM local_queue") == 1:
                # Call shell script to activate worker
                self.logger.info("Calling local script")
                fh = open("out.txt", "w")
                code = subprocess.call(
                    [Configuration.worker_script, token],
                    stdout=fh,
                    stderr=subprocess.STDOUT)
                self.logger.info(
                    "Local script completed with {0}".format(code))
                fh.close()
                fh = open("out.txt", "r")
                self.logger.debug("Local script returned:")
                self.logger.debug(fh.read())
                fh.close()
        return insert_id

    def store_command(self, timestamp, command_type, user, comment_link):
        """
        Stores a given command in the database, so that we can find out
        later who gave which commands.

        :param timestamp: The GitHub timestamp, in the ISO 8601 format.
        :param command_type: The command type.
        :param user: The user that gave the command.
        :param comment_link: The link to the message that was posted on
            GitHub.
        :return:
        """
        self.logger.info("Storing command for history")
        # Convert given timestamp to a python object
        date = dateutil.parser.parse(timestamp)
        # Format to MySQL datetime
        datetime = date.strftime('%Y-%m-%d %H:%M:%S')
        with self._conn.cursor() as c:
            self.logger.debug(datetime)
            c.execute('INSERT INTO cmd_history VALUES (NULL, %s, %s, %s, '
                      '%s)', (datetime, command_type, user, comment_link))
            self._conn.commit()
        self.logger.debug("Stored command")

    def allowed_local(self, user, fork):
        # The owner of the fork is always allowed
        if "git://github.com/" + user + "/ccextractor.git" == fork:
            self.logger.debug("{0} seems to be the owner of {1}".format(
                user, fork))
            return True
        # If the fork can be run locally, only trusted users should be
        # allowed, for security reasons
        if self.is_local(fork) and not self.is_user_trusted(user):
            return False
        return True

    def is_local(self, fork):
        with self._conn.cursor() as c:
            if c.execute(
                    "SELECT id FROM local_repos WHERE github = %s LIMIT 1",
                    (fork,)) == 1:
                self.logger.debug("Repository {0} is marked to be run "
                                  "locally in the DB".format(fork))
                return True
        return False

    def is_user_trusted(self, user):
        with self._conn.cursor() as c:
            if c.execute(
                    "SELECT id FROM trusted_users WHERE user = %s LIMIT 1",
                    (user,)) == 1:
                self.logger.debug("User {0} is marked as trusted in the "
                                  "DB".format(user))
                return True
        return False
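# The Processor class assumes a Configuration object holding the bot's
# settings. A minimal sketch of the fields it reads (all values here are
# placeholders, not the project's real configuration):
class Configuration:
    token = 'github-oauth-token'        # GitHub access token of the bot
    bot_name = 'bot-account'            # Account name used for @mentions
    repo_owner = 'upstream-owner'       # Owner of the original repository
    repo_name = 'ccextractor'           # Repository the bot watches
    worker_script = './run_worker.sh'   # Script that processes local jobs
    progress_url = 'https://example.com/progress/{0}'
    database_host = 'localhost'
    database_user = 'bot'
    database_password = 'secret'
    database_name = 'bot_queue'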
from github import GitHub

gh = GitHub(username="******", password="******")
oh_issues = gh.repos("openhatch")("oh-mainline").issues.get(
    state="all", labels="bugimporters", per_page=50)
total_issues = len(oh_issues)

for index, issue in enumerate(oh_issues):
    print "Processing issue #%i of %i" % (index + 1, total_issues)
    title = issue.get("title")
    body = issue.get("body")
    assignee = issue["assignee"]["login"] if issue.get("assignee") else None
    milestone = issue.get('milestone')
    state = issue.get("state")
    labels = [label['name'] for label in issue['labels']]
    comment_count = issue.get("comments")
    if comment_count > 0:
        comments = gh.repos("openhatch")(
            "oh-mainline").issues(issue["number"]).comments.get()
        for comment in comments:
            comment_body = comment.get("body")
            creator = comment.get("user")["login"]
            creation_time = comment.get("created_at")
            text = "<hr/> **%s** commented at %s: <br/> %s" % (
                creator, creation_time, comment_body)
class GitHubContext(pullrequest.context.Context):
    updatePullRequestsDelay = 30  # seconds

    name = 'GitHub Pull Requests'
    dbname = 'pullrequests_github'
    urlpath = 'pullrequests_gh'

    builders = dict(
        linux=dict(name='Linux x64', builders=['precommit_linux64'], order=100),
        windows=dict(name='Win x64', builders=['precommit_windows64'], order=200),
        win32=dict(name='Win 32', builders=['precommit_windows32'], order=250),
        macosx=dict(name='Mac', builders=['precommit_macosx'], order=300),
        android=dict(name='Android', builders=['precommit_android'], order=400),
    )

    username = '******'
    repo = 'test'

    client = None

    @defer.inlineCallbacks
    def updatePullRequests(self):
        print 'Updating pull requests from GitHub...'

        if not self.client:
            self.client = GitHub(userAgent=userAgent, async=True,
                                 reuseETag=True,
                                 access_token=githubAccessToken)

        gh_pullrequests = yield self.client.repos(self.username)(
            self.repo).pulls.get(state='open', per_page=100)
        if self.client.status == 304:
            print "GitHub pull requests was not changed"
            defer.returnValue(None)
        elif self.client.status == 200:
            prs = []
            for gh_pullrequest in gh_pullrequests:
                pr = {}
                pr['id'] = gh_pullrequest['number']
                pr['branch'] = gh_pullrequest['base']['ref']
                pr['author'] = gh_pullrequest['user']['login']
                pr['assignee'] = gh_pullrequest['assignee']['login'] \
                    if gh_pullrequest['assignee'] else None
                pr['head_user'] = gh_pullrequest['head']['repo']['owner']['login']
                pr['head_repo'] = gh_pullrequest['head']['repo']['name']
                pr['head_branch'] = gh_pullrequest['head']['ref']
                pr['head_sha'] = gh_pullrequest['head']['sha']
                pr['title'] = gh_pullrequest['title']
                pr['description'] = gh_pullrequest['body']
                prs.append(pr)
            defer.returnValue(prs)
        raise Exception('invalid status', self.client.status)

    def getListOfAutomaticBuilders(self, pr):
        if pr.description is not None and '**WIP**' in pr.description:
            return []
        buildersList = [
            'linux',
            'windows',
            'win32',
            # 'macosx',
            # 'android'
        ]
        return buildersList

    def getBuildProperties(self, pr, b, properties, sourcestamps):
        prid = pr.prid

        properties.setProperty('branch', pr.branch, 'Pull request')
        properties.setProperty('head_sha', pr.head_sha, 'Pull request')
        properties.setProperty('pullrequest', prid, 'Pull request')

        if b.isPerf:
            regressionTestFilter = self.extractRegressionTestFilter(
                pr.description)
            if regressionTestFilter is not None:
                properties.setProperty('regression_test_filter',
                                       regressionTestFilter, 'Pull request')
            else:
                print 'ERROR: Can\'t schedule perf precommit build without regression test filter. Use check_regression parameter'
                defer.returnValue(False)

        if pr.description is None or '**WIP**' in pr.description:
            self.pushBuildProperty(properties, pr.description,
                                   'test[s]?_filter[s]?', 'test_filter')

        sourcestamps.append(dict(
            codebase='code',
            repository='https://github.com/%s/%s.git' % (self.username,
                                                         self.repo),
            branch=pr.branch))
        sourcestamps.append(dict(
            codebase='code_merge',
            repository='https://github.com/%s/%s.git' % (pr.head_user,
                                                         pr.head_repo),
            branch=pr.head_branch,
            revision=pr.head_sha))

        return True

    def getWebAddressPullRequest(self, pr):
        return 'https://github.com/%s/%s/pull/%s' % (self.username,
                                                     self.repo, pr.prid)

    def getWebAddressPerfRegressionReport(self, pr):
        return None
#!/usr/bin/python
import sys
from os import path

# Need to append server root path to ensure we can import the necessary files.
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))

# noinspection PyPep8
from mod_ci.controllers import start_ci_vm
# noinspection PyPep8
from run import config, log
# noinspection PyPep8
from database import create_session
# noinspection PyPep8
from github import GitHub

log.info('Running cron.py CI scheduler')
# Create session
db = create_session(config['DATABASE_URI'])
gh = GitHub(access_token=config['GITHUB_TOKEN'])
repository = gh.repos(config['GITHUB_OWNER'])(config['GITHUB_REPOSITORY'])
# Kick off start_ci_vm
start_ci_vm(db, repository)
def start_ci():
    """
    Gets called when the webhook on GitHub is triggered.

    Reactions to the following events need to be processed (after
    verification):
    - Ping (for fun)
    - Push
    - Pull Request
    - Issues
    """
    if request.method != 'POST':
        return 'OK'
    else:
        abort_code = 418

        event = request.headers.get('X-GitHub-Event')
        if event == "ping":
            return json.dumps({'msg': 'Hi!'})

        x_hub_signature = request.headers.get('X-Hub-Signature')
        if not is_valid_signature(x_hub_signature, request.data,
                                  g.github['ci_key']):
            g.log.warning(
                'CI signature failed: {sig}'.format(sig=x_hub_signature))
            abort(abort_code)

        payload = request.get_json()
        if payload is None:
            g.log.warning(
                'CI payload is empty: {payload}'.format(payload=payload))
            abort(abort_code)

        gh = GitHub(access_token=g.github['bot_token'])
        repository = gh.repos(g.github['repository_owner'])(
            g.github['repository'])

        if event == "push":
            # If it's a push, and the 'after' hash is available, then it's
            # a commit, so run the tests
            if 'after' in payload:
                commit = payload['after']
                gh_commit = repository.statuses(commit)
                # Update the db to the new last commit
                ref = repository.git().refs('heads/master').get()
                last_commit = GeneralData.query.filter(
                    GeneralData.key == 'last_commit').first()
                for platform in TestPlatform.values():
                    commit_name = 'fetch_commit_' + platform
                    fetch_commit = GeneralData.query.filter(
                        GeneralData.key == commit_name).first()
                    if fetch_commit is None:
                        prev_commit = GeneralData(commit_name,
                                                  last_commit.value)
                        g.db.add(prev_commit)
                last_commit.value = ref['object']['sha']
                g.db.commit()
                queue_test(g.db, gh_commit, commit, TestType.commit)
            else:
                g.log.warning('Unknown push type! Dumping payload for '
                              'analysis')
                g.log.debug(payload)
        elif event == "pull_request":
            # If it's a valid PR, run the tests
            commit = ''
            gh_commit = None
            pr_nr = payload['pull_request']['number']
            if payload['action'] in ['opened', 'synchronize', 'reopened']:
                try:
                    commit = payload['pull_request']['head']['sha']
                    gh_commit = repository.statuses(commit)
                except KeyError:
                    g.log.critical(
                        "Didn't find a SHA value for a newly opened PR!")
                    g.log.debug(payload)
                # Check if the user is blacklisted
                user_id = payload['pull_request']['user']['id']
                if BlockedUsers.query.filter(
                        BlockedUsers.userID == user_id).first() is not None:
                    g.log.critical("User Blacklisted")
                    gh_commit.post(
                        state=Status.ERROR,
                        description="CI start aborted. You may be blocked "
                                    "from accessing this functionality",
                        target_url=url_for('home.index', _external=True))
                    return 'ERROR'
                queue_test(g.db, gh_commit, commit, TestType.pull_request,
                           pr_nr=pr_nr)
            elif payload['action'] == 'closed':
                g.log.debug('PR was closed, no after hash available')
                # Cancel running queue
                tests = Test.query.filter(Test.pr_nr == pr_nr).all()
                for test in tests:
                    # Add canceled status only if the test hasn't started
                    # yet
                    if len(test.progress) > 0:
                        continue
                    progress = TestProgress(test.id, TestStatus.canceled,
                                            "PR closed",
                                            datetime.datetime.now())
                    g.db.add(progress)
                    repository.statuses(test.commit).post(
                        state=Status.FAILURE,
                        description="Tests canceled",
                        context="CI - {name}".format(
                            name=test.platform.value),
                        target_url=url_for('test.by_id', test_id=test.id,
                                           _external=True))
        elif event == "issues":
            issue_data = payload['issue']
            issue = Issue.query.filter(
                Issue.issue_id == issue_data['number']).first()
            if issue is not None:
                issue.title = issue_data['title']
                issue.status = issue_data['state']
                g.db.commit()
        else:
            # Unknown type
            g.log.warning(
                'CI unrecognized event: {event}'.format(event=event))

        return json.dumps({'msg': 'EOL'})
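# start_ci relies on is_valid_signature to verify the X-Hub-Signature header
# GitHub sends with each webhook delivery. A plausible sketch (GitHub signs
# the raw request body with an HMAC using the webhook secret; the exact
# helper in this codebase is not shown, so treat this as illustrative):
import hashlib
import hmac

def is_valid_signature(x_hub_signature, data, private_key):
    # The header looks like "sha1=<hexdigest>"
    hash_algorithm, github_signature = x_hub_signature.split('=', 1)
    algorithm = hashlib.__dict__.get(hash_algorithm)
    encoded_key = bytes(private_key, 'latin-1')
    mac = hmac.new(encoded_key, msg=data, digestmod=algorithm)
    return hmac.compare_digest(mac.hexdigest(), github_signature)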
elif member["bioguide_id"] in websites: member["website"] = websites[member["bioguide_id"]] readme.write(row_template.format(**member)) else: readme.write(row_template_no_web.format(**member)) if args.issues: print("Only create github issues if you're sure you're ready") print( "The line actually creating issues is commented out to protect you from yourself" ) print("So go uncomment it when you're really ready") username = raw_input("Github username:"******"Github password:"******"[%s] Rep. %s" % (m, new_reps[m]) body = "Newly elected to 114th congress" #repo.issues.post(title=title, body=body) for m in new_senators: title = "[%s] Sen. %s" % (m, new_reps[m]) body = "Newly elected to 114th congress" #repo.issues.post(title=title, body=body) #do we also want to create them for existing legislators in case they change anything? ##the decision seems to be no for existing legislators right now.
def start_ci():
    """
    Function that tracks the events occurring on the repository.

    Events that are tracked:
    Push: Run the tests and update the last commit.
    Pull Request: If it is a PR, run the tests.
    Issues: Update the status of recorded issues.
    """
    if request.method != 'POST':
        return 'OK'
    else:
        abort_code = 418

        event = request.headers.get('X-GitHub-Event')
        if event == "ping":
            return json.dumps({'msg': 'Hi!'})

        x_hub_signature = request.headers.get('X-Hub-Signature')
        if not is_valid_signature(x_hub_signature, request.data,
                                  g.github['ci_key']):
            g.log.warning('CI signature failed: %s' % x_hub_signature)
            abort(abort_code)

        payload = request.get_json()
        if payload is None:
            g.log.warning('CI payload is empty: %s' % payload)
            abort(abort_code)

        gh = GitHub(access_token=g.github['bot_token'])
        repository = gh.repos(g.github['repository_owner'])(
            g.github['repository'])

        if event == "push":
            # If it's a push, and the 'after' hash is available, then it's
            # a commit, so run the tests
            if 'after' in payload:
                commit = payload['after']
                gh_commit = repository.statuses(commit)
                # Update the db to the new last commit
                ref = repository.git().refs('heads/master').get()
                last_commit = GeneralData.query.filter(
                    GeneralData.key == 'last_commit').first()
                for platform in TestPlatform.values():
                    commit_name = 'fetch_commit_' + platform
                    fetch_commit = GeneralData.query.filter(
                        GeneralData.key == commit_name).first()
                    if fetch_commit is None:
                        prev_commit = GeneralData(commit_name,
                                                  last_commit.value)
                        g.db.add(prev_commit)
                last_commit.value = ref['object']['sha']
                g.db.commit()
                queue_test(g.db, repository, gh_commit, commit,
                           TestType.commit)
            else:
                g.log.warning('Unknown push type! Dumping payload for '
                              'analysis')
                g.log.debug(payload)
        elif event == "pull_request":
            # If it's a PR, run the tests
            if payload['action'] == 'opened':
                try:
                    commit = payload['pull_request']['head']['sha']
                except KeyError:
                    g.log.critical(
                        "Didn't find a SHA value for a newly opened PR!")
                    g.log.debug(payload)
                    commit = ''
            elif payload['action'] == 'closed':
                g.log.debug('PR was closed, no after hash available')
                commit = ''
            else:
                try:
                    commit = payload['after']
                except KeyError:
                    g.log.critical("Didn't find the after SHA for the "
                                   "updated commit!")
                    g.log.debug(payload)
                    commit = ''
            pr_nr = payload['pull_request']['number']
            gh_commit = repository.statuses(commit)

            if payload['action'] == 'opened':
                # Run initial tests
                queue_test(g.db, repository, gh_commit, commit,
                           TestType.pull_request, pr_nr=pr_nr)
            elif payload['action'] == 'synchronize':
                # Run/queue a new test set
                queue_test(g.db, repository, gh_commit, commit,
                           TestType.pull_request, pr_nr=pr_nr)
            elif payload['action'] == 'closed':
                # Cancel running queue
                tests = Test.query.filter(Test.pr_nr == pr_nr).all()
                for test in tests:
                    # Add canceled status only if the test hasn't started
                    # yet
                    if len(test.progress) > 0:
                        continue
                    progress = TestProgress(test.id, TestStatus.canceled,
                                            "PR closed",
                                            datetime.datetime.now())
                    g.db.add(progress)
                    repository.statuses(test.commit).post(
                        state=Status.FAILURE,
                        description="Tests canceled",
                        context="CI - %s" % test.platform.value,
                        target_url=url_for('test.by_id', test_id=test.id,
                                           _external=True))
            elif payload['action'] == 'reopened':
                # Run tests again
                queue_test(g.db, repository, gh_commit, commit,
                           TestType.pull_request)
        elif event == "issues":
            issue_data = payload['issue']
            issue = Issue.query.filter(
                Issue.issue_id == issue_data['number']).first()
            if issue is not None:
                issue.title = issue_data['title']
                issue.status = issue_data['state']
                g.db.commit()
        else:
            # Unknown type
            g.log.warning('CI unrecognized event: %s' % event)

        return json.dumps({'msg': 'EOL'})
import getpass

from github import GitHub

# The credential and repository lines were redacted in the source; the
# reconstruction below keeps the redaction markers.
username = raw_input("Github username: ")
password = getpass.getpass("Github password: ")
gh = GitHub(username=username, password=password)
repo = gh.repos("******")("******")

title = "another sample issue"
body = "issue created through githubpy"
repo.issues.post(title=title, body=body)
def kvm_processor(db, kvm_name, platform, delay):
    from run import config, log, app
    if kvm_name == "":
        log.critical('KVM name is empty!')
        return
    if delay is not None:
        import time
        log.debug('Sleeping for {time} seconds'.format(time=delay))
        time.sleep(delay)
    # Open connection to libvirt
    conn = libvirt.open("qemu:///system")
    if conn is None:
        log.critical("Couldn't open connection to libvirt!")
        return
    try:
        vm = conn.lookupByName(kvm_name)
    except libvirt.libvirtError:
        log.critical("Couldn't find the Linux CI machine named %s"
                     % kvm_name)
        return
    vm_info = vm.info()
    if vm_info[0] != libvirt.VIR_DOMAIN_SHUTOFF:
        # Running, check expiry (2 hours runtime max)
        status = Kvm.query.filter(Kvm.name == kvm_name).first()
        max_runtime = config.get("KVM_MAX_RUNTIME", 120)
        if status is not None:
            if datetime.datetime.now() >= status.timestamp + \
                    datetime.timedelta(minutes=max_runtime):
                # Mark entry as aborted
                test_progress = TestProgress(
                    status.test.id, TestStatus.canceled, 'Runtime exceeded')
                db.add(test_progress)
                db.delete(status)
                db.commit()
                # Abort process
                if vm.destroy() == -1:
                    # Failed to shut down
                    log.critical("Failed to shut down %s" % kvm_name)
                    return
            else:
                log.info("Current job is still running and not expired")
                return
        else:
            log.warn("No currently running task, but VM is running! Hard "
                     "reset necessary")
            if vm.destroy() == -1:
                # Failed to shut down
                log.critical("Failed to shut down %s" % kvm_name)
                return
    # Check if there's no KVM status left
    status = Kvm.query.filter(Kvm.name == kvm_name).first()
    if status is not None:
        log.warn("KVM is powered off, but test is still in there: %s"
                 % status.test.id)
        db.delete(status)
        db.commit()
    # Get oldest test for this platform
    finished_tests = db.query(TestProgress.test_id).filter(
        TestProgress.status.in_(
            [TestStatus.canceled, TestStatus.completed])).subquery()
    test = Test.query.filter(
        and_(Test.id.notin_(finished_tests), Test.platform == platform)
    ).order_by(Test.id.asc()).first()
    if test is None:
        log.info('No more tests to run, returning')
        return
    # Reset to snapshot
    if vm.hasCurrentSnapshot() != 1:
        log.critical("VM %s has no current snapshot set!" % kvm_name)
        return
    snapshot = vm.snapshotCurrent()
    if vm.revertToSnapshot(snapshot) == -1:
        log.critical("Failed to revert to snapshot %s for VM %s" % (
            snapshot.getName(), kvm_name))
        return
    log.info('Reverted to snapshot %s for VM %s' % (
        snapshot.getName(), kvm_name))
    log.debug('Starting test %s' % test.id)
    status = Kvm(kvm_name, test.id)
    # Prepare data
    # 0) Write url to file
    with app.app_context():
        full_url = url_for('ci.progress_reporter', test_id=test.id,
                           token=test.token, _external=True,
                           _scheme="https")
        file_path = os.path.join(config.get('SAMPLE_REPOSITORY', ''),
                                 'reportURL')
        with open(file_path, 'w') as f:
            f.write(full_url)
    # 1) Generate test files
    base_folder = os.path.join(
        config.get('SAMPLE_REPOSITORY', ''), 'ci-tests')
    categories = Category.query.order_by(Category.id.desc()).all()
    # Init collection file
    multi_test = etree.Element('multitest')
    for category in categories:
        if len(category.regression_tests) == 0:
            # Skip categories without tests
            continue
        # Create XML file for test
        file_name = '{name}.xml'.format(name=category.name)
        single_test = etree.Element('tests')
        for regression_test in category.regression_tests:
            entry = etree.SubElement(
                single_test, 'entry', id=str(regression_test.id))
            command = etree.SubElement(entry, 'command')
            command.text = regression_test.command
            input_node = etree.SubElement(
                entry, 'input', type=regression_test.input_type.value)
            # Need a path that is relative to the folder we provide
            # inside the CI environment.
            input_node.text = regression_test.sample.filename
            output_node = etree.SubElement(entry, 'output')
            output_node.text = regression_test.output_type.value
            compare = etree.SubElement(entry, 'compare')
            for output_file in regression_test.output_files:
                file_node = etree.SubElement(
                    compare, 'file',
                    ignore='true' if output_file.ignore else 'false',
                    id=str(output_file.id))
                correct = etree.SubElement(file_node, 'correct')
                # Need a path that is relative to the folder we provide
                # inside the CI environment.
                correct.text = output_file.filename_correct
                expected = etree.SubElement(file_node, 'expected')
                expected.text = output_file.filename_expected(
                    regression_test.sample.sha)
        # Save XML
        single_test.getroottree().write(
            os.path.join(base_folder, file_name),
            encoding='utf-8', xml_declaration=True, pretty_print=True)
        # Append to collection file
        test_file = etree.SubElement(multi_test, 'testfile')
        location = etree.SubElement(test_file, 'location')
        location.text = file_name
    # Save collection file
    multi_test.getroottree().write(
        os.path.join(base_folder, 'TestAll.xml'),
        encoding='utf-8', xml_declaration=True, pretty_print=True)
    # 2) Create git repo clone and merge PR into it (if necessary)
    try:
        repo = Repo(os.path.join(
            config.get('SAMPLE_REPOSITORY', ''), 'unsafe-ccextractor'))
    except InvalidGitRepositoryError:
        log.critical('Could not open CCExtractor\'s repository copy!')
        return
    # Return to master
    repo.heads.master.checkout(True)
    # Update repository from upstream
    try:
        origin = repo.remote('origin')
    except ValueError:
        log.critical('Origin remote doesn\'t exist!')
        return
    fetch_info = origin.fetch()
    if len(fetch_info) == 0:
        log.warn('No info fetched from remote!')
    # Pull code (finally)
    pull_info = origin.pull()
    if len(pull_info) == 0:
        log.warn('Didn\'t pull any information from remote!')
    if pull_info[0].flags > 128:
        log.critical('Didn\'t pull any information from remote: %s!'
                     % pull_info[0].flags)
        return
    # Delete the test branch if it exists, and recreate
    try:
        repo.delete_head('CI_Branch', force=True)
    except GitCommandError:
        log.warn('Could not delete CI_Branch head')
        traceback.print_exc()
    # If PR, merge, otherwise reset to commit
    if test.test_type == TestType.pull_request:
        # Fetch PR (stored under origin/pull/<id>/head)
        pull_info = origin.fetch('pull/{id}/head:CI_Branch'.format(
            id=test.pr_nr))
        if len(pull_info) == 0:
            log.warn('Didn\'t pull any information from remote PR!')
        if pull_info[0].flags > 128:
            log.critical('Didn\'t pull any information from remote PR: '
                         '%s!' % pull_info[0].flags)
            return
        try:
            test_branch = repo.heads['CI_Branch']
        except IndexError:
            log.critical('CI_Branch does not exist')
            return
        # Check out branch
        test_branch.checkout(True)
        # Rebase on master
        try:
            repo.git.rebase('master')
        except GitCommandError:
            progress = TestProgress(
                test.id, TestStatus.preparation, 'Rebase on master')
            db.add(progress)
            progress = TestProgress(
                test.id, TestStatus.canceled,
                'Merge conflict, please resolve.')
            db.add(progress)
            db.commit()
            # Report back
            gh = GitHub(access_token=g.github['bot_token'])
            gh_commit = gh.repos(g.github['repository_owner'])(
                g.github['repository']).statuses(test.commit)
            with app.app_context():
                target_url = url_for(
                    'test.by_id', test_id=test.id, _external=True)
            context = "CI - %s" % test.platform.value
            gh_commit.post(
                state=Status.ERROR, description='Failed to rebase',
                context=context, target_url=target_url)
            # Return, so next one can be handled
            return
        # TODO: check what happens on merge conflicts
    else:
        test_branch = repo.create_head('CI_Branch', 'HEAD')
        # Check out branch for test purposes
        test_branch.checkout(True)
        try:
            repo.head.reset(test.commit, working_tree=True)
        except GitCommandError:
            log.warn('Git commit %s (test %s) does not exist!' % (
                test.commit, test.id))
            return
    # Power on machine
    try:
        vm.create()
        db.add(status)
        db.commit()
    except libvirt.libvirtError:
        log.critical("Failed to launch VM %s" % kvm_name)
    except IntegrityError:
        log.warn("Duplicate entry for %s" % test.id)
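# For reference, the collection file produced above ends up looking roughly
# like this (the category name is illustrative):
#
# <?xml version='1.0' encoding='utf-8'?>
# <multitest>
#   <testfile>
#     <location>SomeCategory.xml</location>
#   </testfile>
# </multitest>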
def start_ci():
    if request.method != 'POST':
        return 'OK'
    else:
        abort_code = 418

        event = request.headers.get('X-GitHub-Event')
        if event == "ping":
            return json.dumps({'msg': 'Hi!'})

        x_hub_signature = request.headers.get('X-Hub-Signature')
        if not is_valid_signature(x_hub_signature, request.data,
                                  g.github['ci_key']):
            g.log.warning('CI signature failed: %s' % x_hub_signature)
            abort(abort_code)

        payload = request.get_json()
        if payload is None:
            g.log.warning('CI payload is empty: %s' % payload)
            abort(abort_code)

        gh = GitHub(access_token=g.github['bot_token'])

        if event == "push":
            # If it's a push, run the tests
            commit = payload['after']
            gh_commit = gh.repos(g.github['repository_owner'])(
                g.github['repository']).statuses(commit)
            queue_test(g.db, gh_commit, commit, TestType.commit)
            # Update the db to the new last commit
            ref = gh.repos(g.github['repository_owner'])(
                g.github['repository']).git().refs('heads/master').get()
            last_commit = GeneralData.query.filter(
                GeneralData.key == 'last_commit').first()
            last_commit.value = ref['object']['sha']
            g.db.commit()
        elif event == "pull_request":
            # If it's a PR, run the tests
            if payload['action'] == 'opened':
                try:
                    commit = payload['pull_request']['head']['sha']
                except KeyError:
                    g.log.critical(
                        "Didn't find a SHA value for a newly opened PR!")
                    g.log.debug(payload)
                    commit = ''
            elif payload['action'] == 'closed':
                g.log.debug('PR was closed, no after hash available')
                commit = ''
            else:
                try:
                    commit = payload['after']
                except KeyError:
                    g.log.critical("Didn't find the after SHA for the "
                                   "updated commit!")
                    g.log.debug(payload)
                    commit = ''
            pr_nr = payload['pull_request']['number']
            gh_commit = gh.repos(g.github['repository_owner'])(
                g.github['repository']).statuses(commit)

            if payload['action'] == 'opened':
                # Run initial tests
                queue_test(g.db, gh_commit, commit, TestType.pull_request,
                           pr_nr=pr_nr)
            elif payload['action'] == 'synchronize':
                # Run/queue a new test set
                queue_test(g.db, gh_commit, commit, TestType.pull_request,
                           pr_nr=pr_nr)
            elif payload['action'] == 'closed':
                # Cancel running queue
                tests = Test.query.filter(Test.pr_nr == pr_nr).all()
                for test in tests:
                    # Add canceled status only if the test hasn't started
                    # yet
                    if len(test.progress) > 0:
                        continue
                    progress = TestProgress(test.id, TestStatus.canceled,
                                            "PR closed",
                                            datetime.datetime.now())
                    g.db.add(progress)
                    gh.repos(g.github['repository_owner'])(
                        g.github['repository']).statuses(test.commit).post(
                        state=Status.FAILURE,
                        description="Tests canceled",
                        context="CI - %s" % test.platform.value,
                        target_url=url_for(
                            'test.by_id', test_id=test.id, _external=True))
            elif payload['action'] == 'reopened':
                # Run tests again
                queue_test(g.db, gh_commit, commit, TestType.pull_request)
        else:
            # Unknown type
            g.log.warning('CI unrecognized event: %s' % event)

        return json.dumps({'msg': 'EOL'})
def blocked_users():
    """
    Render the blocked_users template.

    This returns a list of all currently blacklisted users.
    Also defines processing of forms to add/remove users from the
    blacklist. When a user is added to the blacklist, queued tests on any
    PR by that user are removed.
    """
    blocked_users = BlockedUsers.query.order_by(BlockedUsers.userID)

    # Initialize usernames dictionary
    usernames = {u.userID: 'Error, cannot get username'
                 for u in blocked_users}
    for key in usernames.keys():
        # Fetch usernames from GitHub API
        try:
            api_url = requests.get(
                'https://api.github.com/user/{}'.format(key), timeout=10)
            userdata = api_url.json()
            # Set values to the actual usernames if no errors
            usernames[key] = userdata['login']
        except requests.exceptions.RequestException:
            break

    # Define addUserForm processing
    add_user_form = AddUsersToBlacklist()
    if add_user_form.validate_on_submit():
        if BlockedUsers.query.filter_by(
                userID=add_user_form.userID.data).first() is not None:
            flash('User already blocked.')
            return redirect(url_for('.blocked_users'))

        blocked_user = BlockedUsers(add_user_form.userID.data,
                                    add_user_form.comment.data)
        g.db.add(blocked_user)
        g.db.commit()
        flash('User blocked successfully.')

        try:
            # Remove any queued pull request from the blocked user
            gh = GitHub(access_token=g.github['bot_token'])
            repository = gh.repos(g.github['repository_owner'])(
                g.github['repository'])
            # Get all pull requests by the blocked user on the repo
            pulls = repository.pulls.get()
            for pull in pulls:
                if pull['user']['id'] != add_user_form.userID.data:
                    continue
                tests = Test.query.filter(
                    Test.pr_nr == pull['number']).all()
                for test in tests:
                    # Add canceled status only if the test hasn't started
                    # yet
                    if len(test.progress) > 0:
                        continue
                    progress = TestProgress(test.id, TestStatus.canceled,
                                            "PR closed",
                                            datetime.datetime.now())
                    g.db.add(progress)
                    g.db.commit()
                    try:
                        repository.statuses(test.commit).post(
                            state=Status.FAILURE,
                            description="Tests canceled since user "
                                        "blacklisted",
                            context="CI - {name}".format(
                                name=test.platform.value),
                            target_url=url_for('test.by_id',
                                               test_id=test.id,
                                               _external=True))
                    except ApiError as a:
                        g.log.error(
                            'Got an exception while posting to GitHub! '
                            'Message: {message}'.format(
                                message=a.message))
        except ApiError as a:
            g.log.error(
                'Pull Requests of Blocked User could not be fetched: '
                '{res}'.format(res=a.response))

        return redirect(url_for('.blocked_users'))

    # Define removeUserForm processing
    remove_user_form = RemoveUsersFromBlacklist()
    if remove_user_form.validate_on_submit():
        blocked_user = BlockedUsers.query.filter_by(
            userID=remove_user_form.userID.data).first()
        if blocked_user is None:
            flash('No such user in Blacklist')
            return redirect(url_for('.blocked_users'))
        # Original read g.db.remove(blocked_user); sessions delete
        # entities with delete()
        g.db.delete(blocked_user)
        g.db.commit()
        flash('User removed successfully.')
        return redirect(url_for('.blocked_users'))

    return {
        'addUserForm': add_user_form,
        'removeUserForm': remove_user_form,
        'blocked_users': blocked_users,
    }
def index():
    """
    Display a form to allow users to run tests.

    Users can enter a commit hash or select a commit from their repo that
    is not more than 30 days old. Users can customize the test based on
    selected regression tests and platforms. Also displays the list of
    customized tests started by the user. The user is redirected to the
    same page on submit.
    """
    fork_test_form = TestForkForm(request.form)
    username = fetch_username_from_token()
    commit_options = False
    if username is not None:
        gh = GitHub(access_token=g.github['bot_token'])
        repository = gh.repos(username)(g.github['repository'])
        # Only commits since last month
        last_month = datetime.now() - timedelta(days=30)
        commit_since = last_month.isoformat() + 'Z'
        commits = repository.commits().get(since=commit_since)
        commit_arr = []
        for commit in commits:
            commit_url = commit['html_url']
            commit_sha = commit['sha']
            commit_option = '<a href="{url}">{sha}</a>'.format(
                url=commit_url, sha=commit_sha)
            commit_arr.append((commit_sha, commit_option))
        # If there are commits present, display them on the webpage
        if len(commit_arr) > 0:
            fork_test_form.commit_select.choices = commit_arr
            commit_options = True
    fork_test_form.regression_test.choices = [
        (regression_test.id, regression_test)
        for regression_test in RegressionTest.query.all()]
    if fork_test_form.add.data and fork_test_form.validate_on_submit():
        import requests
        regression_tests = fork_test_form.regression_test.data
        commit_hash = fork_test_form.commit_hash.data
        repo = g.github['repository']
        platforms = fork_test_form.platform.data
        api_url = ('https://api.github.com/repos/{user}/{repo}/'
                   'commits/{hash}').format(
            user=username, repo=repo, hash=commit_hash)
        # Show error if GitHub fails to recognize the commit
        response = requests.get(api_url)
        if response.status_code == 500:
            fork_test_form.commit_hash.errors.append(
                'Error contacting Github')
        elif response.status_code != 200:
            fork_test_form.commit_hash.errors.append('Wrong Commit Hash')
        else:
            add_test_to_kvm(username, commit_hash, platforms,
                            regression_tests)
            return redirect(url_for('custom.index'))
    populated_categories = g.db.query(
        regressionTestLinkTable.c.category_id).subquery()
    categories = Category.query.filter(
        Category.id.in_(populated_categories)).order_by(
        Category.name.asc()).all()
    tests = Test.query.filter(
        and_(TestFork.user_id == g.user.id,
             TestFork.test_id == Test.id)).order_by(
        Test.id.desc()).limit(50).all()
    return {
        'addTestFork': fork_test_form,
        'commit_options': commit_options,
        'tests': tests,
        'TestType': TestType,
        'GitUser': username,
        'categories': categories,
        'customize': True,
    }
# NOTE: this snippet relies on module-level imports that sit outside it
# (flask's g/request/url_for, werkzeug's secure_filename, sqlalchemy's
# and_/func/label/count, hashlib, os, datetime, multiprocessing.Process,
# and the project's own model classes).
def progress_reporter(test_id, token):
    """
    Handle the progress of a certain test after validating the token.
    If necessary, update the status on GitHub.

    :param test_id: The id of the test to update.
    :type test_id: int
    :param token: The token to check the validity of the request.
    :type token: str
    :return: Nothing.
    :rtype: None
    """
    from run import config, log
    # Verify token
    test = Test.query.filter(Test.id == test_id).first()
    if test is not None and test.token == token:
        repo_folder = config.get('SAMPLE_REPOSITORY', '')
        if 'type' in request.form:
            if request.form['type'] == 'progress':
                # Progress: log it
                status = TestStatus.from_string(request.form['status'])
                # Check that the test is not re-running a previous status
                istatus = TestStatus.progress_step(status)
                message = request.form['message']
                if len(test.progress) != 0:
                    laststatus = TestStatus.progress_step(test.progress[-1].status)
                    if laststatus in [TestStatus.completed, TestStatus.canceled]:
                        return "FAIL"
                    if laststatus > istatus:
                        status = TestStatus.canceled
                        message = "Duplicate Entries"
                progress = TestProgress(test.id, status, message)
                g.db.add(progress)
                g.db.commit()
                gh = GitHub(access_token=g.github['bot_token'])
                repository = gh.repos(g.github['repository_owner'])(g.github['repository'])
                # Store the test commit for testing in case of a commit-triggered test
                if status == TestStatus.completed:
                    commit_name = 'fetch_commit_' + test.platform.value
                    commit = GeneralData.query.filter(GeneralData.key == commit_name).first()
                    fetch_commit = Test.query.filter(
                        and_(Test.commit == commit.value, Test.platform == test.platform)
                    ).first()
                    if test.test_type == TestType.commit and test.id > fetch_commit.id:
                        commit.value = test.commit
                        g.db.commit()
                # If the status is final, update the average runtime and remove the Kvm entry
                if status in [TestStatus.completed, TestStatus.canceled]:
                    log.debug("Test {id} has been {status}".format(id=test_id, status=status))
                    var_average = 'average_time_' + test.platform.value
                    current_average = GeneralData.query.filter(
                        GeneralData.key == var_average).first()
                    average_time = 0
                    total_time = 0
                    if current_average is None:
                        platform_tests = g.db.query(Test.id).filter(
                            Test.platform == test.platform).subquery()
                        finished_tests = g.db.query(TestProgress.test_id).filter(
                            and_(
                                TestProgress.status.in_(
                                    [TestStatus.canceled, TestStatus.completed]),
                                TestProgress.test_id.in_(platform_tests)
                            )
                        ).subquery()
                        in_progress_statuses = [
                            TestStatus.preparation, TestStatus.completed, TestStatus.canceled
                        ]
                        finished_tests_progress = g.db.query(TestProgress).filter(
                            and_(
                                TestProgress.test_id.in_(finished_tests),
                                TestProgress.status.in_(in_progress_statuses)
                            )
                        ).subquery()
                        times = g.db.query(
                            finished_tests_progress.c.test_id,
                            label('time', func.group_concat(finished_tests_progress.c.timestamp))
                        ).group_by(finished_tests_progress.c.test_id).all()
                        for p in times:
                            parts = p.time.split(',')
                            start = datetime.datetime.strptime(parts[0], '%Y-%m-%d %H:%M:%S')
                            end = datetime.datetime.strptime(parts[-1], '%Y-%m-%d %H:%M:%S')
                            total_time += (end - start).total_seconds()
                        if len(times) != 0:
                            average_time = total_time // len(times)
                        new_avg = GeneralData(var_average, average_time)
                        g.db.add(new_avg)
                        g.db.commit()
                    else:
                        all_results = TestResult.query.count()
                        regression_test_count = RegressionTest.query.count()
                        number_test = all_results / regression_test_count
                        updated_average = float(current_average.value) * (number_test - 1)
                        pr = test.progress_data()
                        end_time = pr['end']
                        start_time = pr['start']
                        if end_time.tzinfo is not None:
                            end_time = end_time.replace(tzinfo=None)
                        if start_time.tzinfo is not None:
                            start_time = start_time.replace(tzinfo=None)
                        last_running_test = end_time - start_time
                        updated_average = updated_average + last_running_test.total_seconds()
                        current_average.value = updated_average // number_test
                        g.db.commit()
                    kvm = Kvm.query.filter(Kvm.test_id == test_id).first()
                    if kvm is not None:
                        log.debug("Removing KVM entry")
                        g.db.delete(kvm)
                        g.db.commit()
                # Post status update
                state = Status.PENDING
                target_url = url_for('test.by_id', test_id=test.id, _external=True)
                context = "CI - {name}".format(name=test.platform.value)
                if status == TestStatus.canceled:
                    state = Status.ERROR
                    message = 'Tests aborted due to an error; please check'
                elif status == TestStatus.completed:
                    # Determine success or failure. It fails if either of these happens:
                    # - a crash (unexpected exit code)
                    # - a non-None value in the "got" of a TestResultFile
                    #   (meaning the hashes do not match)
                    crashes = g.db.query(count(TestResult.exit_code)).filter(
                        and_(
                            TestResult.test_id == test.id,
                            TestResult.exit_code != TestResult.expected_rc
                        )
                    ).scalar()
                    results_zero_rc = g.db.query(RegressionTest.id).filter(
                        RegressionTest.expected_rc == 0).subquery()
                    results = g.db.query(count(TestResultFile.got)).filter(
                        and_(
                            TestResultFile.test_id == test.id,
                            TestResultFile.regression_test_id.in_(results_zero_rc),
                            TestResultFile.got.isnot(None)
                        )
                    ).scalar()
                    log.debug('Test {id} completed: {crashes} crashes, {results} results'.format(
                        id=test.id, crashes=crashes, results=results))
                    if crashes > 0 or results > 0:
                        state = Status.FAILURE
                        message = 'Not all tests completed successfully, please check'
                    else:
                        state = Status.SUCCESS
                        message = 'Tests completed'
                    update_build_badge(state, test)
                else:
                    message = progress.message
                gh_commit = repository.statuses(test.commit)
                try:
                    gh_commit.post(state=state, description=message, context=context,
                                   target_url=target_url)
                except ApiError as a:
                    log.error('Got an exception while posting to GitHub! '
                              'Message: {message}'.format(message=a.message))
                if status in [TestStatus.completed, TestStatus.canceled]:
                    # Start the next test on the same platform, if necessary
                    process = Process(target=start_platform, args=(g.db, repository, 60))
                    process.start()
            elif request.form['type'] == 'equality':
                log.debug('Equality for {t}/{rt}/{rto}'.format(
                    t=test_id, rt=request.form['test_id'], rto=request.form['test_file_id']))
                rto = RegressionTestOutput.query.filter(
                    RegressionTestOutput.id == request.form['test_file_id']).first()
                if rto is None:
                    # Equality posted on a file that is presumably ignored
                    log.info('No rto for {test_id}: {test}'.format(
                        test_id=test_id, test=request.form['test_id']))
                else:
                    result_file = TestResultFile(test.id, request.form['test_id'],
                                                 rto.id, rto.correct)
                    g.db.add(result_file)
                    g.db.commit()
            elif request.form['type'] == 'logupload':
                log.debug("Received log file for test {id}".format(id=test_id))
                # File upload, process
                if 'file' in request.files:
                    uploaded_file = request.files['file']
                    filename = secure_filename(uploaded_file.filename)
                    if filename == '':
                        return 'EMPTY'
                    temp_path = os.path.join(repo_folder, 'TempFiles', filename)
                    # Save to temporary location
                    uploaded_file.save(temp_path)
                    final_path = os.path.join(
                        repo_folder, 'LogFiles', '{id}{ext}'.format(id=test.id, ext='.txt'))
                    os.rename(temp_path, final_path)
                    log.debug("Stored log file")
            elif request.form['type'] == 'upload':
                log.debug('Upload for {t}/{rt}/{rto}'.format(
                    t=test_id, rt=request.form['test_id'], rto=request.form['test_file_id']))
                # File upload, process
                if 'file' in request.files:
                    uploaded_file = request.files['file']
                    filename = secure_filename(uploaded_file.filename)
                    if filename == '':
                        return 'EMPTY'
                    temp_path = os.path.join(repo_folder, 'TempFiles', filename)
                    # Save to temporary location
                    uploaded_file.save(temp_path)
                    # Get the hash so we can check whether it has already been submitted
                    hash_sha256 = hashlib.sha256()
                    with open(temp_path, "rb") as f:
                        for chunk in iter(lambda: f.read(4096), b""):
                            hash_sha256.update(chunk)
                    file_hash = hash_sha256.hexdigest()
                    filename, file_extension = os.path.splitext(filename)
                    final_path = os.path.join(
                        repo_folder, 'TestResults',
                        '{hash}{ext}'.format(hash=file_hash, ext=file_extension))
                    os.rename(temp_path, final_path)
                    rto = RegressionTestOutput.query.filter(
                        RegressionTestOutput.id == request.form['test_file_id']).first()
                    result_file = TestResultFile(test.id, request.form['test_id'],
                                                 rto.id, rto.correct, file_hash)
                    g.db.add(result_file)
                    g.db.commit()
            elif request.form['type'] == 'finish':
                log.debug('Finish for {t}/{rt}'.format(
                    t=test_id, rt=request.form['test_id']))
                regression_test = RegressionTest.query.filter(
                    RegressionTest.id == request.form['test_id']).first()
                result = TestResult(test.id, regression_test.id, request.form['runTime'],
                                    request.form['exitCode'], regression_test.expected_rc)
                g.db.add(result)
                try:
                    g.db.commit()
                except IntegrityError as e:
                    log.error('Could not save the results: {msg}'.format(msg=e))
            return "OK"
    return "FAIL"
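# A minimal, standalone sketch of the GitHub status update that progress_reporter
# performs above, using the same githubpy call chain. The token, owner, repository,
# and commit SHA below are placeholders, not values from the original code.
from github import GitHub, ApiError

gh = GitHub(access_token='<bot-token>')  # placeholder token
gh_commit = gh.repos('example-owner')('example-repo').statuses('0123abcd')  # placeholder repo/SHA
try:
    gh_commit.post(state='success',
                   description='Tests completed',
                   context='CI - linux',
                   target_url='https://ci.example.com/test/1')
except ApiError as a:
    print('Got an exception while posting to GitHub! Message: {0}'.format(a))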
# import cgitb; cgitb.enable()
import os
import sys
sys.path.insert(0, os.path.expanduser('~/site/python'))
import json
from pymongo import *
from github import GitHub
from secrets import github_username, github_password

print("Content-Type: text/html; charset=utf-8")
print("")
print("hello!!!")

gh = GitHub(username=github_username, password=github_password)
issuesdict = gh.repos("vgulaev")("trimet_it").issues.get(sort="created", filter="all")
# print(issuesdict[0].number)


def getnewissues(start_id):
    client = MongoClient()
    db = client['trimet_issues']
    posts = db.issues
    issues_in_db = posts.find().sort("number", direction=DESCENDING)
    if db['issues'].count() > 0:
        print(issues_in_db[0]["number"])
        i_from = issues_in_db[0]["number"] + 1
    else:
        i_from = 1
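# The snippet above stops after computing i_from. A hedged sketch of a plausible
# continuation: persist every fetched issue whose number is at or above i_from.
# The helper name and the stored fields are assumptions, not the original code.
def store_new_issues(posts, issues, i_from):
    # 'posts' is the MongoDB collection, 'issues' the list returned by issues.get()
    for issue in issues:
        if issue['number'] >= i_from:
            posts.insert_one({
                'number': issue['number'],
                'title': issue['title'],
                'created_at': issue['created_at'],
            })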
# Fragment: the enclosing loop header is not part of this snippet; the body below
# runs once per line read from the already-open file handle 'fo'.
    print line
    splitlist = line.split('/', 5)
    repo_owner = splitlist[3]
    repo_name = splitlist[4].split('.git\n', 1)[0]
    if 'GitFetch' in line:
        branch_path = line.split('/', 3)[2].rstrip()
        print 'BRANCH', branch_path
    if 'Directory' in line:
        tag = (line.split(' ', 1)[1]).rstrip()
        print 'TAG', tag
        file_path = tag + '/Dockerfile'
        print 'FILE PATH', file_path
        print repo_owner, repo_name
        if branch_path == '':
            file_git = gitapi.repos(repo_owner)(repo_name).contents(file_path).get()
        else:
            file_git = gitapi.repos(repo_owner)(repo_name).contents(
                file_path + '?ref=' + branch_path).get()
        download_file = urllib.URLopener()
        download_file.retrieve(
            file_git.download_url,
            './downloads/Dockerfile_' + repo_name + '_' + tag.replace('/', '_'))
        count = count + 1
    print count
    line = fo.readline()
    branch_path = ''
fo.close()
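# Side note: githubpy forwards keyword arguments on .get() as query-string
# parameters (cf. issues.get(sort="created", filter="all") earlier), so the manual
# '?ref=' concatenation can be avoided. A hedged sketch with placeholder values:
from github import GitHub

gitapi = GitHub()  # unauthenticated; subject to the lower API rate limit
file_git = gitapi.repos('example-owner')('example-repo').contents('Dockerfile').get(ref='master')
print(file_git['download_url'])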
            if member["bioguide_id"] == "":
                readme.write(row_template_no_bg.format(**member))
            elif member["bioguide_id"] in websites:
                member["website"] = websites[member["bioguide_id"]]
                readme.write(row_template.format(**member))
            else:
                readme.write(row_template_no_web.format(**member))

if args.issues:
    print("Only create github issues if you're sure you're ready")
    print("The line actually creating issues is commented out to protect you from yourself")
    print("So go uncomment it when you're really ready")
    # The credential prompts and repository lookup were masked ('******') in the
    # source; reconstructed here with getpass and placeholder owner/name variables.
    username = raw_input("Github username: ")
    password = getpass.getpass("Github password: ")
    gh = GitHub(username=username, password=password)
    repo = gh.repos(repo_owner)(repo_name)  # owner and name were masked in the source
    for m in new_reps:
        title = "[%s] Rep. %s" % (m, new_reps[m])
        body = "Newly elected to 114th congress"
        # repo.issues.post(title=title, body=body)
    for m in new_senators:
        title = "[%s] Sen. %s" % (m, new_senators[m])
        body = "Newly elected to 114th congress"
        # repo.issues.post(title=title, body=body)
    # Do we also want to create them for existing legislators in case they change anything?
    ## The decision seems to be no for existing legislators right now.
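# For reference, a minimal sketch of the issue-creation call the script keeps
# commented out, against a placeholder repository. Accounts with two-factor auth
# would need a token (GitHub(access_token=...)) rather than a password.
from github import GitHub

gh = GitHub(username='example-user', password='example-password')  # placeholders
repo = gh.repos('example-owner')('example-repo')
repo.issues.post(title="[B000000] Rep. Example Person",
                 body="Newly elected to 114th congress")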