def trusted_repo(info):
    """Return True when info['clone'] is on the trusted-repo whitelist.

    The whitelist is re-read on every call (remote master copy first, local
    file as fallback) so it can be updated without restarting the service.
    A GitHub commit status reflecting the decision is always posted.

    :param info: dict with at least 'clone', 'repo' and 'sha' keys.
    :returns: True if the clone URL is whitelisted, False otherwise
              (including when the whitelist cannot be retrieved at all).
    """
    trusted = {}
    try:
        # Prefer the copy in the master repo so updates take effect
        # immediately; fall back to the local file if the fetch fails.
        result = requests.get(TRUSTED_REPO_REMOTE)
        if result.status_code == 200:
            # safe_load: the remote document is plain data and must never be
            # allowed to instantiate arbitrary Python objects (yaml.load on
            # untrusted input is a code-execution risk).
            trusted = yaml.safe_load(result.text)
        elif os.path.isfile(TRUSTED_REPO_FN):
            # isfile() already implies exists(); no separate check needed.
            with open(TRUSTED_REPO_FN, 'r') as tdata:
                trusted = yaml.safe_load(tdata.read())

        if info['clone'] in trusted['REPOS']:
            _create_status(info["repo"], info['sha'], 'success',
                           'Repo trusted', 'CI permissions')
            return True
        else:
            _create_status(info["repo"], info['sha'], 'failure',
                           'Repo untrusted', 'CI permissions')
    except Exception as e:
        # Broad catch is deliberate: any failure (network, parse, missing
        # 'REPOS' key) must degrade to "not trusted", never crash the worker.
        _p('Unable to retrieve trusted repo list! %s' % str(e))
        _create_status(info["repo"], info['sha'], 'failure',
                       'WL unavailable!', 'CI permissions')
    return False
def _post_with_retries(url, data, auth):
    """POST ``data`` as JSON to ``url``, retrying on connection errors.

    Makes up to 10 attempts, sleeping 1 second between them.

    :param url: target URL.
    :param data: payload, sent as the JSON body.
    :param auth: (user, token) tuple passed to requests' ``auth``.
    :returns: the requests response object, or None when every attempt
              raised a ConnectionError.  NOTE(review): callers such as
              _create_status dereference ``.status_code`` on the result,
              so a fully failed retry loop will raise there — confirm
              whether that is the intended failure mode.
    """
    for _ in range(10):
        try:
            return requests.post(url, auth=auth, json=data)
        except requests.ConnectionError as ce:
            _p("ConnectionError to (post) %s : message(%s)" % (url, str(ce)))
            _p("Trying again in 1 second")
            time.sleep(1)
    # Explicit (was implicit) so the exhausted-retries path is visible.
    return None
def request_queue():
    """Worker loop: drain the request queue and run tests for each item.

    Blocks up to 3 seconds per poll so the loop re-checks
    ``testlib.RUN.value`` regularly and can shut down cleanly.
    """
    while testlib.RUN.value:
        # noinspection PyBroadException
        try:
            run_tests(req_q.get(True, 3))
        except Queue.Empty:
            # Nothing queued this poll cycle; go around again.
            pass
        except Exception:
            stack = traceback.format_exc()
            _p("request_queue: unexpected exception: %s" % (stack))
def _create_status(repo, sha1, state, desc, context, log_url=None):
    """Post a commit status to the GitHub statuses API.

    NOTE(review): this definition is shadowed at import time by a later
    duplicate of the same name in this file, which authenticates with
    (USER, TOKEN) rather than (HOST, TOKEN) — confirm which credential
    pair is correct and remove the dead copy.

    :param repo: repository in ``user/repo`` form.
    :param sha1: commit SHA the status is attached to.
    :param state: GitHub state string ('pending', 'success', 'failure', ...).
    :param desc: short human-readable description.
    :param context: status context label shown on the PR.
    :param log_url: optional URL attached as ``target_url``.
    :raises Exception: if ``repo`` is not in ``user/repo`` form.
    """
    if '/' not in repo:
        raise Exception("Expecting repo to be in form user/repo %s" % repo)
    url = 'https://api.github.com/repos/%s/statuses/%s' % (repo, sha1)
    data = {'state': state, "description": desc, "context": context}
    if log_url:
        data["target_url"] = log_url
    # 201 Created is the API's success code for a new status.
    r = _post_with_retries(url, data, (HOST, TOKEN))
    if r.status_code == 201:
        _p('We updated status %s' % str(data))
    else:
        _print_error(r, "Unexpected error on setting status ")
def _create_status(repo, sha1, state, desc, context, log_url=None):
    """Post a commit status to the GitHub statuses API.

    :param repo: repository in ``user/repo`` form.
    :param sha1: commit SHA the status is attached to.
    :param state: GitHub state string ('pending', 'success', 'failure', ...).
    :param desc: short human-readable description.
    :param context: status context label shown on the PR.
    :param log_url: optional URL attached as ``target_url``.
    :raises Exception: if ``repo`` is not in ``user/repo`` form.
    """
    if '/' not in repo:
        raise Exception("Expecting repo to be in form user/repo %s" % repo)
    status_url = 'https://api.github.com/repos/%s/statuses/%s' % (repo, sha1)
    payload = {'state': state, "description": desc, "context": context}
    if log_url:
        payload["target_url"] = log_url
    response = _post_with_retries(status_url, payload, (USER, TOKEN))
    # 201 Created is the API's success code for a new status.
    if response.status_code == 201:
        _p('We updated status url=%s data=%s' % (str(status_url),
                                                 str(payload)))
    else:
        _print_error(response, "Unexpected error on setting status ")
def e_handler():
    """Webhook handler for GitHub events: queues pull requests for testing.

    Rejects requests whose HMAC signature does not verify; for
    pull_request events, enqueues the PR details for the worker thread.
    """
    global test_count
    # Check secret before we do *anything*
    if not _verify_signature(request.body.read(),
                             request.headers['X-Hub-Signature']):
        response.status = 500
        return
    if request.headers['X-Github-Event'] == 'pull_request':
        pr = request.json["pull_request"]
        repo = pr["base"]["repo"]["full_name"]
        clone = pr["head"]["repo"]["clone_url"]
        sha = pr['head']['sha']
        branch = pr['head']['ref']
        _p('Queuing unit tests for %s %s' % (clone, branch))
        req_q.put(dict(repo=repo, sha=sha, branch=branch, clone=clone,
                       test_run_id=test_count))
        test_count += 1
    else:
        # Unknown event type: dump headers and payload for debugging.
        _p("Got an unexpected header from github")
        for header, value in request.headers.items():
            _p('%s:%s' % (str(header), str(value)))
        pp.pprint(request.json)
        sys.stdout.flush()
    response.status = 200
def request_queue():
    """Worker loop: pull queued PR info, mark it in-flight, run the tests.

    ``processing`` (guarded by ``processing_mutex``) exposes the item
    currently being tested so other threads can report progress.
    """
    global processing
    global processing_mutex
    while testlib.RUN.value:
        # noinspection PyBroadException
        try:
            info = req_q.get(True, 3)
            with processing_mutex:
                processing = info
            try:
                run_tests(info)
            finally:
                # Always clear the in-flight marker, even when run_tests
                # raises; previously an exception skipped the reset (the
                # generic except below swallowed it), leaving a stale
                # "currently processing" entry forever.
                with processing_mutex:
                    processing = None
        except Queue.Empty:
            pass
        except Exception:
            st = traceback.format_exc()
            _p("request_queue: unexpected exception: %s" % (st))
def e_handler():
    """Webhook handler for GitHub events.

    Verifies the HMAC signature, and for pull_request events posts an
    immediate 'pending' status (so the PR shows the service noticed it)
    and enqueues the PR for the test worker.
    """
    global test_count
    # Check secret before we do *anything*
    if not _verify_signature(request.body.read(),
                             request.headers['X-Hub-Signature']):
        response.status = 500
        return
    if request.headers['X-Github-Event'] == 'pull_request':
        pr = request.json["pull_request"]
        info = dict(repo=pr["base"]["repo"]["full_name"],
                    sha=pr['head']['sha'],
                    branch=pr['head']['ref'],
                    clone=pr["head"]["repo"]["clone_url"],
                    test_run_id=test_count)
        _p('Queuing unit tests for %s %s' % (info['clone'], info['branch']))
        # Lets immediately set something on the PR so that people looking at
        # the PR see that the service is aware of it.
        _create_status(info["repo"], info['sha'], 'pending',
                       'CI requested, #waiting = %d' % req_q.qsize(),
                       'CI permissions')
        req_q.put(info)
        test_count += 1
    else:
        # Unknown event type: dump headers and payload for debugging.
        _p("Got an unexpected header from github")
        for header, value in request.headers.items():
            _p('%s:%s' % (str(header), str(value)))
        pp.pprint(request.json)
        sys.stdout.flush()
    response.status = 200
def _print_error(req, msg):
    """Log *msg* with the response's status code, then pretty-print the
    JSON body and flush stdout."""
    status_line = "%s status code = %d" % (msg, req.status_code)
    _p(status_line)
    pp.pprint(req.json())
    sys.stdout.flush()
def run_tests(info):
    """Run the CI suite for a queued pull request across all test nodes.

    Posts 'pending' statuses for every array on every connected node,
    starts the tests, then polls until no job reports RUNNING, posting
    success/failure statuses back to GitHub as jobs complete.

    :param info: dict with 'repo', 'sha', 'clone', 'branch' keys; a
                 'status' key is added with the overall outcome.
    """
    # As nodes can potentially come/go with errors we will get a list of what
    # we started with and will try to utilize them and only them for the
    # duration of the test
    connected_nodes = node_mgr.nodes()

    _p("Setting status @ github to pending")
    for n in connected_nodes:
        # Add status updates to github for all the arrays we will be testing
        # against
        for a in n.arrays():
            _create_status(
                info["repo"], info['sha'], "pending",
                'Plugin = %s started @ %s' %
                (a[1], datetime.datetime.fromtimestamp(
                    time.time()).strftime('%m/%d %H:%M:%S')), a[0])

    _p('Starting the tests')
    # Start the tests
    for n in connected_nodes:
        for a in n.arrays():
            job = n.start_test(info['clone'], info['branch'], a[0])
            if job:
                _p("Test started for %s job = %s" % (a[0], job))
            else:
                # Space before 'failed' matters: without it the GitHub
                # status read "Plugin = <name>failed to start".
                _create_status(info["repo"], info['sha'], "failure",
                               'Plugin = ' + a[1] + ' failed to start', a[0])
    _p('Tests started')

    # Monitor and report status as they are completed
    all_done = False
    while not all_done:
        all_done = True
        for n in connected_nodes:
            # Get the jobs
            for r in n.jobs():
                job_id = r['JOB_ID']
                array_id = r['ID']
                status = r['STATUS']
                plugin = r['PLUGIN']
                if status == 'RUNNING':
                    all_done = False
                else:
                    if status == 'SUCCESS':
                        _create_status(info["repo"], info['sha'], 'success',
                                       'Plugin = ' + plugin, array_id)
                        info['status'] = 'SUCCESS'
                    else:
                        url = '%s/%s.html' % (CI_SERVICE_URL, job_id)
                        info['status'] = url
                        # Fetch the error log
                        _log_write(n, job_id)
                        _create_status(info["repo"], info['sha'], 'failure',
                                       'Plugin = ' + plugin, array_id, url)
                    # Delete the jobs
                    n.job_delete(job_id)
        time.sleep(5)

    work_log.append(info)
    _p('Test run completed')
def run_tests(info):
    """Run the CI suite for a queued pull request across all test nodes.

    Refuses to run for repos not on the trusted whitelist.  Posts
    'pending' statuses for every array on every connected node, starts
    the tests, then polls until no job reports RUNNING, posting
    success/failure statuses back to GitHub as jobs complete.

    :param info: dict with 'repo', 'sha', 'clone', 'branch' keys; a
                 'status' key is added with the overall outcome.
    """
    # As nodes can potentially come/go with errors we will get a list of what
    # we started with and will try to utilize them and only them for the
    # duration of the test
    connected_nodes = node_mgr.nodes()

    # Lets do a whitelist check, to ensure only those users who we trust are
    # going to get automated unit tests run.
    if not trusted_repo(info):
        return

    _p("Setting status @ github to pending")
    for n in connected_nodes:
        # Add status updates to github for all the arrays we will be testing
        # against
        for a in n.arrays():
            _create_status(
                info["repo"], info['sha'], "pending",
                'Plugin = %s started @ %s' %
                (a[1], datetime.datetime.fromtimestamp(
                    time.time()).strftime('%m/%d %H:%M:%S')), a[0])

    _p('Starting the tests')
    # Start the tests
    for n in connected_nodes:
        for a in n.arrays():
            job = n.start_test(info['clone'], info['branch'], a[0])
            if job:
                _p("Test started for %s job = %s" % (a[0], job))
            else:
                # Space before 'failed' matters: without it the GitHub
                # status read "Plugin = <name>failed to start".
                _create_status(info["repo"], info['sha'], "failure",
                               'Plugin = ' + a[1] + ' failed to start', a[0])
    _p('Tests started')

    # Monitor and report status as they are completed
    all_done = False
    while not all_done:
        all_done = True
        for n in connected_nodes:
            # Get the jobs
            for r in n.jobs():
                job_id = r['JOB_ID']
                array_id = r['ID']
                status = r['STATUS']
                plugin = r['PLUGIN']
                if status == 'RUNNING':
                    all_done = False
                else:
                    if status == 'SUCCESS':
                        _create_status(info["repo"], info['sha'], 'success',
                                       'Plugin = ' + plugin, array_id)
                        info['status'] = 'SUCCESS'
                        # Delete the jobs
                        n.job_delete(job_id)
                    else:
                        url = '%s/%s.html' % (CI_SERVICE_URL, job_id)
                        info['status'] = url
                        # Fetch the error log, log write will delete the job
                        # if it was able to retrieve the log data!
                        _log_write(n, job_id)
                        _create_status(info["repo"], info['sha'], 'failure',
                                       'Plugin = ' + plugin, array_id, url)
        time.sleep(5)

    work_log.append(info)
    _p('Test run completed')