def cron():
    from mod_ci.controllers import start_platform
    from run import config, log
    from database import create_session
    from github import GitHub

    log.info('Run the cron for kicking off CI platform(s).')
    # Create session
    db = create_session(config['DATABASE_URI'])
    gh = GitHub(access_token=config['GITHUB_TOKEN'])
    repository = gh.repos(config['GITHUB_OWNER'])(config['GITHUB_REPOSITORY'])
    start_platform(db, repository)
def cron(testing=False):
    """Script to run from cron for Sampleplatform."""
    from mod_ci.controllers import start_platforms, kvm_processor, \
        TestPlatform
    from flask import current_app
    from run import config, log
    from database import create_session
    from github import GitHub

    log.info('Run the cron for kicking off CI platform(s).')
    # Create session
    db = create_session(config['DATABASE_URI'])
    gh = GitHub(access_token=config['GITHUB_TOKEN'])
    repository = gh.repos(config['GITHUB_OWNER'])(config['GITHUB_REPOSITORY'])

    if testing is True:
        kvm_processor(current_app._get_current_object(), db,
                      config.get('KVM_LINUX_NAME', ''), TestPlatform.linux,
                      repository, None)
    else:
        start_platforms(db, repository)
def progress_reporter(test_id, token):
    """
    Handle the progress of a certain test after validating the token.

    If necessary, update the status on GitHub.

    :param test_id: The id of the test to update.
    :type test_id: int
    :param token: The token to check the validity of the request.
    :type token: str
    :return: Nothing.
    :rtype: None
    """
    from run import config, log

    # Verify token
    test = Test.query.filter(Test.id == test_id).first()
    if test is not None and test.token == token:
        repo_folder = config.get('SAMPLE_REPOSITORY', '')

        if 'type' in request.form:
            if request.form['type'] == 'progress':
                # Progress, log
                status = TestStatus.from_string(request.form['status'])
                # Check whether test is not running previous status again
                istatus = TestStatus.progress_step(status)
                message = request.form['message']

                if len(test.progress) != 0:
                    laststatus = TestStatus.progress_step(
                        test.progress[-1].status)
                    if laststatus in [TestStatus.completed,
                                      TestStatus.canceled]:
                        return "FAIL"
                    if laststatus > istatus:
                        status = TestStatus.canceled
                        message = "Duplicate Entries"

                progress = TestProgress(test.id, status, message)
                g.db.add(progress)
                g.db.commit()

                gh = GitHub(access_token=g.github['bot_token'])
                repository = gh.repos(g.github['repository_owner'])(
                    g.github['repository'])

                # Store the test commit for testing in case of commit
                if status == TestStatus.completed:
                    commit_name = 'fetch_commit_' + test.platform.value
                    commit = GeneralData.query.filter(
                        GeneralData.key == commit_name).first()
                    fetch_commit = Test.query.filter(
                        and_(Test.commit == commit.value,
                             Test.platform == test.platform)).first()

                    if test.test_type == TestType.commit and \
                            test.id > fetch_commit.id:
                        commit.value = test.commit
                        g.db.commit()

                # If status is complete, remove the Kvm entry
                if status in [TestStatus.completed, TestStatus.canceled]:
                    log.debug("Test {id} has been {status}".format(
                        id=test_id, status=status))
                    var_average = 'average_time_' + test.platform.value
                    current_average = GeneralData.query.filter(
                        GeneralData.key == var_average).first()
                    average_time = 0
                    total_time = 0

                    if current_average is None:
                        # No average stored yet; compute it over all
                        # finished tests for this platform.
                        platform_tests = g.db.query(Test.id).filter(
                            Test.platform == test.platform).subquery()
                        finished_tests = g.db.query(
                            TestProgress.test_id).filter(
                            and_(
                                TestProgress.status.in_([
                                    TestStatus.canceled,
                                    TestStatus.completed
                                ]),
                                TestProgress.test_id.in_(platform_tests)
                            )).subquery()
                        in_progress_statuses = [
                            TestStatus.preparation,
                            TestStatus.completed,
                            TestStatus.canceled
                        ]
                        finished_tests_progress = g.db.query(
                            TestProgress).filter(
                            and_(
                                TestProgress.test_id.in_(finished_tests),
                                TestProgress.status.in_(in_progress_statuses)
                            )).subquery()
                        times = g.db.query(
                            finished_tests_progress.c.test_id,
                            label('time', func.group_concat(
                                finished_tests_progress.c.timestamp))
                        ).group_by(finished_tests_progress.c.test_id).all()

                        for p in times:
                            parts = p.time.split(',')
                            start = datetime.datetime.strptime(
                                parts[0], '%Y-%m-%d %H:%M:%S')
                            end = datetime.datetime.strptime(
                                parts[-1], '%Y-%m-%d %H:%M:%S')
                            total_time += (end - start).total_seconds()

                        if len(times) != 0:
                            average_time = total_time // len(times)

                        new_avg = GeneralData(var_average, average_time)
                        g.db.add(new_avg)
                        g.db.commit()
                    else:
                        # Fold the runtime of the test that just finished
                        # into the stored average.
                        all_results = TestResult.query.count()
                        regression_test_count = RegressionTest.query.count()
                        number_test = all_results / regression_test_count
                        updated_average = float(
                            current_average.value) * (number_test - 1)
                        pr = test.progress_data()
                        end_time = pr['end']
                        start_time = pr['start']

                        if end_time.tzinfo is not None:
                            end_time = end_time.replace(tzinfo=None)

                        if start_time.tzinfo is not None:
                            start_time = start_time.replace(tzinfo=None)

                        last_running_test = end_time - start_time
                        updated_average = updated_average + \
                            last_running_test.total_seconds()
                        current_average.value = updated_average // number_test
                        g.db.commit()

                    kvm = Kvm.query.filter(Kvm.test_id == test_id).first()
                    if kvm is not None:
                        log.debug("Removing KVM entry")
                        g.db.delete(kvm)
                        g.db.commit()

                # Post status update
                state = Status.PENDING
                target_url = url_for(
                    'test.by_id', test_id=test.id, _external=True)
                context = "CI - {name}".format(name=test.platform.value)

                if status == TestStatus.canceled:
                    state = Status.ERROR
                    message = 'Tests aborted due to an error; please check'
                elif status == TestStatus.completed:
                    # Determine if success or failure
                    # It fails if any of these happen:
                    # - A crash (unexpected exit code)
                    # - A not None value on the "got" of a TestResultFile
                    #   (meaning the hashes do not match)
                    crashes = g.db.query(count(TestResult.exit_code)).filter(
                        and_(TestResult.test_id == test.id,
                             TestResult.exit_code !=
                             TestResult.expected_rc)).scalar()
                    results_zero_rc = g.db.query(RegressionTest.id).filter(
                        RegressionTest.expected_rc == 0).subquery()
                    results = g.db.query(count(TestResultFile.got)).filter(
                        and_(TestResultFile.test_id == test.id,
                             TestResultFile.regression_test_id.in_(
                                 results_zero_rc),
                             TestResultFile.got.isnot(None))).scalar()
                    log.debug(
                        'Test {id} completed: {crashes} crashes, '
                        '{results} results'.format(
                            id=test.id, crashes=crashes, results=results))

                    if crashes > 0 or results > 0:
                        state = Status.FAILURE
                        message = 'Not all tests completed successfully, ' \
                                  'please check'
                    else:
                        state = Status.SUCCESS
                        message = 'Tests completed'
                    update_build_badge(state, test)
                else:
                    message = progress.message

                gh_commit = repository.statuses(test.commit)
                try:
                    gh_commit.post(state=state, description=message,
                                   context=context, target_url=target_url)
                except ApiError as a:
                    log.error('Got an exception while posting to GitHub! '
                              'Message: {message}'.format(message=a.message))

                if status in [TestStatus.completed, TestStatus.canceled]:
                    # Start next test if necessary, on the same platform
                    process = Process(target=start_platform,
                                      args=(g.db, repository, 60))
                    process.start()

            elif request.form['type'] == 'equality':
                log.debug('Equality for {t}/{rt}/{rto}'.format(
                    t=test_id, rt=request.form['test_id'],
                    rto=request.form['test_file_id']))
                rto = RegressionTestOutput.query.filter(
                    RegressionTestOutput.id ==
                    request.form['test_file_id']).first()

                if rto is None:
                    # Equality posted on a file that's presumably ignored
                    log.info('No rto for {test_id}: {test}'.format(
                        test_id=test_id, test=request.form['test_id']))
                else:
                    result_file = TestResultFile(
                        test.id, request.form['test_id'], rto.id,
                        rto.correct)
                    g.db.add(result_file)
                    g.db.commit()

            elif request.form['type'] == 'logupload':
                log.debug("Received log file for test {id}".format(
                    id=test_id))
                # File upload, process
                if 'file' in request.files:
                    uploaded_file = request.files['file']
                    filename = secure_filename(uploaded_file.filename)
                    if filename == '':
                        return 'EMPTY'

                    temp_path = os.path.join(
                        repo_folder, 'TempFiles', filename)
                    # Save to temporary location
                    uploaded_file.save(temp_path)
                    final_path = os.path.join(
                        repo_folder, 'LogFiles',
                        '{id}{ext}'.format(id=test.id, ext='.txt'))
                    os.rename(temp_path, final_path)
                    log.debug("Stored log file")

            elif request.form['type'] == 'upload':
                log.debug('Upload for {t}/{rt}/{rto}'.format(
                    t=test_id, rt=request.form['test_id'],
                    rto=request.form['test_file_id']))
                # File upload, process
                if 'file' in request.files:
                    uploaded_file = request.files['file']
                    filename = secure_filename(uploaded_file.filename)
                    if filename == '':
                        return 'EMPTY'

                    temp_path = os.path.join(
                        repo_folder, 'TempFiles', filename)
                    # Save to temporary location
                    uploaded_file.save(temp_path)
                    # Get hash and check if it's already been submitted
                    hash_sha256 = hashlib.sha256()
                    with open(temp_path, "rb") as f:
                        for chunk in iter(lambda: f.read(4096), b""):
                            hash_sha256.update(chunk)
                    file_hash = hash_sha256.hexdigest()
                    filename, file_extension = os.path.splitext(filename)
                    final_path = os.path.join(
                        repo_folder, 'TestResults',
                        '{hash}{ext}'.format(hash=file_hash,
                                             ext=file_extension))
                    os.rename(temp_path, final_path)
                    rto = RegressionTestOutput.query.filter(
                        RegressionTestOutput.id ==
                        request.form['test_file_id']).first()
                    result_file = TestResultFile(
                        test.id, request.form['test_id'], rto.id,
                        rto.correct, file_hash)
                    g.db.add(result_file)
                    g.db.commit()

            elif request.form['type'] == 'finish':
                log.debug('Finish for {t}/{rt}'.format(
                    t=test_id, rt=request.form['test_id']))
                regression_test = RegressionTest.query.filter(
                    RegressionTest.id == request.form['test_id']).first()
                result = TestResult(
                    test.id, regression_test.id, request.form['runTime'],
                    request.form['exitCode'], regression_test.expected_rc)
                g.db.add(result)
                try:
                    g.db.commit()
                except IntegrityError as e:
                    log.error('Could not save the results: {msg}'.format(
                        msg=e.message))

            return "OK"

    return "FAIL"
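# For reference, a minimal sketch of how a test runner inside the VM could
# drive the progress_reporter endpoint above. This assumes the `requests`
# package is available in the VM; the report URL (which already embeds the
# test id and token) is read from the `reportURL` file that kvm_processor
# writes. Form field names mirror what the handler parses; the concrete
# values below are hypothetical.
import requests

with open('reportURL') as f:
    report_url = f.read().strip()

# Log a progress step; the status string must be one that
# TestStatus.from_string understands (e.g. 'preparation').
response = requests.post(report_url, data={
    'type': 'progress',
    'status': 'preparation',
    'message': 'Preparing test run',
})
assert response.text in ('OK', 'FAIL')

# Report the outcome of a single regression test (hypothetical values).
requests.post(report_url, data={
    'type': 'finish',
    'test_id': 1,
    'runTime': 1500,
    'exitCode': 0,
})

# Upload the log file for the whole run.
with open('ci.log', 'rb') as log_file:
    requests.post(report_url, data={'type': 'logupload'},
                  files={'file': ('ci.log', log_file)})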
def kvm_processor(db, kvm_name, platform, repository, delay):
    """
    Check that the same KVM is not already running.
    Check whether the machine is in maintenance mode or not.
    Launch the KVM if it is not used by any other test.
    Create the testing XML files to test the changes in the main repo.
    Create a clone with a separate branch and merge the PR into it.
    """
    from run import config, log, app

    log.info("[{platform}] Running kvm_processor".format(platform=platform))
    if kvm_name == "":
        log.critical('[{platform}] KVM name is empty!'.format(
            platform=platform))
        return

    if delay is not None:
        import time
        log.debug('[{platform}] Sleeping for {time} seconds'.format(
            platform=platform, time=delay))
        time.sleep(delay)

    maintenance_mode = MaintenanceMode.query.filter(
        MaintenanceMode.platform == platform).first()
    if maintenance_mode is not None and maintenance_mode.disabled:
        log.debug('[{platform}] In maintenance mode! Waiting...'.format(
            platform=platform))
        return

    # Open connection to libvirt
    conn = libvirt.open("qemu:///system")
    if conn is None:
        log.critical(
            "[{platform}] Couldn't open connection to libvirt!".format(
                platform=platform))
        return

    try:
        vm = conn.lookupByName(kvm_name)
    except libvirt.libvirtError:
        log.critical("[{platform}] No VM named {name} found!".format(
            platform=platform, name=kvm_name))
        return

    vm_info = vm.info()
    if vm_info[0] != libvirt.VIR_DOMAIN_SHUTOFF:
        # Running, check expiry (2 hours runtime max)
        status = Kvm.query.filter(Kvm.name == kvm_name).first()
        max_runtime = config.get("KVM_MAX_RUNTIME", 120)
        if status is not None:
            if datetime.datetime.now() - status.timestamp >= \
                    datetime.timedelta(minutes=max_runtime):
                # Mark entry as aborted
                test_progress = TestProgress(
                    status.test.id, TestStatus.canceled, 'Runtime exceeded')
                db.add(test_progress)
                db.delete(status)
                db.commit()
                # Abort process
                if vm.destroy() == -1:
                    # Failed to shut down
                    log.critical(
                        "[{platform}] Failed to shut down {name}".format(
                            platform=platform, name=kvm_name))
                    return
            else:
                log.info("[{platform}] Current job not expired yet.".format(
                    platform=platform))
                return
        else:
            log.warn(
                "[{platform}] No task, but VM is running! Hard reset "
                "necessary".format(platform=platform))
            if vm.destroy() == -1:
                # Failed to shut down
                log.critical("[{platform}] Failed to shut down {name}".format(
                    platform=platform, name=kvm_name))
                return

    # Check if there's no KVM status left
    status = Kvm.query.filter(Kvm.name == kvm_name).first()
    if status is not None:
        log.warn("[{platform}] KVM is powered off, but test {id} still "
                 "present".format(platform=platform, id=status.test.id))
        db.delete(status)
        db.commit()

    # Get oldest test for this platform
    finished_tests = db.query(TestProgress.test_id).filter(
        TestProgress.status.in_(
            [TestStatus.canceled, TestStatus.completed])).subquery()
    test = Test.query.filter(
        and_(Test.id.notin_(finished_tests),
             Test.platform == platform)).order_by(Test.id.asc()).first()
    if test is None:
        log.info('[{platform}] No more tests to run, returning'.format(
            platform=platform))
        return
    if test.test_type == TestType.pull_request and test.pr_nr == 0:
        log.warn('[{platform}] Test {id} is invalid, deleting'.format(
            platform=platform, id=test.id))
        db.delete(test)
        db.commit()
        return

    # Reset to snapshot
    if vm.hasCurrentSnapshot() != 1:
        log.critical(
            "[{platform}] VM {name} has no current snapshot set!".format(
                platform=platform, name=kvm_name))
        return

    snapshot = vm.snapshotCurrent()
    if vm.revertToSnapshot(snapshot) == -1:
        log.critical(
            "[{platform}] Failed to revert to {snapshot} for {name}".format(
                platform=platform, snapshot=snapshot.getName(),
                name=kvm_name))
        return

    log.info("[{p}] Reverted to {snap} for {name}".format(
        p=platform, snap=snapshot.getName(), name=kvm_name))
    log.debug('Starting test {id}'.format(id=test.id))
    status = Kvm(kvm_name, test.id)

    # Prepare data
    # 0) Write url to file
    with app.app_context():
        full_url = url_for('ci.progress_reporter', test_id=test.id,
                           token=test.token, _external=True, _scheme="https")

    file_path = os.path.join(config.get('SAMPLE_REPOSITORY', ''), 'vm_data',
                             kvm_name, 'reportURL')
    with open(file_path, 'w') as f:
        f.write(full_url)

    # 1) Generate test files
    base_folder = os.path.join(config.get('SAMPLE_REPOSITORY', ''), 'vm_data',
                               kvm_name, 'ci-tests')
    categories = Category.query.order_by(Category.id.desc()).all()
    commit_name = 'fetch_commit_' + platform.value
    commit_hash = GeneralData.query.filter(
        GeneralData.key == commit_name).first().value
    last_commit = Test.query.filter(
        and_(Test.commit == commit_hash, Test.platform == platform)).first()
    log.debug("[{p}] We will compare against the results of test "
              "{id}".format(p=platform, id=last_commit.id))

    # Init collection file
    multi_test = etree.Element('multitest')
    for category in categories:
        # Skip categories without tests
        if len(category.regression_tests) == 0:
            continue
        # Create XML file for test
        file_name = '{name}.xml'.format(name=category.name)
        single_test = etree.Element('tests')
        for regression_test in category.regression_tests:
            entry = etree.SubElement(
                single_test, 'entry', id=str(regression_test.id))
            command = etree.SubElement(entry, 'command')
            command.text = regression_test.command
            input_node = etree.SubElement(
                entry, 'input', type=regression_test.input_type.value)
            # Need a path that is relative to the folder we provide inside
            # the CI environment.
            input_node.text = regression_test.sample.filename
            output_node = etree.SubElement(entry, 'output')
            output_node.text = regression_test.output_type.value
            compare = etree.SubElement(entry, 'compare')
            last_files = TestResultFile.query.filter(
                and_(TestResultFile.test_id == last_commit.id,
                     TestResultFile.regression_test_id ==
                     regression_test.id)).subquery()
            for output_file in regression_test.output_files:
                ignore_file = str(output_file.ignore).lower()
                file_node = etree.SubElement(
                    compare, 'file', ignore=ignore_file,
                    id=str(output_file.id))
                last_commit_files = db.query(last_files.c.got).filter(
                    and_(last_files.c.regression_test_output_id ==
                         output_file.id,
                         last_files.c.got.isnot(None))).first()
                correct = etree.SubElement(file_node, 'correct')
                # Need a path that is relative to the folder we provide
                # inside the CI environment.
                if last_commit_files is None:
                    correct.text = output_file.filename_correct
                else:
                    correct.text = output_file.create_correct_filename(
                        last_commit_files[0])
                expected = etree.SubElement(file_node, 'expected')
                expected.text = output_file.filename_expected(
                    regression_test.sample.sha)
        # Save XML
        single_test.getroottree().write(
            os.path.join(base_folder, file_name), encoding='utf-8',
            xml_declaration=True, pretty_print=True)
        # Append to collection file
        test_file = etree.SubElement(multi_test, 'testfile')
        location = etree.SubElement(test_file, 'location')
        location.text = file_name

    # Save collection file
    multi_test.getroottree().write(
        os.path.join(base_folder, 'TestAll.xml'), encoding='utf-8',
        xml_declaration=True, pretty_print=True)

    # 2) Create git repo clone and merge PR into it (if necessary)
    try:
        repo = Repo(os.path.join(config.get('SAMPLE_REPOSITORY', ''),
                                 'vm_data', kvm_name, 'unsafe-ccextractor'))
    except InvalidGitRepositoryError:
        log.critical(
            "[{platform}] Could not open CCExtractor's repository "
            "copy!".format(platform=platform))
        return

    # Return to master
    repo.heads.master.checkout(True)

    # Update repository from upstream
    try:
        origin = repo.remote('origin')
    except ValueError:
        log.critical("[{platform}] Origin remote doesn't exist!".format(
            platform=platform))
        return

    fetch_info = origin.fetch()
    if len(fetch_info) == 0:
        log.info('[{platform}] Fetch from remote returned no new '
                 'data...'.format(platform=platform))

    # Pull code (finally)
    pull_info = origin.pull()
    if len(pull_info) == 0:
        log.info("[{platform}] Pull from remote returned no new "
                 "data...".format(platform=platform))

    if pull_info[0].flags > 128:
        log.critical(
            "[{platform}] Didn't pull any information from remote: "
            "{flags}!".format(platform=platform, flags=pull_info[0].flags))
        return

    # Delete the test branch if it exists, and recreate
    try:
        repo.delete_head('CI_Branch', force=True)
    except GitCommandError:
        log.warn("[{platform}] Could not delete CI_Branch head".format(
            platform=platform))

    # Remove possible left rebase-apply directory
    try:
        shutil.rmtree(os.path.join(config.get('SAMPLE_REPOSITORY', ''),
                                   'unsafe-ccextractor', '.git',
                                   'rebase-apply'))
    except OSError:
        log.warn("[{platform}] Could not delete rebase-apply".format(
            platform=platform))

    # If PR, merge, otherwise reset to commit
    if test.test_type == TestType.pull_request:
        # Fetch PR (stored under origin/pull/<id>/head)
        pull_info = origin.fetch(
            'pull/{id}/head:CI_Branch'.format(id=test.pr_nr))
        if len(pull_info) == 0:
            log.warn("[{platform}] Didn't pull any information from remote "
                     "PR!".format(platform=platform))

        if pull_info[0].flags > 128:
            log.critical(
                "[{platform}] Didn't pull any information from remote PR: "
                "{flags}!".format(platform=platform,
                                  flags=pull_info[0].flags))
            return

        try:
            test_branch = repo.heads['CI_Branch']
        except IndexError:
            log.critical('CI_Branch does not exist')
            return

        test_branch.checkout(True)
        # TODO: check what happens on merge conflicts
        # Rebase on master
        # try:
        #     repo.git.rebase('master')
        # except GitCommandError:
        #     progress = TestProgress(
        #         test.id, TestStatus.preparation,
        #         'Rebase on master'
        #     )
        #     db.add(progress)
        #     progress = TestProgress(
        #         test.id, TestStatus.canceled,
        #         'Merge conflict, please resolve.'
        #     )
        #     db.add(progress)
        #     db.commit()
        #     # Report back
        #     gh_commit = repository.statuses(test.commit)
        #
        #     with app.app_context():
        #         target_url = url_for(
        #             'test.by_id', test_id=test.id, _external=True)
        #         context = "CI - %s" % test.platform.value
        #         gh_commit.post(
        #             state=Status.ERROR, description='Failed to rebase',
        #             context=context, target_url=target_url)
        #     # Return, so next one can be handled
        #     return
    else:
        test_branch = repo.create_head('CI_Branch', 'HEAD')
        test_branch.checkout(True)
        try:
            repo.head.reset(test.commit, working_tree=True)
        except GitCommandError:
            log.warn("[{platform}] Commit {hash} for test {id} does not "
                     "exist!".format(platform=platform, hash=test.commit,
                                     id=test.id))
            return

    # Power on machine
    try:
        vm.create()
        db.add(status)
        db.commit()
    except libvirt.libvirtError:
        log.critical("[{platform}] Failed to launch VM {name}".format(
            platform=platform, name=kvm_name))
    except IntegrityError:
        log.warn("[{platform}] Duplicate entry for {id}".format(
            platform=platform, id=test.id))
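# To make the generated XML shape concrete: a standalone sketch that mirrors
# the per-category file kvm_processor writes, using lxml with dummy values in
# place of the database-driven ones (the command, sample name, and output
# paths below are hypothetical).
from lxml import etree

single_test = etree.Element('tests')
entry = etree.SubElement(single_test, 'entry', id='1')
etree.SubElement(entry, 'command').text = '-autoprogram -out=ttxt -latin1'
etree.SubElement(entry, 'input', type='file').text = 'sample.ts'
etree.SubElement(entry, 'output').text = 'file'
compare = etree.SubElement(entry, 'compare')
file_node = etree.SubElement(compare, 'file', ignore='false', id='1')
etree.SubElement(file_node, 'correct').text = 'correct/sample_1.txt'
etree.SubElement(file_node, 'expected').text = 'expected/sha_1.txt'
# Prints the structure that the <location> entries in TestAll.xml point at:
# <tests>
#   <entry id="1">
#     <command>-autoprogram -out=ttxt -latin1</command>
#     <input type="file">sample.ts</input>
#     ...
#   </entry>
# </tests>
print(etree.tostring(single_test, pretty_print=True).decode())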
#!/usr/bin/python
import sys
from os import path

# Need to append server root path to ensure we can import the necessary files.
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))

# noinspection PyPep8
from mod_ci.controllers import start_ci_vm
# noinspection PyPep8
from run import config, log
# noinspection PyPep8
from database import create_session
# noinspection PyPep8
from github import GitHub

log.info('Running cron.py CI scheduler')
# Create session
db = create_session(config['DATABASE_URI'])
gh = GitHub(access_token=config['GITHUB_TOKEN'])
repository = gh.repos(config['GITHUB_OWNER'])(config['GITHUB_REPOSITORY'])
# Kick off start_ci_vm
start_ci_vm(db, repository)
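# How this script is scheduled is deployment-specific; a typical crontab
# entry (the path and interval here are assumptions) might look like:
#
#   */10 * * * * python /path/to/sample-platform/cron.py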
def kvm_processor(db, kvm_name, platform, repository, delay):
    from run import config, log, app

    if kvm_name == "":
        log.critical('KVM name is empty!')
        return

    if delay is not None:
        import time
        log.debug('Sleeping for {time} seconds'.format(time=delay))
        time.sleep(delay)

    # Open connection to libvirt
    conn = libvirt.open("qemu:///system")
    if conn is None:
        log.critical("Couldn't open connection to libvirt!")
        return

    try:
        vm = conn.lookupByName(kvm_name)
    except libvirt.libvirtError:
        log.critical("Couldn't find the Linux CI machine named %s" %
                     kvm_name)
        return

    vm_info = vm.info()
    if vm_info[0] != libvirt.VIR_DOMAIN_SHUTOFF:
        # Running, check expiry (2 hours runtime max)
        status = Kvm.query.filter(Kvm.name == kvm_name).first()
        max_runtime = config.get("KVM_MAX_RUNTIME", 120)
        if status is not None:
            if datetime.datetime.now() >= status.timestamp + \
                    datetime.timedelta(minutes=max_runtime):
                # Mark entry as aborted
                test_progress = TestProgress(
                    status.test.id, TestStatus.canceled, 'Runtime exceeded')
                db.add(test_progress)
                db.delete(status)
                db.commit()
                # Abort process
                if vm.destroy() == -1:
                    # Failed to shut down
                    log.critical("Failed to shut down %s" % kvm_name)
                    return
            else:
                log.info("Current job is still running and not expired")
                return
        else:
            log.warn("No currently running task, but VM is running! Hard "
                     "reset necessary")
            if vm.destroy() == -1:
                # Failed to shut down
                log.critical("Failed to shut down %s" % kvm_name)
                return

    # Check if there's no KVM status left
    status = Kvm.query.filter(Kvm.name == kvm_name).first()
    if status is not None:
        log.warn("KVM is powered off, but test is still in there: %s" %
                 status.test.id)
        db.delete(status)
        db.commit()

    # Get oldest test for this platform
    finished_tests = db.query(TestProgress.test_id).filter(
        TestProgress.status.in_(
            [TestStatus.canceled, TestStatus.completed])).subquery()
    test = Test.query.filter(
        and_(Test.id.notin_(finished_tests),
             Test.platform == platform)).order_by(Test.id.asc()).first()
    if test is None:
        log.info('No more tests to run, returning')
        return
    if test.test_type == TestType.pull_request and test.pr_nr == 0:
        log.warn('Got an invalid test with number %s, deleting' % test.id)
        db.delete(test)
        db.commit()
        return

    # Reset to snapshot
    if vm.hasCurrentSnapshot() != 1:
        log.critical("VM %s has no current snapshot set!" % kvm_name)
        return

    snapshot = vm.snapshotCurrent()
    if vm.revertToSnapshot(snapshot) == -1:
        log.critical("Failed to revert to snapshot %s for VM %s" %
                     (snapshot.getName(), kvm_name))
        return

    log.info('Reverted to snapshot %s for VM %s' %
             (snapshot.getName(), kvm_name))
    log.debug('Starting test %s' % test.id)
    status = Kvm(kvm_name, test.id)

    # Prepare data
    # 0) Write url to file
    with app.app_context():
        full_url = url_for('ci.progress_reporter', test_id=test.id,
                           token=test.token, _external=True, _scheme="https")

    file_path = os.path.join(config.get('SAMPLE_REPOSITORY', ''), 'reportURL')
    with open(file_path, 'w') as f:
        f.write(full_url)

    # 1) Generate test files
    base_folder = os.path.join(config.get('SAMPLE_REPOSITORY', ''),
                               'ci-tests')
    categories = Category.query.order_by(Category.id.desc()).all()
    # Init collection file
    multi_test = etree.Element('multitest')
    for category in categories:
        if len(category.regression_tests) == 0:
            # Skip categories without tests
            continue
        # Create XML file for test
        file_name = '{name}.xml'.format(name=category.name)
        single_test = etree.Element('tests')
        for regression_test in category.regression_tests:
            entry = etree.SubElement(
                single_test, 'entry', id=str(regression_test.id))
            command = etree.SubElement(entry, 'command')
            command.text = regression_test.command
            input_node = etree.SubElement(
                entry, 'input', type=regression_test.input_type.value)
            # Need a path that is relative to the folder we provide
            # inside the CI environment.
            input_node.text = regression_test.sample.filename
            output_node = etree.SubElement(entry, 'output')
            output_node.text = regression_test.output_type.value
            compare = etree.SubElement(entry, 'compare')
            for output_file in regression_test.output_files:
                file_node = etree.SubElement(
                    compare, 'file',
                    ignore='true' if output_file.ignore else 'false',
                    id=str(output_file.id))
                correct = etree.SubElement(file_node, 'correct')
                # Need a path that is relative to the folder we provide
                # inside the CI environment.
                correct.text = output_file.filename_correct
                expected = etree.SubElement(file_node, 'expected')
                expected.text = output_file.filename_expected(
                    regression_test.sample.sha)
        # Save XML
        single_test.getroottree().write(
            os.path.join(base_folder, file_name), encoding='utf-8',
            xml_declaration=True, pretty_print=True)
        # Append to collection file
        test_file = etree.SubElement(multi_test, 'testfile')
        location = etree.SubElement(test_file, 'location')
        location.text = file_name

    # Save collection file
    multi_test.getroottree().write(
        os.path.join(base_folder, 'TestAll.xml'), encoding='utf-8',
        xml_declaration=True, pretty_print=True)

    # 2) Create git repo clone and merge PR into it (if necessary)
    try:
        repo = Repo(os.path.join(config.get('SAMPLE_REPOSITORY', ''),
                                 'unsafe-ccextractor'))
    except InvalidGitRepositoryError:
        log.critical('Could not open CCExtractor\'s repository copy!')
        return

    # Return to master
    repo.heads.master.checkout(True)

    # Update repository from upstream
    try:
        origin = repo.remote('origin')
    except ValueError:
        log.critical('Origin remote doesn\'t exist!')
        return

    fetch_info = origin.fetch()
    if len(fetch_info) == 0:
        log.warn('No info fetched from remote!')

    # Pull code (finally)
    pull_info = origin.pull()
    if len(pull_info) == 0:
        log.warn('Didn\'t pull any information from remote!')

    if pull_info[0].flags > 128:
        log.critical('Didn\'t pull any information from remote: %s!' %
                     pull_info[0].flags)
        return

    # Delete the test branch if it exists, and recreate
    try:
        repo.delete_head('CI_Branch', force=True)
    except GitCommandError:
        log.warn('Could not delete CI_Branch head')

    # Remove possible left rebase-apply directory
    try:
        shutil.rmtree(os.path.join(config.get('SAMPLE_REPOSITORY', ''),
                                   'unsafe-ccextractor', '.git',
                                   'rebase-apply'))
    except OSError:
        log.warn('Could not delete rebase-apply')

    # If PR, merge, otherwise reset to commit
    if test.test_type == TestType.pull_request:
        # Fetch PR (stored under origin/pull/<id>/head)
        pull_info = origin.fetch(
            'pull/{id}/head:CI_Branch'.format(id=test.pr_nr))
        if len(pull_info) == 0:
            log.warn('Didn\'t pull any information from remote PR!')

        if pull_info[0].flags > 128:
            log.critical('Didn\'t pull any information from remote PR: '
                         '%s!' % pull_info[0].flags)
            return

        try:
            test_branch = repo.heads['CI_Branch']
        except IndexError:
            log.critical('CI_Branch does not exist')
            return

        # Check out branch
        test_branch.checkout(True)
        # Rebase on master
        # try:
        #     repo.git.rebase('master')
        # except GitCommandError:
        #     progress = TestProgress(
        #         test.id, TestStatus.preparation,
        #         'Rebase on master'
        #     )
        #     db.add(progress)
        #     progress = TestProgress(
        #         test.id, TestStatus.canceled,
        #         'Merge conflict, please resolve.'
        #     )
        #     db.add(progress)
        #     db.commit()
        #     # Report back
        #     gh_commit = repository.statuses(test.commit)
        #
        #     with app.app_context():
        #         target_url = url_for(
        #             'test.by_id', test_id=test.id, _external=True)
        #         context = "CI - %s" % test.platform.value
        #         gh_commit.post(
        #             state=Status.ERROR, description='Failed to rebase',
        #             context=context, target_url=target_url)
        #     # Return, so next one can be handled
        #     return
        # TODO: check what happens on merge conflicts
    else:
        test_branch = repo.create_head('CI_Branch', 'HEAD')
        # Check out branch for test purposes
        test_branch.checkout(True)
        try:
            repo.head.reset(test.commit, working_tree=True)
        except GitCommandError:
            log.warn('Git commit %s (test %s) does not exist!' %
                     (test.commit, test.id))
            return

    # Power on machine
    try:
        vm.create()
        db.add(status)
        db.commit()
    except libvirt.libvirtError:
        log.critical("Failed to launch VM %s" % kvm_name)
    except IntegrityError:
        log.warn("Duplicate entry for %s" % test.id)
def progress_reporter(test_id, token):
    from run import config, log

    # Verify token
    test = Test.query.filter(Test.id == test_id).first()
    if test is not None and test.token == token:
        if 'type' in request.form:
            if request.form['type'] == 'progress':
                # Progress, log
                status = TestStatus.from_string(request.form['status'])
                progress = TestProgress(
                    test.id, status, request.form['message'])
                g.db.add(progress)
                g.db.commit()

                gh = GitHub(access_token=g.github['bot_token'])
                repository = gh.repos(g.github['repository_owner'])(
                    g.github['repository'])

                # If status is complete, remove the Kvm entry
                if status in [TestStatus.completed, TestStatus.canceled]:
                    kvm = Kvm.query.filter(Kvm.test_id == test_id).first()
                    if kvm is not None:
                        g.db.delete(kvm)
                        g.db.commit()
                    # Start next test if necessary
                    start_ci_vm(g.db, repository, 60)

                # Post status update
                state = Status.PENDING
                message = 'Tests queued'
                target_url = url_for(
                    'test.by_id', test_id=test.id, _external=True)
                context = "CI - %s" % test.platform.value

                if status == TestStatus.canceled:
                    state = Status.ERROR
                    message = 'Tests aborted due to an error; please check'
                elif status == TestStatus.completed:
                    # Determine if success or failure
                    # It fails if any of these happen:
                    # - A crash (unexpected exit code)
                    # - A not None value on the "got" of a TestResultFile
                    #   (meaning the hashes do not match)
                    crashes = g.db.query(count(TestResult.exit_code)).filter(
                        and_(TestResult.test_id == test.id,
                             TestResult.exit_code !=
                             TestResult.expected_rc)).scalar()
                    results_zero_rc = g.db.query(RegressionTest.id).filter(
                        RegressionTest.expected_rc == 0).subquery()
                    results = g.db.query(count(TestResultFile.got)).filter(
                        and_(TestResultFile.test_id == test.id,
                             TestResultFile.regression_test_id.in_(
                                 results_zero_rc),
                             TestResultFile.got.isnot(None))).scalar()
                    log.debug(
                        'Test {id} completed: {crashes} crashes, {results} '
                        'results'.format(id=test.id, crashes=crashes,
                                         results=results))
                    if crashes > 0 or results > 0:
                        state = Status.FAILURE
                        message = 'Not all tests completed successfully, ' \
                                  'please check'
                    else:
                        state = Status.SUCCESS
                        message = 'Tests completed'
                else:
                    message = progress.message

                gh_commit = repository.statuses(test.commit)
                gh_commit.post(state=state, description=message,
                               context=context, target_url=target_url)

            elif request.form['type'] == 'equality':
                log.debug('Equality for {t}/{rt}/{rto}'.format(
                    t=test_id, rt=request.form['test_id'],
                    rto=request.form['test_file_id']))
                rto = RegressionTestOutput.query.filter(
                    RegressionTestOutput.id ==
                    request.form['test_file_id']).first()
                if rto is None:
                    # Equality posted on a file that's presumably ignored
                    log.info('No rto for {test_id}: {test}'.format(
                        test_id=test_id, test=request.form['test_id']))
                else:
                    result_file = TestResultFile(
                        test.id, request.form['test_id'], rto.id,
                        rto.correct)
                    g.db.add(result_file)
                    g.db.commit()

            elif request.form['type'] == 'logupload':
                # File upload, process
                if 'file' in request.files:
                    uploaded_file = request.files['file']
                    filename = secure_filename(uploaded_file.filename)
                    if filename == '':
                        return 'EMPTY'

                    temp_path = os.path.join(
                        config.get('SAMPLE_REPOSITORY', ''), 'TempFiles',
                        filename)
                    # Save to temporary location
                    uploaded_file.save(temp_path)
                    final_path = os.path.join(
                        config.get('SAMPLE_REPOSITORY', ''), 'LogFiles',
                        '{id}{ext}'.format(id=test.id, ext='.txt'))
                    os.rename(temp_path, final_path)

            elif request.form['type'] == 'upload':
                log.debug('Upload for {t}/{rt}/{rto}'.format(
                    t=test_id, rt=request.form['test_id'],
                    rto=request.form['test_file_id']))
                # File upload, process
                if 'file' in request.files:
                    uploaded_file = request.files['file']
                    filename = secure_filename(uploaded_file.filename)
                    if filename == '':
                        return 'EMPTY'

                    temp_path = os.path.join(
                        config.get('SAMPLE_REPOSITORY', ''), 'TempFiles',
                        filename)
                    # Save to temporary location
                    uploaded_file.save(temp_path)
                    # Get hash and check if it's already been submitted
                    hash_sha256 = hashlib.sha256()
                    with open(temp_path, "rb") as f:
                        for chunk in iter(lambda: f.read(4096), b""):
                            hash_sha256.update(chunk)
                    file_hash = hash_sha256.hexdigest()
                    filename, file_extension = os.path.splitext(filename)
                    final_path = os.path.join(
                        config.get('SAMPLE_REPOSITORY', ''), 'TestResults',
                        '{hash}{ext}'.format(hash=file_hash,
                                             ext=file_extension))
                    os.rename(temp_path, final_path)
                    rto = RegressionTestOutput.query.filter(
                        RegressionTestOutput.id ==
                        request.form['test_file_id']).first()
                    result_file = TestResultFile(
                        test.id, request.form['test_id'], rto.id,
                        rto.correct, file_hash)
                    g.db.add(result_file)
                    g.db.commit()

            elif request.form['type'] == 'finish':
                log.debug('Finish for {t}/{rt}'.format(
                    t=test_id, rt=request.form['test_id']))
                # Test was done
                regression_test = RegressionTest.query.filter(
                    RegressionTest.id == request.form['test_id']).first()
                result = TestResult(
                    test.id, regression_test.id, request.form['runTime'],
                    request.form['exitCode'], regression_test.expected_rc)
                g.db.add(result)
                g.db.commit()

            return "OK"

    return "FAIL"