def test_that_init_works_correctly(self):
    key = general_data.key2
    value = general_data.value2

    actual = GeneralData(key, value)

    self.assertEqual(actual.key, key)
    self.assertEqual(actual.value, value)
def test_that_representation_works(self):
    key = general_data.key2
    value = general_data.value2

    actual = GeneralData(key, value)
    expected = '<GeneralData {key}: {value}>'.format(key=key, value=value)

    self.assertEqual(str(actual), expected)
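# The two tests above exercise the constructor and __repr__ of GeneralData.
# A minimal sketch of a class that would satisfy them is shown below; this is
# an illustrative assumption, not the project's actual mod_home.models code
# (the real model is a SQLAlchemy declarative class backed by a key/value table).
class GeneralDataSketch:
    """Hypothetical stand-in for mod_home.models.GeneralData."""

    def __init__(self, key, value):
        # Store the key/value pair exactly as passed in, as the init test expects.
        self.key = key
        self.value = value

    def __repr__(self):
        # Matches the representation asserted in test_that_representation_works.
        return '<GeneralData {key}: {value}>'.format(key=self.key, value=self.value)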
def run():
    from mod_home.models import CCExtractorVersion, GeneralData
    from mod_regression.models import Category, RegressionTest, InputType, \
        OutputType, RegressionTestOutput
    from mod_sample.models import Sample
    from mod_upload.models import Upload
    from mod_auth.models import User
    from database import create_session

    db = create_session(sys.argv[1])
    entries = []

    categories = [
        Category('Broken', 'Samples that are broken'),
        Category('DVB', 'Samples that contain DVB subtitles'),
        Category('DVD', 'Samples that contain DVD subtitles'),
        Category('MP4', 'Samples that are stored in the MP4 format'),
        Category('General', 'General regression samples')
    ]
    entries.extend(categories)

    samples = [
        Sample('sample1', 'ts', 'sample1'),
        Sample('sample2', 'ts', 'sample2')
    ]
    entries.extend(samples)

    cc_version = CCExtractorVersion(
        '0.84', '2016-12-16T00:00:00Z',
        '77da2dc873cc25dbf606a3b04172aa9fb1370f32')
    entries.append(cc_version)

    regression_tests = [
        RegressionTest(1, '-autoprogram -out=ttxt -latin1',
                       InputType.file, OutputType.file, 3, 10),
        RegressionTest(2, '-autoprogram -out=ttxt -latin1 -ucla',
                       InputType.file, OutputType.file, 1, 10)
    ]
    entries.extend(regression_tests)

    gen_data = GeneralData('last_commit',
                           '71dffd6eb30c1f4b5cf800307de845072ce33262')
    entries.append(gen_data)

    regression_test_output = [
        RegressionTestOutput(1, "test1", "srt", "test1.srt"),
        RegressionTestOutput(2, "test2", "srt", "test2.srt")
    ]
    entries.extend(regression_test_output)

    for entry in entries:
        try:
            db.add(entry)
            db.commit()
        except IntegrityError:
            print("Entry already exists!", entry, flush=True)
            db.rollback()
def run():
    from mod_home.models import CCExtractorVersion, GeneralData
    from mod_regression.models import Category, RegressionTest, InputType, \
        OutputType
    from mod_sample.models import Sample
    from mod_upload.models import Upload
    from mod_auth.models import User
    from database import create_session

    db = create_session(sys.argv[1])

    categories = [
        Category('Broken', 'Samples that are broken'),
        Category('DVB', 'Samples that contain DVB subtitles'),
        Category('DVD', 'Samples that contain DVD subtitles'),
        Category('MP4', 'Samples that are stored in the MP4 format'),
        Category('General', 'General regression samples')
    ]
    db.add_all(categories)
    db.commit()

    samples = [
        Sample('sample1', 'ts', 'sample1'),
        Sample('sample2', 'ts', 'sample2')
    ]
    db.add_all(samples)
    db.commit()

    cc_version = CCExtractorVersion(
        '0.84', '2016-12-16', '77da2dc873cc25dbf606a3b04172aa9fb1370f32'
    )
    db.add(cc_version)
    db.commit()

    regression_tests = [
        RegressionTest(
            1, '-autoprogram -out=ttxt -latin1',
            InputType.file, OutputType.file, 3, 10),
        RegressionTest(
            2, '-autoprogram -out=ttxt -latin1 -ucla',
            InputType.file, OutputType.file, 1, 10)
    ]
    db.add_all(regression_tests)
    db.commit()

    gen_data = GeneralData(
        'last_commit', '71dffd6eb30c1f4b5cf800307de845072ce33262')
    db.add(gen_data)
    db.commit()
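# Both run() variants above read the database connection string from
# sys.argv[1]. A hypothetical entry point for the seeding script could look
# like the sketch below; the script name "seed.py" in the usage message and
# the __main__ guard itself are illustrative assumptions, not part of the
# original code.
if __name__ == '__main__':
    import sys

    if len(sys.argv) < 2:
        # Without this check, create_session(sys.argv[1]) would raise IndexError.
        print("Usage: python seed.py <database-uri>")
        sys.exit(1)
    run()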
def progress_reporter(test_id, token):
    """
    Handle the progress of a certain test after validating the token.

    If necessary, update the status on GitHub.

    :param test_id: The id of the test to update.
    :type test_id: int
    :param token: The token to check the validity of the request.
    :type token: str
    :return: Nothing.
    :rtype: None
    """
    from run import config, log

    # Verify token
    test = Test.query.filter(Test.id == test_id).first()
    if test is not None and test.token == token:
        repo_folder = config.get('SAMPLE_REPOSITORY', '')
        if 'type' in request.form:
            if request.form['type'] == 'progress':
                # Progress, log
                status = TestStatus.from_string(request.form['status'])
                # Check whether test is not running a previous status again
                istatus = TestStatus.progress_step(status)
                message = request.form['message']
                if len(test.progress) != 0:
                    laststatus = TestStatus.progress_step(
                        test.progress[-1].status)
                    if laststatus in [TestStatus.completed,
                                      TestStatus.canceled]:
                        return "FAIL"
                    if laststatus > istatus:
                        status = TestStatus.canceled
                        message = "Duplicate Entries"

                progress = TestProgress(test.id, status, message)
                g.db.add(progress)
                g.db.commit()

                gh = GitHub(access_token=g.github['bot_token'])
                repository = gh.repos(g.github['repository_owner'])(
                    g.github['repository'])

                # Store the test commit for testing in case of commit
                if status == TestStatus.completed:
                    commit_name = 'fetch_commit_' + test.platform.value
                    commit = GeneralData.query.filter(
                        GeneralData.key == commit_name).first()
                    fetch_commit = Test.query.filter(
                        and_(Test.commit == commit.value,
                             Test.platform == test.platform)).first()
                    if test.test_type == TestType.commit and \
                            test.id > fetch_commit.id:
                        commit.value = test.commit
                        g.db.commit()

                # If status is complete, remove the Kvm entry
                if status in [TestStatus.completed, TestStatus.canceled]:
                    log.debug("Test {id} has been {status}".format(
                        id=test_id, status=status))
                    var_average = 'average_time_' + test.platform.value
                    current_average = GeneralData.query.filter(
                        GeneralData.key == var_average).first()
                    average_time = 0
                    total_time = 0
                    if current_average is None:
                        platform_tests = g.db.query(Test.id).filter(
                            Test.platform == test.platform).subquery()
                        finished_tests = g.db.query(
                            TestProgress.test_id).filter(
                            and_(
                                TestProgress.status.in_([
                                    TestStatus.canceled,
                                    TestStatus.completed
                                ]),
                                TestProgress.test_id.in_(platform_tests)
                            )).subquery()
                        in_progress_statuses = [
                            TestStatus.preparation,
                            TestStatus.completed,
                            TestStatus.canceled
                        ]
                        finished_tests_progress = g.db.query(
                            TestProgress).filter(
                            and_(
                                TestProgress.test_id.in_(finished_tests),
                                TestProgress.status.in_(in_progress_statuses)
                            )).subquery()
                        times = g.db.query(
                            finished_tests_progress.c.test_id,
                            label('time', func.group_concat(
                                finished_tests_progress.c.timestamp))
                        ).group_by(finished_tests_progress.c.test_id).all()
                        for p in times:
                            parts = p.time.split(',')
                            start = datetime.datetime.strptime(
                                parts[0], '%Y-%m-%d %H:%M:%S')
                            end = datetime.datetime.strptime(
                                parts[-1], '%Y-%m-%d %H:%M:%S')
                            total_time += (end - start).total_seconds()
                        if len(times) != 0:
                            average_time = total_time // len(times)
                        new_avg = GeneralData(var_average, average_time)
                        g.db.add(new_avg)
                        g.db.commit()
                    else:
                        all_results = TestResult.query.count()
                        regression_test_count = RegressionTest.query.count()
                        number_test = all_results / regression_test_count
                        updated_average = float(
                            current_average.value) * (number_test - 1)
                        pr = test.progress_data()
                        end_time = pr['end']
                        start_time = pr['start']
                        if end_time.tzinfo is not None:
                            end_time = end_time.replace(tzinfo=None)
                        if start_time.tzinfo is not None:
                            start_time = start_time.replace(tzinfo=None)
                        last_running_test = end_time - start_time
                        updated_average = updated_average + \
                            last_running_test.total_seconds()
                        current_average.value = updated_average // number_test
                        g.db.commit()

                    kvm = Kvm.query.filter(Kvm.test_id == test_id).first()
                    if kvm is not None:
                        log.debug("Removing KVM entry")
                        g.db.delete(kvm)
                        g.db.commit()

                # Post status update
                state = Status.PENDING
                target_url = url_for(
                    'test.by_id', test_id=test.id, _external=True)
                context = "CI - {name}".format(name=test.platform.value)

                if status == TestStatus.canceled:
                    state = Status.ERROR
                    message = 'Tests aborted due to an error; please check'
                elif status == TestStatus.completed:
                    # Determine if success or failure
                    # It fails if any of these happen:
                    # - A crash (unexpected exit code)
                    # - A not None value on the "got" of a TestResultFile
                    #   (meaning the hashes do not match)
                    crashes = g.db.query(count(TestResult.exit_code)).filter(
                        and_(TestResult.test_id == test.id,
                             TestResult.exit_code != TestResult.expected_rc)
                    ).scalar()
                    results_zero_rc = g.db.query(RegressionTest.id).filter(
                        RegressionTest.expected_rc == 0).subquery()
                    results = g.db.query(count(TestResultFile.got)).filter(
                        and_(
                            TestResultFile.test_id == test.id,
                            TestResultFile.regression_test_id.in_(
                                results_zero_rc),
                            TestResultFile.got.isnot(None)
                        )).scalar()
                    log.debug(
                        'Test {id} completed: {crashes} crashes, '
                        '{results} results'.format(
                            id=test.id, crashes=crashes, results=results))
                    if crashes > 0 or results > 0:
                        state = Status.FAILURE
                        message = 'Not all tests completed successfully, ' \
                                  'please check'
                    else:
                        state = Status.SUCCESS
                        message = 'Tests completed'
                    update_build_badge(state, test)
                else:
                    message = progress.message

                gh_commit = repository.statuses(test.commit)
                try:
                    gh_commit.post(state=state, description=message,
                                   context=context, target_url=target_url)
                except ApiError as a:
                    log.error(
                        'Got an exception while posting to GitHub! '
                        'Message: {message}'.format(message=a.message))

                if status in [TestStatus.completed, TestStatus.canceled]:
                    # Start next test if necessary, on the same platform
                    process = Process(target=start_platform,
                                      args=(g.db, repository, 60))
                    process.start()

            elif request.form['type'] == 'equality':
                log.debug('Equality for {t}/{rt}/{rto}'.format(
                    t=test_id, rt=request.form['test_id'],
                    rto=request.form['test_file_id']))
                rto = RegressionTestOutput.query.filter(
                    RegressionTestOutput.id ==
                    request.form['test_file_id']).first()
                if rto is None:
                    # Equality posted on a file that's ignored presumably
                    log.info('No rto for {test_id}: {test}'.format(
                        test_id=test_id, test=request.form['test_id']))
                else:
                    result_file = TestResultFile(
                        test.id, request.form['test_id'], rto.id, rto.correct)
                    g.db.add(result_file)
                    g.db.commit()

            elif request.form['type'] == 'logupload':
                log.debug(
                    "Received log file for test {id}".format(id=test_id))
                # File upload, process
                if 'file' in request.files:
                    uploaded_file = request.files['file']
                    filename = secure_filename(uploaded_file.filename)
                    if filename == '':
                        return 'EMPTY'
                    temp_path = os.path.join(
                        repo_folder, 'TempFiles', filename)
                    # Save to temporary location
                    uploaded_file.save(temp_path)
                    final_path = os.path.join(
                        repo_folder, 'LogFiles',
                        '{id}{ext}'.format(id=test.id, ext='.txt'))
                    os.rename(temp_path, final_path)
                    log.debug("Stored log file")

            elif request.form['type'] == 'upload':
                log.debug('Upload for {t}/{rt}/{rto}'.format(
                    t=test_id, rt=request.form['test_id'],
                    rto=request.form['test_file_id']))
                # File upload, process
                if 'file' in request.files:
                    uploaded_file = request.files['file']
                    filename = secure_filename(uploaded_file.filename)
                    if filename == '':
                        return 'EMPTY'
                    temp_path = os.path.join(
                        repo_folder, 'TempFiles', filename)
                    # Save to temporary location
                    uploaded_file.save(temp_path)
                    # Get hash and check if it's already been submitted
                    hash_sha256 = hashlib.sha256()
                    with open(temp_path, "rb") as f:
                        for chunk in iter(lambda: f.read(4096), b""):
                            hash_sha256.update(chunk)
                    file_hash = hash_sha256.hexdigest()
                    filename, file_extension = os.path.splitext(filename)
                    final_path = os.path.join(
                        repo_folder, 'TestResults',
                        '{hash}{ext}'.format(
                            hash=file_hash, ext=file_extension))
                    os.rename(temp_path, final_path)
                    rto = RegressionTestOutput.query.filter(
                        RegressionTestOutput.id ==
                        request.form['test_file_id']).first()
                    result_file = TestResultFile(
                        test.id, request.form['test_id'], rto.id,
                        rto.correct, file_hash)
                    g.db.add(result_file)
                    g.db.commit()

            elif request.form['type'] == 'finish':
                log.debug('Finish for {t}/{rt}'.format(
                    t=test_id, rt=request.form['test_id']))
                regression_test = RegressionTest.query.filter(
                    RegressionTest.id == request.form['test_id']).first()
                result = TestResult(
                    test.id, regression_test.id, request.form['runTime'],
                    request.form['exitCode'], regression_test.expected_rc)
                g.db.add(result)
                try:
                    g.db.commit()
                except IntegrityError as e:
                    log.error(
                        'Could not save the results: {msg}'.format(msg=e))
        return "OK"
    return "FAIL"
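# For reference, progress_reporter() expects multipart/form-data POSTs whose
# 'type' field is one of 'progress', 'equality', 'logupload', 'upload' or
# 'finish'. A minimal sketch of how a test runner might report a progress
# update is shown below; the URL pattern and the use of the requests library
# are assumptions, since the route registration is not part of this excerpt.
import requests


def report_progress(base_url, test_id, token, status, message):
    """Hypothetical helper posting a 'progress' update to the platform."""
    url = '{base}/progress-reporter/{id}/{token}'.format(
        base=base_url, id=test_id, token=token)
    data = {'type': 'progress', 'status': status, 'message': message}
    response = requests.post(url, data=data)
    # The endpoint answers "OK" on success and "FAIL" on token/status errors.
    return response.text == "OK"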
def start_ci():
    """
    Gets called when the webhook on GitHub is triggered.

    The following events are processed (after verification):
        - Ping (for fun)
        - Push
        - Pull Request
        - Issues
    """
    if request.method != 'POST':
        return 'OK'
    else:
        abort_code = 418

        event = request.headers.get('X-GitHub-Event')
        if event == "ping":
            return json.dumps({'msg': 'Hi!'})

        x_hub_signature = request.headers.get('X-Hub-Signature')
        if not is_valid_signature(x_hub_signature, request.data,
                                  g.github['ci_key']):
            g.log.warning(
                'CI signature failed: {sig}'.format(sig=x_hub_signature))
            abort(abort_code)

        payload = request.get_json()
        if payload is None:
            g.log.warning(
                'CI payload is empty: {payload}'.format(payload=payload))
            abort(abort_code)

        gh = GitHub(access_token=g.github['bot_token'])
        repository = gh.repos(g.github['repository_owner'])(
            g.github['repository'])

        if event == "push":
            # If it's a push, and the 'after' hash is available, then it's a
            # commit, so run the tests
            if 'after' in payload:
                commit = payload['after']
                gh_commit = repository.statuses(commit)
                # Update the db to the new last commit
                ref = repository.git().refs('heads/master').get()
                last_commit = GeneralData.query.filter(
                    GeneralData.key == 'last_commit').first()
                for platform in TestPlatform.values():
                    commit_name = 'fetch_commit_' + platform
                    fetch_commit = GeneralData.query.filter(
                        GeneralData.key == commit_name).first()
                    if fetch_commit is None:
                        prev_commit = GeneralData(
                            commit_name, last_commit.value)
                        g.db.add(prev_commit)
                last_commit.value = ref['object']['sha']
                g.db.commit()
                queue_test(g.db, gh_commit, commit, TestType.commit)
            else:
                g.log.warning(
                    'Unknown push type! Dumping payload for analysis')
                g.log.debug(payload)

        elif event == "pull_request":
            # If it's a valid PR, run the tests
            commit = ''
            gh_commit = None
            pr_nr = payload['pull_request']['number']
            if payload['action'] in ['opened', 'synchronize', 'reopened']:
                try:
                    commit = payload['pull_request']['head']['sha']
                    gh_commit = repository.statuses(commit)
                except KeyError:
                    g.log.critical(
                        "Didn't find a SHA value for a newly opened PR!")
                    g.log.debug(payload)
                # Check if the user is blacklisted
                user_id = payload['pull_request']['user']['id']
                if BlockedUsers.query.filter(
                        BlockedUsers.userID == user_id).first() is not None:
                    g.log.critical("User Blacklisted")
                    gh_commit.post(
                        state=Status.ERROR,
                        description="CI start aborted. You may be blocked "
                                    "from accessing this functionality",
                        target_url=url_for('home.index', _external=True))
                    return 'ERROR'
                queue_test(g.db, gh_commit, commit, TestType.pull_request,
                           pr_nr=pr_nr)
            elif payload['action'] == 'closed':
                g.log.debug('PR was closed, no after hash available')
                # Cancel running queue
                tests = Test.query.filter(Test.pr_nr == pr_nr).all()
                for test in tests:
                    # Add canceled status only if the test hasn't started yet
                    if len(test.progress) > 0:
                        continue
                    progress = TestProgress(
                        test.id, TestStatus.canceled, "PR closed",
                        datetime.datetime.now())
                    g.db.add(progress)
                    repository.statuses(test.commit).post(
                        state=Status.FAILURE,
                        description="Tests canceled",
                        context="CI - {name}".format(
                            name=test.platform.value),
                        target_url=url_for(
                            'test.by_id', test_id=test.id, _external=True))

        elif event == "issues":
            issue_data = payload['issue']
            issue = Issue.query.filter(
                Issue.issue_id == issue_data['number']).first()
            if issue is not None:
                issue.title = issue_data['title']
                issue.status = issue_data['state']
                g.db.commit()
        else:
            # Unknown type
            g.log.warning(
                'CI unrecognized event: {event}'.format(event=event))

        return json.dumps({'msg': 'EOL'})
def start_ci():
    """
    Track the events occurring on the repository.

    Events that are tracked:
        Push: run the tests and update the last commit.
        Pull Request: if it is a PR, run the tests.
        Issues: update the status of recorded issues.
    """
    if request.method != 'POST':
        return 'OK'
    else:
        abort_code = 418

        event = request.headers.get('X-GitHub-Event')
        if event == "ping":
            return json.dumps({'msg': 'Hi!'})

        x_hub_signature = request.headers.get('X-Hub-Signature')
        if not is_valid_signature(x_hub_signature, request.data,
                                  g.github['ci_key']):
            g.log.warning('CI signature failed: %s' % x_hub_signature)
            abort(abort_code)

        payload = request.get_json()
        if payload is None:
            g.log.warning('CI payload is empty: %s' % payload)
            abort(abort_code)

        gh = GitHub(access_token=g.github['bot_token'])
        repository = gh.repos(g.github['repository_owner'])(
            g.github['repository'])

        if event == "push":
            # If it's a push, and the 'after' hash is available, then it's
            # a commit, so run the tests
            if 'after' in payload:
                commit = payload['after']
                gh_commit = repository.statuses(commit)
                # Update the db to the new last commit
                ref = repository.git().refs('heads/master').get()
                last_commit = GeneralData.query.filter(
                    GeneralData.key == 'last_commit').first()
                for platform in TestPlatform.values():
                    commit_name = 'fetch_commit_' + platform
                    fetch_commit = GeneralData.query.filter(
                        GeneralData.key == commit_name).first()
                    if fetch_commit is None:
                        prev_commit = GeneralData(
                            commit_name, last_commit.value)
                        g.db.add(prev_commit)
                last_commit.value = ref['object']['sha']
                g.db.commit()
                queue_test(g.db, repository, gh_commit, commit,
                           TestType.commit)
            else:
                g.log.warning('Unknown push type! Dumping payload for '
                              'analysis')
                g.log.debug(payload)

        elif event == "pull_request":
            # If it's a PR, run the tests
            if payload['action'] == 'opened':
                try:
                    commit = payload['pull_request']['head']['sha']
                except KeyError:
                    g.log.critical(
                        "Didn't find a SHA value for a newly opened PR!")
                    g.log.debug(payload)
                    commit = ''
            elif payload['action'] == 'closed':
                g.log.debug('PR was closed, no after hash available')
                commit = ''
            else:
                try:
                    commit = payload['after']
                except KeyError:
                    g.log.critical("Didn't find the after SHA for the "
                                   "updated commit!")
                    g.log.debug(payload)
                    commit = ''

            pr_nr = payload['pull_request']['number']
            gh_commit = repository.statuses(commit)

            if payload['action'] == 'opened':
                # Run initial tests
                queue_test(g.db, repository, gh_commit, commit,
                           TestType.pull_request, pr_nr=pr_nr)
            elif payload['action'] == 'synchronize':
                # Run/queue a new test set
                queue_test(g.db, repository, gh_commit, commit,
                           TestType.pull_request, pr_nr=pr_nr)
            elif payload['action'] == 'closed':
                # Cancel running queue
                tests = Test.query.filter(Test.pr_nr == pr_nr).all()
                for test in tests:
                    # Add canceled status only if the test hasn't started yet
                    if len(test.progress) > 0:
                        continue
                    progress = TestProgress(
                        test.id, TestStatus.canceled, "PR closed",
                        datetime.datetime.now())
                    g.db.add(progress)
                    repository.statuses(test.commit).post(
                        state=Status.FAILURE,
                        description="Tests canceled",
                        context="CI - %s" % test.platform.value,
                        target_url=url_for(
                            'test.by_id', test_id=test.id, _external=True))
            elif payload['action'] == 'reopened':
                # Run tests again
                queue_test(g.db, repository, gh_commit, commit,
                           TestType.pull_request)

        elif event == "issues":
            issue_data = payload['issue']
            issue = Issue.query.filter(
                Issue.issue_id == issue_data['number']).first()
            if issue is not None:
                issue.title = issue_data['title']
                issue.status = issue_data['state']
                g.db.commit()
        else:
            # Unknown type
            g.log.warning('CI unrecognized event: %s' % event)

        return json.dumps({'msg': 'EOL'})
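# Both start_ci() variants call is_valid_signature() to verify the
# X-Hub-Signature header before trusting the payload. Its implementation is
# not part of this excerpt; given how it is called (signature header, raw
# request body, shared secret), a typical HMAC check might look like the
# following sketch, which is an assumption rather than the project's code.
import hashlib
import hmac


def is_valid_signature(x_hub_signature, data, private_key):
    """Hypothetical check: compare GitHub's signature against our own HMAC."""
    if x_hub_signature is None:
        return False
    # Header format is "<algorithm>=<hex digest>", e.g. "sha1=abc123...".
    algorithm, _, expected_digest = x_hub_signature.partition('=')
    if not hasattr(hashlib, algorithm):
        return False
    mac = hmac.new(private_key.encode(), msg=data,
                   digestmod=getattr(hashlib, algorithm))
    # Constant-time comparison to avoid leaking digest information.
    return hmac.compare_digest(mac.hexdigest(), expected_digest)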
def setUp(self):
    self.app.preprocess_request()
    g.db = create_session(self.app.config['DATABASE_URI'], drop_tables=True)
    # Enable foreign keys for unit tests
    g.db.execute('pragma foreign_keys=on')

    general_data = [
        GeneralData('last_commit',
                    "1978060bf7d2edd119736ba3ba88341f3bec3323"),
        GeneralData(f'fetch_commit_{TestPlatform.linux.value}',
                    "1978060bf7d2edd119736ba3ba88341f3bec3323"),
        GeneralData(f'fetch_commit_{TestPlatform.windows.value}',
                    "1978060bf7d2edd119736ba3ba88341f3bec3323")
    ]
    g.db.add_all(general_data)

    self.ccextractor_version = CCExtractorVersion(
        "1.2.3", "2013-02-27T19:35:32Z",
        "1978060bf7d2edd119736ba3ba88341f3bec3323")
    g.db.add(self.ccextractor_version)

    fork = Fork(
        f"https://github.com/{g.github['repository_owner']}/"
        f"{g.github['repository']}.git"
    )
    g.db.add(fork)
    g.db.commit()

    dummy_user = User(signup_information['existing_user_name'],
                      signup_information['existing_user_role'],
                      signup_information['existing_user_email'],
                      signup_information['existing_user_pwd'])
    g.db.add(dummy_user)
    g.db.commit()

    test = [
        Test(TestPlatform.linux, TestType.pull_request, 1, "master",
             "1978060bf7d2edd119736ba3ba88341f3bec3323", 1),
        Test(TestPlatform.linux, TestType.pull_request, 1, "master",
             "abcdefgh", 1)
    ]
    g.db.add_all(test)
    g.db.commit()

    categories = [
        Category("Broken", "Samples that are broken"),
        Category("DVB", "Samples that contain DVB subtitles"),
        Category("DVD", "Samples that contain DVD subtitles"),
        Category("MP4", "Samples that are stored in the MP4 format"),
        Category("General", "General regression samples")
    ]
    g.db.add_all(categories)
    g.db.commit()

    samples = [
        Sample("sample1", "ts", "sample1"),
        Sample("sample2", "ts", "sample2")
    ]
    g.db.add_all(samples)
    g.db.commit()

    upload = [
        Upload(1, 1, 1, Platform.windows),
        Upload(1, 2, 1, Platform.linux)
    ]
    g.db.add_all(upload)
    g.db.commit()

    regression_tests = [
        RegressionTest(1, "-autoprogram -out=ttxt -latin1 -2",
                       InputType.file, OutputType.file, 3, 10),
        RegressionTest(2, "-autoprogram -out=ttxt -latin1 -ucla",
                       InputType.file, OutputType.file, 1, 10)
    ]
    g.db.add_all(regression_tests)
    g.db.commit()

    categories[0].regression_tests.append(regression_tests[0])
    categories[2].regression_tests.append(regression_tests[1])

    regression_test_outputs = [
        RegressionTestOutput(1, "sample_out1", ".srt", ""),
        RegressionTestOutput(2, "sample_out2", ".srt", "")
    ]
    g.db.add_all(regression_test_outputs)
    g.db.commit()

    rtof = RegressionTestOutputFiles("bluedabadee", 2)
    g.db.add(rtof)
    g.db.commit()

    test_result_progress = [
        TestProgress(1, TestStatus.preparation, "Test 1 preparation"),
        TestProgress(1, TestStatus.building, "Test 1 building"),
        TestProgress(1, TestStatus.testing, "Test 1 testing"),
        TestProgress(1, TestStatus.completed, "Test 1 completed"),
        TestProgress(2, TestStatus.preparation, "Test 2 preparation"),
        TestProgress(2, TestStatus.building, "Test 2 building"),
        TestProgress(2, TestStatus.testing, "Test 2 testing"),
        TestProgress(2, TestStatus.completed, "Test 2 completed")
    ]
    g.db.add_all(test_result_progress)
    g.db.commit()

    test_results = [
        TestResult(1, 1, 200, 0, 0),
        TestResult(1, 2, 601, 0, 0),
        TestResult(2, 1, 200, 200, 0),
        TestResult(2, 2, 601, 0, 0)
    ]
    g.db.add_all(test_results)
    g.db.commit()

    test_result_files = [
        TestResultFile(1, 1, 1, "sample_out1"),
        TestResultFile(1, 2, 2, "sample_out2"),
        TestResultFile(2, 1, 1, "sample_out1"),
        TestResultFile(2, 2, 2, "sample_out2", "out2")
    ]
    g.db.add_all(test_result_files)
    g.db.commit()

    forbidden_mime = ForbiddenMimeType("application/javascript")
    forbidden_ext = [ForbiddenExtension("js"), ForbiddenExtension("com")]
    g.db.add(forbidden_mime)
    g.db.add_all(forbidden_ext)
    g.db.commit()
def setUp(self):
    self.app.preprocess_request()
    g.db = create_session(self.app.config['DATABASE_URI'], drop_tables=True)
    # Enable foreign keys for unit tests
    g.db.execute('pragma foreign_keys=on')

    commit_name_linux = 'fetch_commit_' + TestPlatform.linux.value
    commit_name_windows = 'fetch_commit_' + TestPlatform.windows.value
    general_data = [
        GeneralData('last_commit',
                    '1978060bf7d2edd119736ba3ba88341f3bec3323'),
        GeneralData(commit_name_linux,
                    '1978060bf7d2edd119736ba3ba88341f3bec3323'),
        GeneralData(commit_name_windows,
                    '1978060bf7d2edd119736ba3ba88341f3bec3323')
    ]
    g.db.add_all(general_data)

    self.ccextractor_version = CCExtractorVersion(
        '1.2.3', '2013-02-27T19:35:32Z',
        '1978060bf7d2edd119736ba3ba88341f3bec3323')
    g.db.add(self.ccextractor_version)

    fork_url = 'https://github.com/{user}/{repo}.git'.format(
        user=g.github['repository_owner'], repo=g.github['repository'])
    fork = Fork(fork_url)
    g.db.add(fork)
    g.db.commit()

    dummy_user = User(signup_information['existing_user_name'],
                      signup_information['existing_user_role'],
                      signup_information['existing_user_email'],
                      signup_information['existing_user_pwd'])
    g.db.add(dummy_user)
    g.db.commit()

    test = [
        Test(TestPlatform.linux, TestType.pull_request, 1, 'master',
             '1978060bf7d2edd119736ba3ba88341f3bec3323', 1),
        Test(TestPlatform.linux, TestType.pull_request, 1, 'master',
             'abcdefgh', 1)
    ]
    g.db.add_all(test)
    g.db.commit()

    categories = [
        Category('Broken', 'Samples that are broken'),
        Category('DVB', 'Samples that contain DVB subtitles'),
        Category('DVD', 'Samples that contain DVD subtitles'),
        Category('MP4', 'Samples that are stored in the MP4 format'),
        Category('General', 'General regression samples')
    ]
    g.db.add_all(categories)
    g.db.commit()

    samples = [
        Sample('sample1', 'ts', 'sample1'),
        Sample('sample2', 'ts', 'sample2')
    ]
    g.db.add_all(samples)
    g.db.commit()

    upload = [
        Upload(1, 1, 1, Platform.windows),
        Upload(1, 2, 1, Platform.linux)
    ]
    g.db.add_all(upload)
    g.db.commit()

    regression_tests = [
        RegressionTest(1, '-autoprogram -out=ttxt -latin1 -2',
                       InputType.file, OutputType.file, 3, 10),
        RegressionTest(2, '-autoprogram -out=ttxt -latin1 -ucla',
                       InputType.file, OutputType.file, 1, 10)
    ]
    g.db.add_all(regression_tests)
    g.db.commit()

    categories[0].regression_tests.append(regression_tests[0])
    categories[2].regression_tests.append(regression_tests[1])

    regression_test_outputs = [
        RegressionTestOutput(1, 'sample_out1', '.srt', ''),
        RegressionTestOutput(2, 'sample_out2', '.srt', '')
    ]
    g.db.add_all(regression_test_outputs)
    g.db.commit()

    test_result_progress = [
        TestProgress(1, TestStatus.preparation, "Test 1 preparation"),
        TestProgress(1, TestStatus.building, "Test 1 building"),
        TestProgress(1, TestStatus.testing, "Test 1 testing"),
        TestProgress(1, TestStatus.completed, "Test 1 completed"),
        TestProgress(2, TestStatus.preparation, "Test 2 preparation"),
        TestProgress(2, TestStatus.building, "Test 2 building"),
        TestProgress(2, TestStatus.testing, "Test 2 testing"),
        TestProgress(2, TestStatus.completed, "Test 2 completed")
    ]
    g.db.add_all(test_result_progress)
    g.db.commit()

    test_results = [
        TestResult(1, 1, 200, 0, 0),
        TestResult(1, 2, 601, 0, 0),
        TestResult(2, 1, 200, 200, 0),
        TestResult(2, 2, 601, 0, 0)
    ]
    g.db.add_all(test_results)
    g.db.commit()

    test_result_files = [
        TestResultFile(1, 1, 1, 'sample_out1'),
        TestResultFile(1, 2, 2, 'sample_out2'),
        TestResultFile(2, 1, 1, 'sample_out1'),
        TestResultFile(2, 2, 2, 'sample_out2', 'out2')
    ]
    g.db.add_all(test_result_files)
    g.db.commit()

    forbidden_mime = ForbiddenMimeType('application/javascript')
    forbidden_ext = [ForbiddenExtension('js'), ForbiddenExtension('com')]
    g.db.add(forbidden_mime)
    g.db.add_all(forbidden_ext)
    g.db.commit()