def runTest(files, fun):
    """Run *fun* against a throwaway config whose student dir contains *files*.

    Creates a temp base dir, builds a config for it, creates the student's
    submission dir with empty files named after *files*, then calls fun(config).
    """
    with shell.tempDir() as baseDir:
        cfg = mkConfig(baseDir)
        submissionDir = shell.pjoin(baseDir, student)
        shell.mkdir(submissionDir)
        for name in files:
            shell.touch(shell.pjoin(submissionDir, name))
        fun(cfg)
def copyFileIfNotExists(srcDir, path, targetDir):
    """Copy srcDir/path to targetDir/path unless an identical copy already exists.

    Raises IOError when the source is not a file, when the target location is a
    directory, or when the target file exists with different content.
    """
    srcPath = shell.pjoin(srcDir, path)
    if not shell.isFile(srcPath):
        raise IOError(f'{srcPath} must be a file')
    tgtPath = shell.pjoin(targetDir, path)
    if shell.isDir(tgtPath):
        raise IOError(f'{tgtPath} must not be a directory')
    shell.mkdir(shell.dirname(tgtPath), createParents=True)
    if not shell.isFile(tgtPath):
        shell.cp(srcPath, tgtPath)
    elif not hasSameContent(srcPath, tgtPath):
        raise IOError(f'Target file {tgtPath} already exists with content different than in {srcPath}')
def copyIntoStudentDir(assignment: Assignment, studentDir: str):
    """Copy each of the assignment's items into *studentDir*.

    Items whose target is already identical are skipped; a differing existing
    target is moved to a backup name before the copy.
    """
    for src in assignment.itemsToCopy:
        target = shell.pjoin(studentDir, shell.basename(src))
        if fileSystemItemEquals(src, target):
            continue  # already up to date
        print(f'Copying {src} to {studentDir} ...')
        moveToBackup(target)
        shell.cp(src, studentDir)
def parse(baseDir, configDict, ymlDict):
    """Build a Config from the parsed check.yml dictionary *ymlDict*."""
    assignments = [
        Assignment.parse(key, [value, ymlDict])
        for key, value in ymlDict['assignments'].items()
    ]
    # 'test-dir' is optional; default to <baseDir>/tests.
    testDir = ymlDict.get('test-dir', shell.pjoin(baseDir, 'tests'))
    return Config(baseDir, configDict, assignments, testDir)
def runJplag(opts, args, fileGlob=None):
    """Run the bundled jplag jar with *opts* and report similarity findings.

    Parses jplag's stdout line by line: 'Comparing A - B: <score>' lines whose
    score reaches args.minScore are printed in red; the results line gets the
    index.html suffix appended; known parser noise is ignored; everything else
    goes to verbose().

    opts: extra command-line options passed to the jplag jar.
    args: parsed CLI arguments; uses args.minScore and args.printDiff.
    fileGlob: forwarded to printDiffForJplag for (near-)identical pairs.
    """
    jarName = 'jplag-3.0.0-jar-with-dependencies.jar'
    jarFile = shell.pjoin(shell.dirname(__file__), '..', 'resources', jarName)
    print('Running jplag, this might take a while ...')
    res = shell.run(['java', '-jar', jarFile] + opts, captureStdout=True, stderrToStdout=True)
    haveMinScore = False
    for l in res.stdout.split('\n'):
        if 'no viable alternative at input' in l:
            pass  # jplag parser noise, ignore
        elif 'nothing to parse for submission' in l:
            pass  # empty submission, ignore
        elif l.startswith('Comparing '):
            # The similarity score follows the last ':' on the line.
            i = -1
            try:
                i = l.rindex(':')
            except ValueError:
                verbose(l)
            if i >= 0:
                score = float(l[i + 1:].strip())
                if score >= args.minScore:
                    haveMinScore = True
                    print(ansi.red(l))
                    # NOTE(review): 99.99999999999999999 rounds to 100.0 as a
                    # float, so the diff is only shown for exact 100% matches —
                    # confirm that is intended.
                    if args.printDiff and score >= 99.99999999999999999:
                        printDiffForJplag(l, fileGlob)
        elif 'Writing results to' in l:
            print(ansi.blue(l + "/index.html"))
        else:
            verbose(l)
    if not haveMinScore:
        print(f'No submissions detected with similaries >= {args.minScore}')
def runJplagForFile(lang, kind, file, args):
    """Run jplag for a single assignment file named *file*."""
    print()
    print(ansi.green(f'Running jplag for file {file}'))
    jplagOpts = [
        '-r', shell.pjoin(resultDir, kind, file),
        '-l', lang,
        '-p', file,
        '.'
    ]
    runJplag(jplagOpts, args, file)
def parse(baseDir, id, dicts):
    """Parse one assignment entry from the layered config dicts.

    Tests come either from an explicit 'tests' mapping, or (legacy layout)
    from singular/plural test-file and test-filter keys. A truthy
    'disable-tests' key discards all parsed tests.
    """
    points = getFromDicts(dicts, Keys.points, int)
    kind = getFromDicts(dicts, Keys.kind)
    tests = getFromDicts(dicts, Keys.tests, default={}, fail=False)
    if not tests:
        # Legacy layout: tests given directly via file/filter keys.
        files = getSingularPlural(dicts, Keys.testFile, Keys.testFiles)
        filters = getSingularPlural(dicts, Keys.testFilter, Keys.testFilters)
        dir = getFromDicts(dicts, Keys.testDir, default=defaultTestDir(baseDir))
        parsedTests = []
        for f in files:
            testId = str(id)
            if len(files) + len(filters) > 1:
                # Disambiguate ids when one assignment has several tests.
                testId = f'{testId}_{shell.removeExt(shell.basename(f))}'
            parsedTests.append(Test(testId, dir, None, shell.pjoin(dir, f)))
        for f in filters:
            testId = str(id)
            if len(files) + len(filters) > 1:
                testId = f'{testId}_{f.replace("*", "ALL")}'
            parsedTests.append(Test(testId, dir, f, None))
    else:
        parsedTests = []
        for k, v in tests.items():
            # Per-test dict takes precedence over the assignment-level dicts.
            t = Test.parse(baseDir, id, k, [v] + dicts)
            parsedTests.append(t)
    disabledTests = getFromDicts(dicts, 'disable-tests', default=False)
    if disabledTests:
        parsedTests = []  # 'disable-tests' wipes everything parsed above
    return Assignment(id, baseDir, points, kind, parsedTests, dicts)
def mkConfig(baseDir, configDict):
    """Load check.yml from *baseDir* and build a Config from its text.

    Aborts when baseDir does not exist or the config file is missing.
    """
    if not shell.isdir(baseDir):
        # BUG FIX: the message was a plain string, so '{baseDir}' was
        # printed literally instead of the actual directory name.
        abort(f'Base directory {baseDir} does not exist')
    yamlPath = shell.pjoin(baseDir, 'check.yml')
    if not shell.isFile(yamlPath):
        abort(f'Config file {yamlPath} not found')
    s = utils.readFile(yamlPath)
    return mkConfigFromString(baseDir, configDict, s)
def mkConfig(baseDir, configDict):
    """Load check.yml from *baseDir*, parse it as YAML, and build a Config.

    Aborts when baseDir does not exist or the config file is missing.
    """
    if not shell.isdir(baseDir):
        # BUG FIX: the message was a plain string, so '{baseDir}' was
        # printed literally instead of the actual directory name.
        abort(f'Base directory {baseDir} does not exist')
    yamlPath = shell.pjoin(baseDir, 'check.yml')
    if not shell.isFile(yamlPath):
        abort(f'Config file {yamlPath} not found')
    s = utils.readFile(yamlPath)
    # check.yml is trusted local input; FullLoader avoids arbitrary objects.
    ymlDict = yaml.load(s, Loader=yaml.FullLoader)
    return Config.parse(baseDir, configDict, ymlDict)
def getFile(self, k, d, fail=True):
    """Look up key *k* in self.dicts and resolve it relative to directory *d*.

    Returns None for a missing key when fail is False; otherwise raises
    ValueError for a missing key.
    """
    value = getFromDicts(self.dicts, k, fail=fail)
    if value is not None:
        return shell.pjoin(d, value)
    if fail:
        raise ValueError(f'Key {k} must be set for assignment {self.id}')
    return None
def action(student, k, d, missing, superfluous):
    """Rename uniquely matching superfluous files in *d* to missing expected names.

    A superfluous file matches a missing name when it ends with that name, or
    when it is the only superfluous file at all. Ambiguous matches are reported
    and skipped.
    """
    for m in missing:
        candidates = [s for s in superfluous if s.endswith(m) or len(superfluous) == 1]
        verbose(
            f'Fixing filenames for student {student} and kind {k}. candidates={candidates}'
        )
        if len(candidates) > 1:
            print(
                f'Cannot fix name of assignment {m} for {student} because there is more than one matching file'
            )
        elif len(candidates) == 1:
            src = shell.pjoin(d, candidates[0])
            tgt = shell.pjoin(d, m)
            verbose(f'Renaming {src} to {tgt}')
            shell.mv(src, tgt)
def moveToBackup(path):
    """Move *path* aside to a hidden '.<name>.bak[.i]' sibling, if it exists.

    Tries up to 1000 numbered backup names; raises ValueError when all are taken.
    """
    if not shell.exists(path):
        return
    base = shell.pjoin(shell.dirname(path), '.' + shell.basename(path) + ".bak")
    for i in range(1000):
        backupName = base if i == 0 else base + "." + str(i)
        if not shell.exists(backupName):
            shell.mv(path, backupName)
            return
    raise ValueError(f"too many backups for {path}")
def fixFilenames(config):
    """Rename submission files that don't match the expected assignment names.

    For every student submission dir, a file that is not an expected name but
    uniquely matches one (suffix match, or being the only extra file) is renamed
    to the expected name. Ambiguous matches are reported and skipped.
    """
    submissionDirs = collectSubmissionDirs(config)
    for d in submissionDirs:
        student = shell.basename(d)
        files = set([
            shell.basename(f) for f in shell.ls(d, config.submissionFileGlob)
        ])
        expected = set(config.assignments)
        missing = expected - files
        superfluous = files - expected
        for m in missing:
            candidates = []
            for s in superfluous:
                # Suffix match, or a single leftover file is assumed to be it.
                if s.endswith(m) or len(superfluous) == 1:
                    candidates.append(s)
            if len(candidates) > 1:
                print(
                    f'Cannot fix name of assignment {m} for {student} because there is more than one matching file'
                )
            elif len(candidates) == 1:
                c = candidates[0]
                # Windows
                # NOTE(review): shells out to 'mv -i' (interactive) instead of
                # shell.mv as the sibling action() does — presumably to avoid
                # clobbering; confirm this works where intended.
                shell.run(['mv', '-i', shell.pjoin(d, c), shell.pjoin(d, m)])
def copyTemplate(studentDir: str, studentId: str, path: str, copy: bool):
    """Return the per-student path for template *path*, materializing it if needed.

    The TEMPLATE marker in the basename is replaced by the student id. When the
    per-student file already exists it is returned as-is; otherwise it is copied
    from the template (with the ID cell filled in) when *copy* is true, or None
    is returned when *copy* is false.
    """
    (stem, ext) = shell.splitExt(shell.basename(path))
    # Longest markers first, so '_TEMPLATE_' is stripped cleanly.
    for marker in ['_TEMPLATE_', 'TEMPLATE_', '_TEMPLATE', 'TEMPLATE']:
        stem = stem.replace(marker, '')
    newPath = shell.pjoin(studentDir, stem + '_' + studentId) + ext
    if shell.isFile(newPath):
        return newPath
    if not copy:
        return None
    note(f"Copying template {path} to {newPath}")
    shell.cp(path, newPath)
    spreadsheet.replaceData(newPath, 'ID', 'STUDENT_ID', studentId)
    return newPath
def addComment(cfg):
    """Write the standard (German) comments file into every submission dir."""
    submissionDirs = collectSubmissionDirs(cfg)
    print(submissionDirs)
    for d in submissionDirs:
        p = shell.pjoin(d, cfg.commentsFile)
        verbose(f'Writing file {p}')
        writeFile(
            p, """In dieser Datei stehen allgemeine Kommentare zur Abgabe.
Die Bewertung finden Sie in der Datei POINTS.txt.
Möglicherweise enthalten die Quelldateien aufgaben-spezifische Kommentare.
Diese sind mit TUTOR/TUTORIN oder DOZENT/DOZENTIN gekennzeichnet.
=============================================================================================================
""")
def spreadsheetTest(action, check=None, path='test-data/bewertung.xlsx', sheetName=None):
    """Copy the fixture spreadsheet, run *action* on the copy, then *check* it.

    Returns whatever *action* returns. The temp dir is created with
    delete=False so the modified file can be inspected after the test.
    """
    with shell.tempDir(delete=False, dir='.') as d:
        p = shell.pjoin(d, f'test_{uuid.uuid4()}.xlsx')
        shell.cp(path, p)
        result = action(p)
        if check:
            wb = exc.load_workbook(filename=p)
            sheet = wb[sheetName] if sheetName else wb.active
            check(sheet)
        return result
def _runJavaTest(ctx, studentDir: str, codeDir: str, assignment: Assignment,
                 testId: str, testDir: str, filter: Optional[str],
                 hasTests: bool, isStudent: bool):
    """Run one Java test (or just compilation) via gradle and record the result.

    Executes gradle in cfg.baseDir with -P properties pointing at the student
    code, test dir and filter; tees the output to OUTPUT_<testId>.txt inside
    studentDir; then stores a compile flag ('C'/'SC' column) and, when
    hasTests, the test success ratio ('T'/'ST' column) in the spreadsheet.
    """
    cfg = ctx.cfg
    if filter is None:
        filter = '*'  # run all tests by default
    gradleProps = {
        'testFilter': filter,
        'testDir': testDir,
        'studentDir': codeDir
    }
    gradlePropArgs = []
    for k, v in gradleProps.items():
        gradlePropArgs.append(f'-P{k}={v}')
    print()
    print(blue(f"Starting test {testId}"))
    with shell.workingDir(cfg.baseDir):
        if not shell.isFile('build.gradle'):
            abort(f'No build.gradle file in {cfg.baseDir}, aborting')
        if not hasTests:
            # No tests configured: only check that the code compiles.
            gradleCmd = 'compileJava'
        else:
            gradleCmd = 'test'
        cmd = [cfg.gradlePath] + gradlePropArgs + [gradleCmd, '--rerun-tasks']
        print(f'Executing {" ".join(cmd)}')
        logFileName = shell.pjoin(studentDir, f'OUTPUT_{testId}.txt')
        # Tee gradle output to stdout and to the per-test log file.
        with shell.createTee([shell.TEE_STDOUT, logFileName]) as tee:
            result = shell.run(cmd, onError='ignore', stderrToStdout=True,
                               captureStdout=tee)
        output = open(logFileName, 'r').read()
        if result.exitcode == 0:
            print(green(f'Test {testId} OK'))
        else:
            print(red(f'Test {testId} FAILED, see above'))
        # Re-parse the captured log to get compile/test statistics.
        result = Result.parseResult(output)
        prefix = 'S' if isStudent else ''  # 'S' marks the student's own tests
        ctx.storeTestResultInSpreadsheet(studentDir, assignment, testId,
                                         [prefix + 'C'],
                                         0 if result.compileError else 1)
        if hasTests:
            ctx.storeTestResultInSpreadsheet(studentDir, assignment, testId,
                                             [prefix + 'T'], result.ratio())
def addComment(cfg):
    """Write the extended (German) comments file into every submission dir."""
    submissionDirs = collectSubmissionDirs(cfg)
    print(submissionDirs)
    for d in submissionDirs:
        p = shell.pjoin(d, cfg.commentsFile)
        verbose(f'Writing file {p}')
        writeFile(p, """In dieser Datei stehen allgemeine Kommentare zur Abgabe.
Die Bewertung finden Sie in der Datei POINTS.txt.
Falls für Ihre Abgabe automatisch Tests ausgeführt wurden, finden Sie die Ausgabe der Tests
für die i-te Aufgabe in der Datei OUTPUT_i.txt bzw. OUTPUT_student_i.txt (für Ihre eigenen Tests).
Möglicherweise enthalten die Quelldateien aufgaben-spezifische Kommentare.
Diese sind mit TUTOR/TUTORIN oder DOZENT/DOZENTIN gekennzeichnet, so dass Sie bequem danach suchen können.
=============================================================================================================
""")
def parse(baseDir, assignmentId: int, id: Optional[str], dicts: list[dict],
          fail: bool = True):
    """Build a Test from the layered config dicts.

    Returns None when neither a test file nor a filter is configured and
    fail is False; raises ValueError in that situation when fail is True.
    """
    dir = getFromDicts(dicts, 'test-dir', default=defaultTestDir(baseDir))
    file = getFromDicts(dicts, 'test-file', fail=False)
    if file:
        file = shell.pjoin(dir, file)
    filter = getFromDicts(dicts, 'test-filter', fail=False)
    testId = f'{assignmentId}_{id}' if id else str(assignmentId)
    if file or filter:
        return Test(testId, dir, filter, file)
    if not fail:
        return None
    testName = f'test "{id}"' if id else 'test'
    raise ValueError(f"Invalid {testName} without file and without filter")
#!/usr/bin/env python import sys import os import zipfile thisDir = os.path.dirname(__file__) topDir = os.path.join(thisDir, '..') sys.path.insert(0, os.path.join(topDir, 'src')) import shell import openpyxl as exc checkAssignments = shell.abspath(shell.pjoin(topDir, 'check-assignments')) if sys.platform == 'win32': checkAssignments = checkAssignments + ".bat" def abort(msg): sys.stderr.write(msg + '\n') sys.exit(1) def assertDirExists(path): if not shell.isDir(path): d = shell.dirname(path) files = shell.ls(d, '*') abort(f'File {path} does not exist, existing files: {files}') def assertExists(path): if not shell.isFile(path): d = shell.dirname(path)
def defaultTestDir(baseDir):
    """Return the conventional tests directory below *baseDir*."""
    return shell.pjoin(baseDir, 'tests')
def defaultRatingSheet(baseDir):
    """Return the conventional rating spreadsheet path below *baseDir*."""
    return shell.pjoin(baseDir, 'rating.xlsx')
def runJplagMerged(lang, kind, ext, args):
    """Run jplag once over all assignments of the given kind (by extension)."""
    print()
    print(ansi.green(f'Running jplag for all assignments of kind {kind}'))
    jplagOpts = ['-r', shell.pjoin(resultDir, kind), '-l', lang, '-p', ext, '.']
    runJplag(jplagOpts, args, '*' + ext)
def getAsFileList(self, d, k):
    """Resolve each entry of list-valued key *k* relative to directory *d*."""
    return [shell.pjoin(d, entry) for entry in self.getAsList(k)]
def outputFile(self, studentDir):
    """Return the path of this test's output log inside *studentDir*."""
    return shell.pjoin(studentDir, f'OUTPUT_{self.id}.txt')
def pointsTemplate(self):
    """Return the path of the points template file in the base directory."""
    return shell.pjoin(self.baseDir, 'POINTS_template.txt')
def ratingCsvPath(self):
    """Return the path of the rating CSV file in the base directory."""
    return shell.pjoin(self.baseDir, 'rating.csv')
def spreadsheetPath(self):
    """Return the path of the rating spreadsheet in the base directory."""
    return shell.pjoin(self.baseDir, 'rating.xlsx')
#!/usr/bin/env python import sys import os import zipfile thisDir = os.path.dirname(__file__) topDir = os.path.join(thisDir, '..') sys.path.insert(0, os.path.join(topDir, 'src')) import shell checkAssignments = shell.abspath(shell.pjoin(topDir, 'check-assignments')) if sys.platform == 'win32': checkAssignments = checkAssignments + ".bat" def abort(msg): sys.stderr.write(msg + '\n') sys.exit(1) def assertExists(path): if not shell.isFile(path): abort(f'File {path} does not exist') with shell.tempDir(onException=False) as tmp: print(f'tmp={tmp}') shell.cp(shell.pjoin(topDir, 'test-data'), tmp) with shell.workingDir(shell.pjoin(tmp, 'test-data/submissions')): print('### import ###') shell.run([checkAssignments, 'import', '../rating-moodle.csv']) assertExists('rating.xlsx')
def fun(c):
    # Test callback: after fixFilenames, the student dir must contain exactly
    # the expected files. Relies on enclosing-scope names not visible in this
    # chunk: student, expectedFiles, and the test case's self.
    fixFilenames(c)
    studDir = shell.pjoin(c.baseDir, student)
    existingFiles = [shell.basename(p) for p in shell.ls(studDir)]
    self.assertEqual(sorted(expectedFiles), sorted(existingFiles))