def run(self, env):
    """ Build it. """
    result = CheckerResult(checker=self)
    # Collect the source file names and assemble the full compiler command line.
    filenames = [name for name in self.get_file_names(env)]
    args = [self.compiler()] + self.flags(env) + filenames + self.libs()
    [output,_,_,_] = execute_arglist(args, env.tmpdir(),self.environment())
    # The linker line ("Linking <name> ...") reveals which module has a main entry point.
    # NOTE(review): the dots in the pattern are unescaped regex wildcards — presumably
    # harmless for real compiler output, but worth confirming.
    has_main = re.search(r"^Linking ([^ ]*) ...$",output,re.MULTILINE)
    if has_main:
        self._detected_main = has_main.group(1)
    output = escape(output)
    output = self.enhance_output(env, output)
    # Allow server to delete created subfolders
    execute('chmod -R 0777 *', env.tmpdir())
    # We mustn't have any warnings.
    passed = not self.has_warnings(output)
    # Mark which files of the build came from the sample solution.
    log = self.build_log(output,args,set(filenames).intersection([solutionfile.path() for solutionfile in env.solution().solutionfile_set.all()]))
    # Now that submission was successfully built, try to find the main modules name again
    try:
        if passed :
            env.set_program(self.main_module(env))
    except self.NotFoundError as e:
        # A missing main module only fails the check when one is required.
        passed = not self._main_required
        log += "<pre>" + str(e) + "</pre>"
    result.set_passed(passed)
    result.set_log(log)
    return result
def run(self, env):
    """ Run Checkstyle over the submitted sources; pass iff the audit ran to completion without errors. """
    # Save check configuration into the sandbox so "-c checks.xml" resolves locally.
    config_path = os.path.join(env.tmpdir(), "checks.xml")
    copy_file(self.configuration.path, config_path)
    # Run the tests
    args = [
        settings.JVM, "-cp", settings.CHECKSTYLEALLJAR, "-Dbasedir=.",
        "com.puppycrawl.tools.checkstyle.Main", "-c", "checks.xml"
    ] + [quote(name) for (name, content) in env.sources()]
    (output, error, exitcode) = execute(args, env.tmpdir())
    # Remove Praktomat-Path-Prefixes from result:
    output = re.sub(r"^" + re.escape(env.tmpdir()) + "/+", "", output,
                    flags=re.MULTILINE)
    result = CheckerResult(checker=self)
    result.set_log('<pre>' + escape(output) + '</pre>')
    # Fix: the previous pattern left the dots unescaped, so "." matched any
    # character; escape them so only the literal audit banner counts. Also use
    # "is not None" instead of the unidiomatic "not ... == None".
    result.set_passed(
        not error
        and re.match(r'Starting audit\.\.\.\nAudit done\.', output) is not None)
    return result
def run(self, env):
    """
    Run the configured shell script inside the sandbox directory and
    report a CheckerResult that passes iff the script wrote nothing to stderr.
    """
    sandbox = env.tmpdir()
    # Substitute the PROGRAM placeholder only when a program name is known.
    substitutions = [(u'PROGRAM', env.program())] if env.program() else []
    copy_file_to_directory(self.shell_script.path, sandbox, replace=substitutions)
    script_name = os.path.basename(self.shell_script.name)
    env_vars = {
        'USER': env.user().get_full_name(),
        'HOME': sandbox,
    }
    (output, error, exitcode) = execute(["sh", script_name],
                                        working_directory=sandbox,
                                        environment_variables=env_vars)
    # Optionally filter the output, then wrap non-HTML output in <pre>.
    if self.remove:
        output = re.sub(self.remove, "", output)
    if not self.returns_html:
        output = '<pre>' + output + '</pre>'
    result = CheckerResult(checker=self)
    result.set_log(output)
    result.set_passed(not error)
    return result
def run(self, env):
    """
    Execute the dumped shell script in the sandbox and convert its
    outcome into a CheckerResult (passed iff stderr stayed empty).
    """
    work_dir = env.tmpdir()
    # Replace the PROGRAM placeholder when the main program is already known.
    if env.program():
        replacements = [(u"PROGRAM", env.program())]
    else:
        replacements = []
    copy_file_to_directory(self.shell_script.path, work_dir, replace=replacements)
    script = os.path.basename(self.shell_script.name)
    environment = {"USER": env.user().get_full_name(), "HOME": work_dir}
    (output, error, exitcode) = execute(["sh", script],
                                        working_directory=work_dir,
                                        environment_variables=environment)
    result = CheckerResult(checker=self)
    if self.remove:
        output = re.sub(self.remove, "", output)
    if not self.returns_html:
        output = "<pre>" + output + "</pre>"
    result.set_log(output)
    result.set_passed(not error)
    return result
def run(self, env): """ Runs tests in a special environment. Here's the actual work. This runs the check in the environment ENV, returning a CheckerResult. """ # Setup copy_file(self.shell_script.path, env.tmpdir(), to_is_directory=True) os.chmod(env.tmpdir()+'/'+os.path.basename(self.shell_script.name),0750) # Run the tests -- execute dumped shell script 'script.sh' filenames = [quote(name) for (name,content) in env.sources()] args = [env.tmpdir()+'/'+os.path.basename(self.shell_script.name)] + filenames environ = {} environ['USER'] = str(env.user().id) environ['HOME'] = env.tmpdir() environ['JAVA'] = settings.JVM environ['JAVA_SECURE'] = settings.JVM_SECURE environ['PROGRAM'] = env.program() or '' [output, error, exitcode,timed_out] = execute_arglist(args, working_directory=env.tmpdir(), environment_variables=environ,timeout=settings.TEST_TIMEOUT,fileseeklimit=settings.TEST_MAXFILESIZE) output = force_unicode(output, errors='replace') result = CheckerResult(checker=self) (output,truncated) = truncated_log(output) if self.remove: output = re.sub(self.remove, "", output) if not self.returns_html or truncated: output = '<pre>' + escape(output) + '</pre>' result.set_log(output,timed_out=timed_out,truncated=truncated) result.set_passed(not exitcode and not timed_out and not truncated) return result
def run(self, env):
    """
    Copy the optional input/output fixtures plus the shell script into the
    sandbox, run the script there, and report its raw output as the log.
    Passes iff the script wrote nothing to stderr.
    """
    workdir = env.tmpdir()
    # Stage the optional fixture files next to the script.
    if self.input_file:
        copy_file(self.input_file.path, workdir)
    if self.output_file:
        copy_file(self.output_file.path, workdir)
    substitutions = [(u'PROGRAM', env.program())] if env.program() else []
    copy_file_to_directory(self.shell_script.path, workdir, replace=substitutions)
    command = ["sh", os.path.basename(self.shell_script.name)]
    env_vars = {'USER': env.user().get_full_name(), 'HOME': workdir}
    (output, error, exitcode) = execute(command,
                                        working_directory=workdir,
                                        environment_variables=env_vars)
    result = CheckerResult(checker=self)
    result.set_log(output)
    result.set_passed(not error)
    return result
def run(self, env):
    """ Compile the submission with an ignoring Java builder, then run the JUnit test class in a secured JVM and grade its output. """
    java_builder = IgnoringJavaBuilder(_flags="", _libs=self.junit_version,_file_pattern=r"^.*\.[jJ][aA][vV][aA]$",_output_flags="")
    # Files listed in self.ignore are excluded from the build.
    java_builder._ignore = self.ignore.split(" ")
    build_result = java_builder.run(env)
    # A failed build short-circuits: report the build log instead of test results.
    if not build_result.passed:
        result = CheckerResult(checker=self)
        result.set_passed(False)
        result.set_log('<pre>' + escape(self.test_description) + '\n\n======== Test Results ======\n\n</pre><br/>\n'+build_result.log)
        return result
    environ = {}
    environ['UPLOAD_ROOT'] = settings.UPLOAD_ROOT
    environ['JAVA'] = settings.JVM
    # Security policy restricting what the tested code may do inside the JVM.
    environ['POLICY'] = os.path.join(os.path.join(os.path.dirname(os.path.dirname(__file__)),"scripts"),"junit.policy")
    cmd = [settings.JVM_SECURE, "-cp", settings.JAVA_LIBS[self.junit_version]+":.", self.runner(), self.class_name]
    [output, error, exitcode,timed_out] = execute_arglist(cmd, env.tmpdir(),environment_variables=environ,timeout=settings.TEST_TIMEOUT,fileseeklimit=settings.TEST_MAXFILESIZE)
    result = CheckerResult(checker=self)
    # Clip oversized logs before embedding them into the HTML report.
    (output,truncated) = truncated_log(output)
    output = '<pre>' + escape(self.test_description) + '\n\n======== Test Results ======\n\n</pre><br/><pre>' + escape(output) + '</pre>'
    result.set_log(output,timed_out=timed_out,truncated=truncated)
    # Pass only on clean exit, no timeout, expected output, and an untruncated log.
    result.set_passed(not exitcode and not timed_out and self.output_ok(output) and not truncated)
    return result
def run(self, env):
    """
    Check the submitted theory files with an Isabelle process and pass
    when the configured logic loads them all within the timeout.
    """
    # Ask the Isabelle wrapper where the actual isabelle-process binary lives.
    args = [settings.ISABELLE_BINARY, "getenv", "-b", "ISABELLE_PROCESS"]
    (output, error, exitcode, _) = execute_arglist(args, env.tmpdir())
    isabelle_process = output.rstrip()
    # Quote every theory name (file name sans extension) for the ML command.
    thys = ['"%s"' % os.path.splitext(name)[0] for (name, _) in env.sources()]
    ml_cmd = 'Secure.set_secure (); use_thys [%s]' % ','.join(thys)
    args = [isabelle_process, "-r", "-q", "-e", ml_cmd, self.logic]
    (output, error, exitcode, timed_out) = execute_arglist(
        args, env.tmpdir(), timeout=settings.TEST_TIMEOUT)
    if timed_out:
        # Tell the student why the log ends abruptly.
        output += "\n\n---- check aborted after %d seconds ----\n" % settings.TEST_TIMEOUT
    result = CheckerResult(checker=self)
    result.set_log('<pre>' + escape(output) + '</pre>')
    result.set_passed(not timed_out and self.output_ok(output))
    return result
def run(self, env):
    """ Build it. """
    result = CheckerResult(checker=self)
    # Resolve the main module first; without it there is nothing to build.
    try:
        env.set_program(self.main_module(env))
    except self.NotFoundError as e:
        result.set_log(e)
        result.set_passed(False)
        return result
    filenames = [quote(name) for name in self.get_file_names(env)]
    args = [ self.compiler() ] + self.output_flags(env) + self.flags(env) + filenames + self.libs()
    output = execute(args, env.tmpdir(), self.environment())[0]
    output = self.enhance_output(env, output)
    # Allow server to delete created subfolders
    execute('chmod -R 0777 *', env.tmpdir())
    # The executable has to exist and we mustn't have any warnings.
    passed = not self.has_warnings(output)
    # Mark which files of the build came from the sample solution.
    result.set_log( self.build_log( output, args, set(filenames).intersection([ quote(solutionfile.path()) for solutionfile in env.solution().solutionfile_set.all() ])))
    result.set_passed(passed)
    return result
def run_file(self, env):
    """ Place this checker's file (or unpacked zip) into the sandbox, failing when it would clobber files that already exist there. """
    result = CheckerResult(checker=self)
    clashes = []
    # Strip leading slashes/spaces so the target stays inside the sandbox.
    cleanpath = string.lstrip(self.path, "/ ")
    if (self.unpack_zipfile):
        path = os.path.join(env.tmpdir(), cleanpath)
        # Record every clash and register each extracted file with the environment.
        unpack_zipfile_to(
            self.file.path, path,
            lambda n: clashes.append(os.path.join(cleanpath, n)),
            lambda f: self.add_to_environment(env, os.path.join(cleanpath, f)))
    else:
        # Prefer the configured display name, falling back to the stored file's path.
        filename = self.filename if self.filename else self.file.path
        source_path = os.path.join(cleanpath, os.path.basename(filename))
        path = os.path.join(env.tmpdir(), source_path)
        overridden = os.path.exists(path)
        copy_file(self.file.path, path, binary=True)
        if overridden:
            clashes.append(
                os.path.join(self.path, os.path.basename(filename)))
        self.add_to_environment(env, source_path)
    result.set_passed(not clashes)
    if clashes:
        result.set_log(
            "These files already existed. Do NOT include them in your submissions:<br/><ul>\n"
            + "\n".join(map(lambda f: "<li>%s</li>" % escape(f), clashes)) +
            "</ul>")
    return result
def run(self, env):
    """ Run Checkstyle on all submitted sources; pass iff the audit ran to completion without timeout or JVM failure. """
    # Save check configuration into the sandbox so "-c checks.xml" resolves locally.
    config_path = os.path.join(env.tmpdir(), "checks.xml")
    copy_file(self.configuration.path, config_path)
    # Run the tests
    args = [
        settings.JVM, "-cp", settings.CHECKSTYLEALLJAR, "-Dbasedir=.",
        "com.puppycrawl.tools.checkstyle.Main", "-c", "checks.xml"
    ] + [name for (name, content) in env.sources()]
    [output, error, exitcode, timed_out] = execute_arglist(args, env.tmpdir())
    # Remove Praktomat-Path-Prefixes from result:
    output = re.sub(r"^" + re.escape(env.tmpdir()) + "/+", "", output,
                    flags=re.MULTILINE)
    result = CheckerResult(checker=self)
    log = '<pre>' + escape(output) + '</pre>'
    if timed_out:
        log = log + '<div class="error">Timeout occured!</div>'
    result.set_log(log)
    # Fix: escape the literal dots in the audit banner (they previously acted
    # as regex wildcards) and use "is not None" instead of "not ... == None".
    result.set_passed(
        not timed_out and not exitcode
        and re.match(r'Starting audit\.\.\.\nAudit done\.', output) is not None)
    return result
def run(self, env):
    """ Build the submission with a Java builder, then run the JUnit runner in a secured JVM and grade its output. """
    java_builder = JavaBuilder(_flags="", _libs=self.junit_version,_file_pattern=r"^.*\.[jJ][aA][vV][aA]$",_output_flags="")
    build_result = java_builder.run(env)
    # A failed build short-circuits: report the build log instead of test results.
    if not build_result.passed:
        result = CheckerResult(checker=self)
        result.set_passed(False)
        result.set_log('<pre>' + escape(self.test_description) + '\n\n======== Test Results ======\n\n</pre><br/>\n'+build_result.log)
        return result
    environ = {}
    environ['UPLOAD_ROOT'] = settings.UPLOAD_ROOT
    environ['JAVA'] = settings.JVM
    # Security policy restricting what the tested code may do inside the JVM.
    environ['POLICY'] = os.path.join(os.path.join(os.path.dirname(os.path.dirname(__file__)),"scripts"),"junit.policy")
    environ['USE_KILL_LOG'] = "False"
    environ['ULIMIT_FILESIZE'] = '128' # Have the checker script set a filesize-ulimit of 128kb
    # Specifically, this limits the DejaGNU .log file size,
    # and thus deals with Programs that output lots of junk
    # NOTE(review): the command is assembled as a single string; runner() and
    # class_name appear to be trusted configuration here, not user input — confirm.
    cmd = settings.JVM_SECURE + " -cp " + settings.JAVA_LIBS[self.junit_version] + ":. " + self.runner() + " " + self.class_name
    [output, error, exitcode] = execute(cmd, env.tmpdir(),environment_variables=environ)
    result = CheckerResult(checker=self)
    result.set_log('<pre>' + escape(self.test_description) + '\n\n======== Test Results ======\n\n</pre><br/><pre>' + escape(output) + '</pre>')
    result.set_passed((not exitcode) and self.output_ok(output))
    return result
def run(self, env):
    """
    Test if all interfaces were implemented correctly
    If so, the interfaces are added to make it possible to compile them
    """
    result = CheckerResult(checker=self)
    implemented = []
    passed = 1
    log = ""
    # Define regular expressions for interfaces implemented by a class
    # baseExp for classes implementing only one interface, extExp for more
    baseExp = "^ *((public )|(protected )|(private ))?class +[A-Z][0-9a-zA-Z_]* *implements +"
    extExp = baseExp + "[0-9a-zA-Z_, ]*"
    # Iterate through sources and find out which interfaces were implemented
    for (name, content) in env.sources():
        for interface in [ self.interface1, self.interface2, self.interface3, self.interface4, self.interface5, self.interface6, self.interface7 ]: ##self.interface_set.all()
            iname = interface ##.name
            noComments = self._cutComments(content) # remove comments
            # Direct match right after the "implements" keyword ...
            expression = re.compile(baseExp + self._cleanName(iname), re.MULTILINE)
            if expression.search(noComments):
                if not iname in implemented:
                    implemented.append(iname)
            # ... or anywhere later in a comma-separated implements list.
            expression = re.compile(extExp + self._cleanName(iname), re.MULTILINE)
            if expression.search(noComments):
                if not iname in implemented:
                    implemented.append(iname)
    # check if all interfaces were implemented
    for interface in [ self.interface1, self.interface2, self.interface3, self.interface4, self.interface5, self.interface6, self.interface7 ]: ##self.interface_set.all()
        if not interface in implemented: ## interface.name
            passed = 0
            # Fix: HTML-escape the configured interface name before embedding it
            # in the log; the sibling variant of this checker already does so.
            log += "Interface " + escape(interface) + " wurde nicht implementiert.<BR>" ## interface.name
    if not passed:
        log += u"""<p>Sie müssen alle vorgegebenen Interfaces implementieren. Bitte ändern Sie Ihr Programm so ab, dass es den Anforderungen entspricht und versuchen Sie es erneut.</p> """
    result.set_log(log)
    result.set_passed(passed)
    return result
def run(self, env):
    """
    Copy the JUnit 3.8 test case into the sandbox, run it in text mode,
    and pass when the runner exits with status zero.
    """
    copy_file(self.test_case.path, env.tmpdir(), to_is_directory=True, binary=True)
    # Strip the file extension to obtain the test class name.
    test_file = os.path.basename(self.test_case.path)
    junit_class = test_file.rsplit('.', 1)[0]
    command = "%s -text %s" % (settings.JUNIT38, junit_class)
    [output, error, exitcode] = execute(command, env.tmpdir())
    result = CheckerResult(checker=self)
    result.set_log('<pre>' + escape(output) + '</pre>')
    result.set_passed(not exitcode)
    return result
def run(self, env):
    """ Compile with an ignoring Java builder, then run the JUnit test class in a secured JVM and grade its output. """
    java_builder = IgnoringJavaBuilder(
        _flags="",
        _libs=self.junit_version,
        _file_pattern=r"^.*\.[jJ][aA][vV][aA]$",
        _output_flags="")
    # Files listed in self.ignore are excluded from the build.
    java_builder._ignore = self.ignore.split(" ")
    build_result = java_builder.run(env)
    # A failed build short-circuits: report the build log instead of test results.
    if not build_result.passed:
        result = CheckerResult(checker=self)
        result.set_passed(False)
        result.set_log(
            '<pre>' + escape(self.test_description) +
            '\n\n======== Test Results ======\n\n</pre><br/>\n' +
            build_result.log)
        return result
    environ = {}
    environ['UPLOAD_ROOT'] = settings.UPLOAD_ROOT
    environ['JAVA'] = settings.JVM
    # Security policy restricting what the tested code may do inside the JVM.
    environ['POLICY'] = os.path.join(
        os.path.join(os.path.dirname(os.path.dirname(__file__)), "scripts"),
        "junit.policy")
    cmd = [
        settings.JVM_SECURE, "-cp",
        settings.JAVA_LIBS[self.junit_version] + ":.",
        self.runner(), self.class_name
    ]
    [output, error, exitcode, timed_out] = execute_arglist(cmd,
                                           env.tmpdir(),
                                           environment_variables=environ,
                                           timeout=settings.TEST_TIMEOUT,
                                           fileseeklimit=settings.TEST_MAXFILESIZE)
    result = CheckerResult(checker=self)
    # Clip oversized logs before embedding them into the HTML report.
    (output, truncated) = truncated_log(output)
    output = '<pre>' + escape(
        self.test_description
    ) + '\n\n======== Test Results ======\n\n</pre><br/><pre>' + escape(
        output) + '</pre>'
    result.set_log(output, timed_out=timed_out, truncated=truncated)
    # Pass only on clean exit, no timeout, expected output, and an untruncated log.
    result.set_passed(not exitcode and not timed_out
                      and self.output_ok(output) and not truncated)
    return result
def run(self, env):
    """
    Checks if the specified text is included in a submitted file

    Scans every source line outside of /* ... */ and // comments for
    self.text and reports each (file, line) occurrence. Fails when the
    text never appears in non-comment code.
    """
    result = CheckerResult(checker=self)
    lines = []
    occurances = []
    passed = 1
    log = ""
    lineNum = 1
    inComment = False
    # search the sources
    for (name, content) in env.sources():
        lines = self._getLines(content)
        lineNum = 1
        for line in lines:
            if not inComment:
                if line.find('/*') >= 0:
                    parts = line.split('/*')
                    # Fix: use ">= 0" — str.find returns 0 when the text sits at
                    # the very start of the line; "> 0" silently skipped that case.
                    if parts[0].find(self.text) >= 0:
                        occurances.append((name, lineNum))
                    inComment = True
            if not inComment:
                parts = line.split('//')
                if parts[0].find(self.text) >= 0:
                    occurances.append((name, lineNum))
            else:
                # Fix: ">= 0" here as well, so a "*/" at column 0 still ends the comment
                # (consistent with the ">= 0" used for "/*" above).
                if line.find('*/') >= 0:
                    parts = line.split('*/')
                    if len(parts) > 1:
                        if parts[1].find(self.text) >= 0:
                            occurances.append((name, lineNum))
                        inComment = False
            lineNum += 1
    # Print Results:
    if len(occurances) <= 0:
        passed = 0
        log = escape(self.text) + u" kommt nicht in Ihrer Lösung vor!"
    else:
        log = escape(self.text) + " kommt an folgenden Stellen vor<br>"
        for (name, num) in occurances:
            log += escape(name) + " Zeile: " + str(num) + "<br>"
    result.set_log(log)
    result.set_passed(passed)
    return result
def run(self, env):
    """ Here's the actual work. This runs the check in the environment ENV, returning a CheckerResult. """
    result = CheckerResult(checker=self)
    log = ""
    passed = 1
    # Compile the user-supplied include/exclude filename filters (case-insensitive).
    include_re = re.compile(self.include, re.IGNORECASE)
    exclude_re = re.compile(self.exclude, re.IGNORECASE)
    sources = env.sources()
    if self.include:
        sources = filter(lambda (name, content): include_re.search(name), sources)
    if self.exclude:
        sources = filter( lambda (name, content): not exclude_re.search(name), sources)
    for (name, content) in sources:
        if not name or not content:
            continue
        max_line_length = 0
        line_number = 1
        for line in string.split(content, "\n"):
            # Let subclasses pre-process the line (e.g. tab expansion) before measuring.
            line = self.setup_line(line, env)
            if len(line) > self.max_line_length:
                # Python-2 backtick repr; reports the offending line and its width.
                msg = (escape(name) + ":" + ` line_number ` + ": Zeile zu breit (" + ` len(line) ` + " Zeichen)" + "<BR>")
                log = log + msg
                passed = 0
            max_line_length = max(len(line), max_line_length)
            line_number = line_number + 1
        # Per-file summary of the widest line seen.
        msg = (escape(name) + ": Maximale Zeilenbreite: " + ` max_line_length ` + " Zeichen\n" + "<BR>")
        log = log + msg
    # At the end of each run, be sure to set LOG and PASSED.
    result.set_log(log)
    result.set_passed(passed)
    # That's all!
    return result
def run(self, env):
    """
    Anonymity check: fail when a submitted file contains the author's
    first or last name, and show each offending line in the log.
    """
    result = CheckerResult(checker=self)
    log = ""
    passed = 1
    user = env.user()
    for (fullfname, content) in env.sources():
        # check anonymity
        # search for user ID or name
        regexp = re.compile((word(user.last_name) + "|" + word(user.first_name)), re.I)
        firstrun = 1
        for match in regexp.finditer(content):
            # Emit the per-file header only once, before the first hit.
            if firstrun:
                log += "<H4>" + escape(fullfname) + "</H4>"
                log += "Die Datei enthält Ihren Namen "
                log += "oder Ihre Benutzerkennung:<p>"
                firstrun = 0
            passed = 0
            log += line(content, match) + "<br>"
    if not passed:
        log += u"""<p>Praktomat unterstützt <em>anonymes Bewerten</em> - der Bewerter kennt nur Ihr Programm, nicht aber Ihren Namen. Um anonymes Bewerten zu ermöglichen, darf Ihr Name nicht im Programmtext auftreten.<p> Bitte ändern Sie den Programmtext und versuchen Sie es noch einmal."""
    result.set_log(log)
    result.set_passed(passed)
    return result
def run(self, env):
    """
    Test if all interfaces were implemented correctly
    If so, the interfaces are added to make it possible to compile them
    """
    result = CheckerResult(checker=self)
    implemented = []
    passed = 1
    log = ""
    # Define regular expressions for interfaces implemented by a class
    # baseExp for classes implementing only one interface, extExp for more
    baseExp = "^ *((public )|(protected )|(private ))?class +[A-Z][0-9a-zA-Z_]* *implements +"
    extExp = baseExp + "[0-9a-zA-Z_, ]*"
    # Iterate through sources and find out which interfaces were implemented
    for (name, content) in env.sources():
        for interface in [self.interface1,self.interface2,self.interface3,self.interface4,self.interface5,self.interface6,self.interface7]: ##self.interface_set.all()
            iname = interface ##.name
            noComments = self._cutComments(content) # remove comments
            # Direct match right after the "implements" keyword ...
            expression = re.compile(baseExp + self._cleanName(iname), re.MULTILINE)
            if expression.search(noComments):
                if not iname in implemented:
                    implemented.append(iname)
            # ... or anywhere later in a comma-separated implements list.
            expression = re.compile(extExp + self._cleanName(iname), re.MULTILINE)
            if expression.search(noComments):
                if not iname in implemented:
                    implemented.append(iname)
    # check if all interfaces were implemented
    for interface in [self.interface1,self.interface2,self.interface3,self.interface4,self.interface5,self.interface6,self.interface7]: ##self.interface_set.all()
        if not interface in implemented: ## interface.name
            passed = 0
            log += "Interface " + escape(interface) + " wurde nicht implementiert.<BR>" ## interface.name
    if not passed:
        log += u"""<p>Sie müssen alle vorgegebenen Interfaces implementieren. Bitte ändern Sie Ihr Programm so ab, dass es den Anforderungen entspricht und versuchen Sie es erneut.</p> """
    result.set_log(log)
    result.set_passed(passed)
    return result
def run(self, env):
    """ Anonymity check: fail when a source file contains the author's first or last name, listing each offending line. """
    result = CheckerResult(checker=self)
    log = ""
    passed = 1
    user = env.user()
    for (fullfname, content) in env.sources():
        # check anonymity
        # search for user ID or name
        regexp = re.compile( (word(user.last_name) + "|" + word(user.first_name)), re.I)
        match_iter = regexp.finditer(content)
        firstrun = 1
        # Python-2 style manual iteration: .next() raises StopIteration when done.
        while 1:
            try:
                match = match_iter.next()
            except StopIteration:
                break
            if firstrun:
                # Per-file header, printed once before the first finding.
                log += "<H4>" + escape(fullfname) + "</H4>"
                log += "Die Datei enthält Ihren Namen "
                log += "oder Ihre Benutzerkennung:<p>"
                firstrun = 0
            passed = 0
            log += line(content, match) + "<br>"
    if not passed:
        log += u"""<p>Praktomat unterstützt <em>anonymes Bewerten</em> - der Bewerter kennt nur Ihr Programm, nicht aber Ihren Namen. Um anonymes Bewerten zu ermöglichen, darf Ihr Name nicht im Programmtext auftreten.<p> Bitte ändern Sie den Programmtext und versuchen Sie es noch einmal."""
    result.set_log(log)
    result.set_passed(passed)
    return result
def run(self, env):
    """
    Trivial informational checker: always passes and shows the configured
    message (self.configuration_field) to the student.
    """
    # use env.tmpdir() to get the sandbox folder
    # env.sources() contains the uploaded files as [(unicode_name, unicode_content)...]
    # env.user() returns the author of the solution
    # to pass information to a later checker, store it in env (mind checker order);
    # use utilities.file_operations / execute(...) for any file or process work so
    # ownership, permissions and sandboxing are handled appropriately.
    outcome = CheckerResult(checker=self)
    outcome.set_log(self.configuration_field)
    outcome.set_passed(True)
    return outcome
def run(self, env):
    """ Runs tests in a special environment. Here's the actual work. This runs the check in the environment ENV, returning a CheckerResult. """
    # Prefer the configured display name, falling back to the stored file's path.
    filename = self.filename if self.filename else self.file.path
    # Strip leading "/" and spaces so the target stays below the sandbox root.
    path = os.path.join(os.path.join(env.tmpdir(),string.lstrip(self.path,"/ ")),os.path.basename(filename))
    overridden = os.path.exists(path)
    copy_file(self.file.path, path)
    result = CheckerResult(checker=self)
    if not overridden:
        result.set_log("")
        result.set_passed(True)
    else:
        # The student submitted a file of the same name — flag the collision.
        result.set_log("The file '%s' was overridden" % os.path.join(self.path, os.path.basename(self.file.path)))
        result.set_passed(False)
    source_path = os.path.join(string.lstrip(self.path,"/ "), os.path.basename(filename))
    # Register the file content with the environment so later checkers see it.
    # NOTE(review): assumes self.file's read position is at the start here — confirm.
    env.add_source(source_path, get_unicode(self.file.read()))
    return result
def run(self, env):
    """ Build it. """
    result = CheckerResult(checker=self)
    # Try to find out the main modules name with only the source files present
    try:
        env.set_program(self.main_module(env))
    except self.NotFoundError:
        pass
    filenames = [name for name in self.get_file_names(env)]
    args = [ self.compiler() ] + self.output_flags(env) + self.flags(env) + filenames + self.libs()
    [output, _, _, _] = execute_arglist(args, env.tmpdir(), self.environment())
    output = escape(output)
    output = self.enhance_output(env, output)
    # Allow server to delete created subfolders
    execute('chmod -R 0777 *', env.tmpdir())
    # We mustn't have any warnings.
    passed = not self.has_warnings(output)
    # Mark which files of the build came from the sample solution.
    log = self.build_log(
        output, args,
        set(filenames).intersection([
            solutionfile.path()
            for solutionfile in env.solution().solutionfile_set.all()
        ]))
    # Now that submission was successfully built, try to find the main modules name again
    try:
        if passed:
            env.set_program(self.main_module(env))
    except self.NotFoundError as e:
        # But only complain if the main method is required
        if self._main_required:
            log += "<pre>" + str(e) + "</pre>"
            passed = False
    result.set_passed(passed)
    result.set_log(log)
    return result
def run(self, env):
    """ Here's the actual work. This runs the check in the environment ENV, returning a CheckerResult. """
    result = CheckerResult(checker=self)
    log = ""
    passed = 1
    # Compile the user-supplied include/exclude filename filters (case-insensitive).
    include_re = re.compile(self.include, re.IGNORECASE)
    exclude_re = re.compile(self.exclude, re.IGNORECASE)
    sources = env.sources()
    if self.include:
        sources = filter(lambda (name, content): include_re.search(name),
                         sources)
    if self.exclude:
        sources = filter(lambda (name, content): not exclude_re.search(name),
                         sources)
    for (name, content) in sources:
        if not name or not content:
            continue
        max_line_length = 0
        line_number = 1
        for line in string.split(content, "\n"):
            # Let subclasses pre-process the line (e.g. tab expansion) before measuring.
            line = self.setup_line(line, env)
            if len(line) > self.max_line_length:
                # Python-2 backtick repr of the numbers.
                msg = (
                    escape(name) + ":" + `line_number` +
                    ": Zeile zu breit (" + `len(line)` + " Zeichen)" + "<BR>")
                log = log + msg
                passed = 0
            max_line_length = max(len(line), max_line_length)
            line_number = line_number + 1
        # Per-file summary of the widest line seen.
        msg = (escape(name) + ": Maximale Zeilenbreite: " + `max_line_length` +
               " Zeichen\n" + "<BR>")
        log = log + msg
    # At the end of each run, be sure to set LOG and PASSED.
    result.set_log(log)
    result.set_passed(passed)
    # That's all!
    return result
def run(self, env):
    """ Run Checkstyle over the submitted sources; pass iff the audit ran to completion without errors. """
    # Save check configuration into the sandbox so "-c checks.xml" resolves locally.
    config_path = os.path.join(env.tmpdir(), "checks.xml")
    copy_file(self.configuration.path, config_path)
    # Run the tests
    args = [settings.JVM, "-cp", settings.CHECKSTYLEALLJAR, "-Dbasedir=.",
            "com.puppycrawl.tools.checkstyle.Main", "-c", "checks.xml"] + [quote(name) for (name,content) in env.sources()]
    (output, error, exitcode) = execute(args, env.tmpdir())
    # Remove Praktomat-Path-Prefixes from result:
    output = re.sub(r"^"+re.escape(env.tmpdir())+"/+","",output,flags=re.MULTILINE)
    result = CheckerResult(checker=self)
    result.set_log('<pre>' + escape(output) + '</pre>')
    # Fix: the previous pattern left the dots unescaped, so "." matched any
    # character; escape them so only the literal audit banner counts. Also use
    # "is not None" instead of the unidiomatic "not ... == None".
    result.set_passed(not error and re.match(r'Starting audit\.\.\.\nAudit done\.', output) is not None)
    return result
def run(self, env):
    """ Build the submission with a Java builder, then run the JUnit runner in a secured JVM and grade its output. """
    java_builder = JavaBuilder(_flags="",
                               _libs=self.junit_version,
                               _file_pattern=r"^.*\.[jJ][aA][vV][aA]$",
                               _output_flags="")
    build_result = java_builder.run(env)
    # A failed build short-circuits: report the build log instead of test results.
    if not build_result.passed:
        result = CheckerResult(checker=self)
        result.set_passed(False)
        result.set_log(
            '<pre>' + escape(self.test_description) +
            '\n\n======== Test Results ======\n\n</pre><br/>\n' +
            build_result.log)
        return result
    environ = {}
    environ['UPLOAD_ROOT'] = settings.UPLOAD_ROOT
    environ['JAVA'] = settings.JVM
    # Security policy restricting what the tested code may do inside the JVM.
    environ['POLICY'] = os.path.join(
        os.path.join(os.path.dirname(os.path.dirname(__file__)), "scripts"),
        "junit.policy")
    environ['USE_KILL_LOG'] = "False"
    environ[
        'ULIMIT_FILESIZE'] = '128'  # Have the checker script set a filesize-ulimit of 128kb
    # Specifically, this limits the DejaGNU .log file size,
    # and thus deals with Programs that output lots of junk
    # NOTE(review): the command is assembled as a single string; runner() and
    # class_name appear to be trusted configuration here, not user input — confirm.
    cmd = settings.JVM_SECURE + " -cp " + settings.JAVA_LIBS[
        self.junit_version] + ":. " + self.runner() + " " + self.class_name
    [output, error, exitcode] = execute(cmd,
                                        env.tmpdir(),
                                        environment_variables=environ)
    result = CheckerResult(checker=self)
    result.set_log('<pre>' + escape(self.test_description) +
                   '\n\n======== Test Results ======\n\n</pre><br/><pre>' +
                   escape(output) + '</pre>')
    result.set_passed((not exitcode) and self.output_ok(output))
    return result
def run(self, env): """ Runs tests in a special environment. Here's the actual work. This runs the check in the environment ENV, returning a CheckerResult. """ # Setup copy_file(self.shell_script.path, env.tmpdir(), to_is_directory=True) os.chmod(env.tmpdir() + '/' + os.path.basename(self.shell_script.name), 0750) # Run the tests -- execute dumped shell script 'script.sh' filenames = [quote(name) for (name, content) in env.sources()] args = [env.tmpdir() + '/' + os.path.basename(self.shell_script.name) ] + filenames environ = {} environ['USER'] = str(env.user().id) environ['HOME'] = env.tmpdir() environ['JAVA'] = settings.JVM environ['JAVA_SECURE'] = settings.JVM_SECURE environ['PROGRAM'] = env.program() or '' [output, error, exitcode, timed_out] = execute_arglist(args, working_directory=env.tmpdir(), environment_variables=environ, timeout=settings.TEST_TIMEOUT, fileseeklimit=settings.TEST_MAXFILESIZE) output = force_unicode(output, errors='replace') result = CheckerResult(checker=self) (output, truncated) = truncated_log(output) if self.remove: output = re.sub(self.remove, "", output) if not self.returns_html or truncated or timed_out: output = '<pre>' + escape(output) + '</pre>' result.set_log(output, timed_out=timed_out, truncated=truncated) result.set_passed(not exitcode and not timed_out and not truncated) return result
def run(self, env):
    """ Check the submitted theory files with an Isabelle process; pass when the configured logic loads them all within the timeout. """
    # Find out the path to isabaelle-process
    args = [settings.ISABELLE_BINARY, "getenv", "-b", "ISABELLE_PROCESS"]
    (output, error, exitcode, _) = execute_arglist(args, env.tmpdir())
    isabelle_process = output.rstrip()
    # Quote each theory name (file name without extension) for the ML command.
    thys = map (lambda (name,_): ('"%s"' % os.path.splitext(name)[0]), env.sources())
    ml_cmd = 'Secure.set_secure (); use_thys [%s]' % ','.join(thys)
    args = [isabelle_process, "-r", "-q", "-e", ml_cmd, self.logic]
    (output, error, exitcode, timed_out) = execute_arglist(args, env.tmpdir(),timeout=settings.TEST_TIMEOUT)
    if timed_out:
        # Tell the student why the log ends abruptly.
        output += "\n\n---- check aborted after %d seconds ----\n" % settings.TEST_TIMEOUT
    result = CheckerResult(checker=self)
    result.set_log('<pre>' + escape(output) + '</pre>')
    result.set_passed(not timed_out and self.output_ok(output))
    return result
def run_file(self, env):
    """ Place this checker's file (or unpacked zip) into the sandbox, failing when it would clobber files that already exist there. """
    result = CheckerResult(checker=self)
    clashes = []
    # Strip leading slashes/spaces so the target stays inside the sandbox.
    cleanpath = string.lstrip(self.path,"/ ")
    if (self.unpack_zipfile):
        path = os.path.join(env.tmpdir(),cleanpath)
        # Record every clash and register each extracted file with the environment.
        unpack_zipfile_to(self.file.path,
                          path,
                          lambda n: clashes.append(os.path.join(cleanpath, n)),
                          lambda f: self.add_to_environment(env, os.path.join(cleanpath,f)))
    else:
        # Prefer the configured display name, falling back to the stored file's path.
        filename = self.filename if self.filename else self.file.path
        source_path = os.path.join(cleanpath, os.path.basename(filename))
        path = os.path.join(env.tmpdir(),source_path)
        overridden = os.path.exists(path)
        copy_file(self.file.path, path, binary=True)
        if overridden:
            clashes.append(os.path.join(self.path, os.path.basename(filename)))
        self.add_to_environment(env, source_path)
    result.set_passed(not clashes)
    if clashes:
        result.set_log("These files already existed. Do NOT include them in your submissions:<br/><ul>\n" + "\n".join(map(lambda f: "<li>%s</li>" % escape(f), clashes)) + "</ul>")
    return result
def run(self, env): """ Runs tests in a special environment. Here's the actual work. This runs the check in the environment ENV, returning a CheckerResult. """ # Setup test_dir = env.tmpdir() if self.input_file: copy_file(self.input_file.path, test_dir) if self.output_file: copy_file(self.output_file.path, test_dir) replace = [(u'PROGRAM',env.program())] if env.program() else [] copy_file_to_directory(self.shell_script.path, test_dir, replace=replace) args = ["sh", os.path.basename(self.shell_script.name)] environ = {} environ['USER'] = env.user().get_full_name() environ['HOME'] = test_dir (output, error, exitcode) = execute(args, working_directory=test_dir, environment_variables=environ) result = CheckerResult(checker=self) result.set_log(output) result.set_passed(not error) return result
def run(self, env):
    """ Runs tests in a special environment. Here's the actual work. This runs the check in the environment ENV, returning a CheckerResult. """
    # Prefer the configured display name, falling back to the stored file's path.
    filename = self.filename if self.filename else self.file.path
    # Strip leading "/" and spaces so the target stays below the sandbox root.
    path = os.path.join(
        os.path.join(env.tmpdir(), string.lstrip(self.path, "/ ")),
        os.path.basename(filename))
    overridden = os.path.exists(path)
    copy_file(self.file.path, path)
    result = CheckerResult(checker=self)
    if not overridden:
        result.set_log("")
        result.set_passed(True)
    else:
        # The student submitted a file of the same name — flag the collision.
        result.set_log(
            "The file '%s' was overridden" %
            os.path.join(self.path, os.path.basename(self.file.path)))
        result.set_passed(False)
    source_path = os.path.join(string.lstrip(self.path, "/ "),
                               os.path.basename(filename))
    # Register the file content with the environment so later checkers see it.
    # NOTE(review): assumes self.file's read position is at the start here — confirm.
    env.add_source(source_path, get_unicode(self.file.read()))
    return result
def run(self, env):
    """Run Checkstyle over the submitted sources and grade the audit.

    Passes only when the run did not time out, Checkstyle exited with
    status 0, and the output reports a completed audit.
    """
    # Copy the saved check configuration into the sandbox.
    config_path = os.path.join(env.tmpdir(), "checks.xml")
    copy_file(self.configuration.path, config_path)
    # Run the tests
    args = [settings.JVM, "-cp", settings.CHECKSTYLEALLJAR, "-Dbasedir=.",
            "com.puppycrawl.tools.checkstyle.Main", "-c", "checks.xml"] \
           + [name for (name, content) in env.sources()]
    [output, error, exitcode, timed_out] = execute_arglist(args, env.tmpdir())
    # Remove Praktomat-Path-Prefixes from result:
    output = re.sub(r"^" + re.escape(env.tmpdir()) + "/+", "", output, flags=re.MULTILINE)
    result = CheckerResult(checker=self)
    log = '<pre>' + escape(output) + '</pre>'
    if timed_out:
        log = log + '<div class="error">Timeout occured!</div>'
    result.set_log(log)
    # FIX: compare against None with "is not None" (PEP 8 E711) instead of
    # the original "not re.match(...) == None" double negation.
    audit_completed = re.match('Starting audit...\nAudit done.', output) is not None
    result.set_passed(not timed_out and not exitcode and audit_completed)
    return result
def run(self, env):
    """Compile the submission and report the build outcome.

    Any compiler warning fails the check; on success the main module is
    looked up again and registered with ENV.
    """
    checker_result = CheckerResult(checker=self)

    source_names = list(self.get_file_names(env))
    cmd = [self.compiler()] + self.flags(env) + source_names + self.libs()
    output = execute_arglist(cmd, env.tmpdir(), self.environment())[0]

    # The compiler prints "Linking <binary> ..." when it produced an
    # executable; remember that name as the detected main module.
    main_match = re.search(r"^Linking ([^ ]*) ...$", output, re.MULTILINE)
    if main_match:
        self._detected_main = main_match.group(1)

    output = self.enhance_output(env, escape(output))

    # Allow server to delete created subfolders
    execute('chmod -R 0777 *', env.tmpdir())

    # We mustn't have any warnings.
    passed = not self.has_warnings(output)

    solution_paths = [sf.path() for sf in env.solution().solutionfile_set.all()]
    log = self.build_log(output, cmd, set(source_names).intersection(solution_paths))

    # Now that submission was successfully built, try to find the main
    # module's name again.
    try:
        if passed:
            env.set_program(self.main_module(env))
    except self.NotFoundError as e:
        passed = not self._main_required
        log += "<pre>" + str(e) + "</pre>"

    checker_result.set_passed(passed)
    checker_result.set_log(log)
    return checker_result
def run(self, env):
    """Compile the submission, registering the main module with ENV
    both before and after the build.

    The build fails on any compiler warning; a missing main module only
    fails the check when this builder requires one.
    """
    result = CheckerResult(checker=self)

    # Try to find out the main module's name with only the source files
    # present; a miss here is not yet an error.
    try:
        env.set_program(self.main_module(env))
    except self.NotFoundError:
        pass

    sources = list(self.get_file_names(env))
    cmd = ([self.compiler()] + self.output_flags(env) + self.flags(env)
           + sources + self.libs())
    output = execute_arglist(cmd, env.tmpdir(), self.environment())[0]
    output = self.enhance_output(env, escape(output))

    # Allow server to delete created subfolders
    execute('chmod -R 0777 *', env.tmpdir())

    # We mustn't have any warnings.
    passed = not self.has_warnings(output)

    solution_paths = [sf.path() for sf in env.solution().solutionfile_set.all()]
    log = self.build_log(output, cmd, set(sources).intersection(solution_paths))

    # Now that the submission was built, look the main module up again.
    try:
        if passed:
            env.set_program(self.main_module(env))
    except self.NotFoundError as e:
        # But only complain if the main method is required.
        if self._main_required:
            log += "<pre>" + str(e) + "</pre>"
            passed = False

    result.set_passed(passed)
    result.set_log(log)
    return result
def run(self, env):
    """ Build it.

    Resolves the main module first (failing early when it cannot be
    found), then compiles all submitted files; any compiler warning
    fails the check.
    """
    result = CheckerResult(checker=self)
    try:
        env.set_program(self.main_module(env))
    except self.NotFoundError as e:
        # FIX: the log must be a string, not the exception object itself;
        # format it the same way the sibling builders do.
        result.set_log("<pre>" + str(e) + "</pre>")
        result.set_passed(False)
        return result
    filenames = [quote(name) for name in self.get_file_names(env)]
    args = [self.compiler()] + self.output_flags(env) + self.flags(env) + filenames + self.libs()
    output = execute(args, env.tmpdir(), self.environment())[0]
    # NOTE(review): unlike the sibling builders, the raw compiler output is
    # NOT HTML-escaped here before enhance_output() -- confirm that
    # build_log() escapes it, otherwise markup from the compiler output
    # leaks into the HTML log.
    output = self.enhance_output(env, output)
    # Allow server to delete created subfolders
    execute('chmod -R 0777 *', env.tmpdir())
    # The executable has to exist and we mustn't have any warnings.
    passed = not self.has_warnings(output)
    result.set_log(self.build_log(output, args, set(filenames).intersection([quote(solutionfile.path()) for solutionfile in env.solution().solutionfile_set.all()])))
    result.set_passed(passed)
    return result
def run(self, env):
    """Copy, build and execute the Haskell test-framework suite.

    Steps: stage the test-suite file, optionally verify all submitted
    modules compile under -XSafe, link the suite with its own main, then
    run the resulting binary in the sandbox and grade its output.
    """
    filecopy_result = self.run_file(env)
    if not filecopy_result.passed:
        return filecopy_result

    def build_failure(section, build_log):
        # Wrap a failed build step's log into a failing CheckerResult.
        r = CheckerResult(checker=self)
        r.set_passed(False)
        r.set_log('<pre>' + escape(self.test_description)
                  + '\n\n======== Test Results (' + section + ') ======\n\n</pre><br/>\n'
                  + build_log)
        return r

    if self.require_safe:
        safe_builder = IgnoringHaskellBuilder(_flags="-XSafe",
                                              _file_pattern=r"^.*\.[hH][sS]$",
                                              _main_required=False)
        safe_builder._ignore = self.ignore.split(" ") + [self.path_relative_to_sandbox()]
        safe_build_result = safe_builder.run(env)
        if not safe_build_result.passed:
            return build_failure('Safe', safe_build_result.log)

    test_builder = TestOnlyBuildingBuilder(
        _flags="-main-is " + self.module_name(),
        _libs="test-framework test-framework-quickcheck2 test-framework-hunit")
    test_builder._testsuite_filename = self.path_relative_to_sandbox()
    test_build_result = test_builder.run(env)
    if not test_build_result.passed:
        return build_failure('Building all', test_build_result.log)

    # Execute the compiled test binary inside the sandbox.
    run_env = {'UPLOAD_ROOT': settings.UPLOAD_ROOT}
    cmd = ["./" + self.module_binary_name(), "--maximum-generated-tests=5000"]
    [output, error, exitcode, timed_out] = execute_arglist(
        cmd, env.tmpdir(),
        environment_variables=run_env,
        timeout=settings.TEST_TIMEOUT,
        fileseeklimit=settings.TEST_MAXFILESIZE)

    result = CheckerResult(checker=self)
    (output, truncated) = truncated_log(output)
    output = ('<pre>' + escape(self.test_description)
              + '\n\n======== Test Results ======\n\n</pre><br/><pre>'
              + escape(output) + '</pre>')

    if self.include_testcase_in_report in ["FULL", "DL"]:
        testsuit_template = get_template('checker/checker/haskell_test_framework_report.html')
        output += testsuit_template.render(Context({
            'showSource': (self.include_testcase_in_report == "FULL"),
            'testfile': self.file,
            'testfilename': self.path_relative_to_sandbox(),
            'testfileContent': encoding.get_unicode(self.file.read()),
        }))

    result.set_log(output, timed_out=timed_out, truncated=truncated)
    result.set_passed(not exitcode and not timed_out
                      and self.output_ok(output) and not truncated)
    return result
def run(self, env):
    """Count code/comment lines per source file and append a summary
    (in German) to the checker log.

    Each source is scanned character by character with a one-character
    lookahead, recognising C/Java-style comments (``//`` and
    ``/* ... */``). A line counts as "comment" or "code" as soon as it
    contains at least one letter or digit in the respective region; a
    line may count as both ("coco"). ``passed`` is never cleared, so the
    statistics are informational only.
    """
    log = ""
    passed = 1  # never set to 0 below: this check always passes
    files = 0
    lines = 0
    comment_lines = 0
    code_lines = 0
    coco_lines = 0
    in_long_comment = 0  # inside a /* ... */ comment (may span lines)
    # Here's how to access the sources.
    for (name, content) in env.sources():
        # An unterminated /* ... */ must not leak into the next file.
        assert not in_long_comment
        in_short_comment = 0  # inside a // comment; reset at each newline
        files = files + 1
        lines_in_file = 0
        comment_lines_in_file = 0
        code_lines_in_file = 0
        coco_lines_in_file = 0
        line_has_comment = 0
        line_has_code = 0
        for i in range(len(content)):
            # sets both lookaheads (if available); '\0' marks "no next char"
            la1 = content[i]
            la2 = '\0'
            if i+1 < len(content):
                la2 = content[i+1]
            if la1 == '\n':
                # new line: classify the finished line, then reset the
                # per-line flags (// comments end at the newline)
                lines_in_file = lines_in_file + 1
                if line_has_comment:
                    comment_lines_in_file = comment_lines_in_file + 1
                if line_has_code:
                    code_lines_in_file = code_lines_in_file + 1
                if line_has_comment and line_has_code:
                    coco_lines_in_file = coco_lines_in_file + 1
                line_has_comment = 0
                line_has_code = 0
                in_short_comment = 0
                continue
            if in_long_comment and la1 == '*' and la2 == "/":
                # "*/" closes the long comment
                in_long_comment = 0
                continue
            if in_long_comment or in_short_comment:
                # only letters/digits make a line count as commented
                if la1 in string.digits or la1 in string.letters:
                    line_has_comment = 1
            else:
                if la1 in string.digits or la1 in string.letters:
                    line_has_code = 1
                if la1 == "/":
                    # comment openers: "/*" (long) or "//" (short)
                    if la2 == "*":
                        in_long_comment = 1
                        continue
                    if la2 == "/":
                        in_short_comment = 1
                        continue
        try:
            # FIXME : code_lines_in_file, comment_lines_in_file
            # may be 0!!
            log = log + (escape(name) + ": " + `lines_in_file` + " Zeilen, davon " + `code_lines_in_file` + " Code (" + `code_lines_in_file * 100 / lines_in_file` + "%), " + `comment_lines_in_file` + " Kommentar (" + `comment_lines_in_file * 100 / lines_in_file` + "%), " + `coco_lines_in_file` + " beides (" + `coco_lines_in_file * 100 / lines_in_file` + "%).<br>\n")
        except ZeroDivisionError:
            # FIXME -- raised when the file had no newline at all
            log = log + "Line Width Checker (l 178): ZeroDivisionError " + \
                  " (no comment / code / coco lines in file!)"
        lines = lines + lines_in_file
        comment_lines = comment_lines + comment_lines_in_file
        code_lines = code_lines + code_lines_in_file
        coco_lines = coco_lines + coco_lines_in_file
    # All files have been processed.
    try:
        log = log + ("<br>" + `files` + " Dateien, " + `lines` + " Zeilen, davon " + `code_lines` + " Code (" + `code_lines * 100 / lines` + "%), " + `comment_lines` + " Kommentar (" + `comment_lines * 100 / lines` + "%), " + `coco_lines` + " beides (" + `coco_lines * 100 / lines` + "%).\n")
    except ZeroDivisionError:
        # FIXME -- raised when no source contained any line at all
        log = log + "Line Width Checker (l 197): ZeroDivisionError " + \
              " (no comment / code / coco lines in file!)"
    # Generate the result.
    result = CheckerResult(checker=self)
    result.set_log(log)
    result.set_passed(passed)
    # That's all!
    return result
def run(self, env):
    """Stage, build and run the Haskell test-framework test suite.

    First copies the suite into the sandbox; optionally checks that the
    submission builds under -XSafe; then links the suite with its own
    main module, runs the binary, and grades the (possibly truncated)
    output.
    """
    copy_result = self.run_file(env)
    if not copy_result.passed:
        return copy_result

    # Optional: all submitted Haskell modules must compile under -XSafe.
    if self.require_safe:
        safety_check = IgnoringHaskellBuilder(_flags="-XSafe",
                                              _file_pattern=r"^.*\.[hH][sS]$",
                                              _main_required=False)
        safety_check._ignore = self.ignore.split(" ") + [self.path_relative_to_sandbox()]
        safety_result = safety_check.run(env)
        if not safety_result.passed:
            failure = CheckerResult(checker=self)
            failure.set_passed(False)
            failure.set_log('<pre>' + escape(self.test_description)
                            + '\n\n======== Test Results (Safe) ======\n\n</pre><br/>\n'
                            + safety_result.log)
            return failure

    # Link the test suite against the test-framework libraries.
    suite_builder = TestOnlyBuildingBuilder(
        _flags="-main-is " + self.module_name(),
        _libs="test-framework test-framework-quickcheck2 test-framework-hunit")
    suite_builder._testsuite_filename = self.path_relative_to_sandbox()
    suite_build = suite_builder.run(env)
    if not suite_build.passed:
        failure = CheckerResult(checker=self)
        failure.set_passed(False)
        failure.set_log('<pre>' + escape(self.test_description)
                        + '\n\n======== Test Results (Building all) ======\n\n</pre><br/>\n'
                        + suite_build.log)
        return failure

    # Execute the compiled suite with a bounded number of generated tests.
    binary = "./" + self.module_binary_name()
    (output, error, exitcode, timed_out) = execute_arglist(
        [binary, "--maximum-generated-tests=5000"],
        env.tmpdir(),
        environment_variables={'UPLOAD_ROOT': settings.UPLOAD_ROOT},
        timeout=settings.TEST_TIMEOUT,
        fileseeklimit=settings.TEST_MAXFILESIZE)

    result = CheckerResult(checker=self)
    (output, truncated) = truncated_log(output)
    output = ('<pre>' + escape(self.test_description)
              + '\n\n======== Test Results ======\n\n</pre><br/><pre>'
              + escape(output) + '</pre>')

    # Optionally append the test case itself to the report.
    if self.include_testcase_in_report in ["FULL", "DL"]:
        report_template = get_template('checker/checker/haskell_test_framework_report.html')
        context = Context({
            'showSource': (self.include_testcase_in_report == "FULL"),
            'testfile': self.file,
            'testfilename': self.path_relative_to_sandbox(),
            'testfileContent': encoding.get_unicode(self.file.read())})
        output += report_template.render(context)

    result.set_log(output, timed_out=timed_out, truncated=truncated)
    result.set_passed(not exitcode and not timed_out
                      and self.output_ok(output) and not truncated)
    return result
def run(self, env): """ Here's the actual work. This runs the check in the environment ENV, returning a CheckerResult. """ log = "" passed = 1 files = 0 lines = 0 comment_lines = 0 code_lines = 0 coco_lines = 0 in_long_comment = 0 # Here's how to access the sources. for (name, content) in env.sources(): assert not in_long_comment in_short_comment = 0 files = files + 1 lines_in_file = 0 comment_lines_in_file = 0 code_lines_in_file = 0 coco_lines_in_file = 0 line_has_comment = 0 line_has_code = 0 for i in range(len(content)): # sets both lookaheads (if available) la1 = content[i] la2 = '\0' if i+1 < len(content): la2 = content[i+1] if la1 == '\n': # new line lines_in_file = lines_in_file + 1 if line_has_comment: comment_lines_in_file = comment_lines_in_file + 1 if line_has_code: code_lines_in_file = code_lines_in_file + 1 if line_has_comment and line_has_code: coco_lines_in_file = coco_lines_in_file + 1 line_has_comment = 0 line_has_code = 0 in_short_comment = 0 continue if in_long_comment and la1 == '*' and la2 == "/": in_long_comment = 0 continue if in_long_comment or in_short_comment: if la1 in string.digits or la1 in string.letters: line_has_comment = 1 else: if la1 in string.digits or la1 in string.letters: line_has_code = 1 if la1 == "/": if la2 == "*": in_long_comment = 1 continue if la2 == "/": in_short_comment = 1 continue try: # FIXME : code_lines_in_file, comment_lines_in_file # may be 0!! 
log = log + ( name + ": " + `lines_in_file` + " Zeilen, davon " + `code_lines_in_file` + " Code (" + `code_lines_in_file*100 / lines_in_file` + "%), " + `comment_lines_in_file` + " Kommentar (" + `comment_lines_in_file*100 / lines_in_file` + "%), " + `coco_lines_in_file` + " beides (" + `coco_lines_in_file*100 / lines_in_file` + "%).<br>\n") except ZeroDivisionError: # FIXME log = log + "Line Width Checker (l 178): ZeroDivisionError " + \ " (no comment / code / coco lines in file!)" lines = lines + lines_in_file comment_lines = comment_lines + comment_lines_in_file code_lines = code_lines + code_lines_in_file coco_lines = coco_lines + coco_lines_in_file try: log = log + ("<br>" + `files` + " Dateien, " + `lines` + " Zeilen, davon " + `code_lines` + " Code (" + `code_lines * 100 / lines` + "%), " + `comment_lines` + " Kommentar (" + `comment_lines * 100 / lines` + "%), " + `coco_lines` + " beides (" + `coco_lines * 100 / lines` + "%).\n") except ZeroDivisionError: # FIXME log = log + "Line Width Checker (l 197): ZeroDivisionError " + \ " (no comment / code / coco lines in file!)" # Generate the result. result = CheckerResult(checker=self) result.set_log(log) result.set_passed(passed) # That's all! return result