def handle_prohibited_statements(feedback_settings):
    """Abort grading with a zero grade when prohibited statements are found.

    Reports the failure to INGInious and exits the process (status 0).
    """
    if helper.contains_prohibited_statement(feedback_settings):
        # Key 2 conventionally maps to the "prohibited statement" message
        failure_msg = feedback_settings["status_message"].get(2, "Uncommon Failure")
        feedback.set_global_feedback(failure_msg)
        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)
def compilation_feedback(result):
    """Fail the submission with a zero grade when compilation did not succeed."""
    if result.returncode == 0:
        return
    # Surface the compiler output for debugging
    print(result.stderr)
    feedback.set_global_feedback(
        "Your file did not compile : please don't use INGINIOUS as an IDE ...")
    feedback.set_global_result("failed")
    feedback.set_grade(0.0)
    sys.exit(0)
def jar_feedback(result):
    """Fail the submission with a zero grade when the jar step did not succeed."""
    if result.returncode == 0:
        return
    # Surface the tool output for debugging
    print(result.stderr)
    feedback.set_global_feedback(
        "A technical problem has occurred: please report it !")
    feedback.set_global_result("failed")
    feedback.set_grade(0.0)
    sys.exit(0)
def feedback_result(score_ratio, feedback_settings):
    """Translate a score ratio into a global result and a grade.

    The submission succeeds when the ratio reaches the configured quorum.
    """
    passed = score_ratio >= feedback_settings["quorum"]
    feedback.set_global_result("success" if passed else "failed")
    # Coverage (JaCoCo) exercises grant full marks once the quorum is reached,
    # otherwise the raw ratio is kept.
    if passed and feedback_settings["feedback_kind"] == "JaCoCo":
        final_ratio = 1.0
    else:
        final_ratio = score_ratio
    feedback.set_grade(final_ratio * 100)
def main(result, feedback_settings):
    """Custom feedback entry point: parse the grader output and grade the student.

    Args:
        result: completed process holding the grader's returncode/stdout.
        feedback_settings: task settings (status messages, quorum, ...).
    """
    # Top level message
    msg = "{}\n".format(feedback_settings["status_message"].get(
        result.returncode, "Uncommon Failure"))
    feedback.set_global_feedback(msg, True)
    print("INSIDE CUSTOM SCRIPT")

    # No runnable method(s): forward the grader's explanation to the student
    if result.returncode == 3:
        feedback.set_global_feedback(result.stdout, True)

    # if the student didn't cheat
    if result.returncode not in [2, 3]:
        # Extract "<found>\t<total>\t<NFP|FP>" from the grader output.
        # Renamed from `format` to avoid shadowing the builtin; raw string for regex.
        pattern = re.compile(
            r"(?P<nBugsFound>[0-9]+)\t(?P<nBugs>[0-9]+)\t(?P<status>N?FP)\n")
        regex_result = pattern.search(result.stdout)
        nBugsFound = int(regex_result.group("nBugsFound"))
        nBugs = int(regex_result.group("nBugs"))
        status = regex_result.group("status")
        score_ratio = nBugsFound / nBugs
        print("nBugsFound : {} , nBugs : {} , status : {}".format(
            nBugsFound, nBugs, status))

        # Notify the user of its progress. Success requires reaching the
        # quorum WITHOUT any false positive (FP).
        outcome = ("success"
                   if score_ratio >= feedback_settings["quorum"] and status == "NFP"
                   else "failed")
        # if false positive, give him zero
        updated_score_ratio = score_ratio if status == "NFP" else 0.0
        feedback.set_global_result(outcome)
        feedback.set_grade(updated_score_ratio * 100)

        # Give him some explanation
        msg2 = "You have found {} bug(s) on a total of {} bugs\n".format(
            nBugsFound, nBugs)
        feedback.set_global_feedback(msg2, True)
        if status == "FP":
            feedback.set_global_feedback(
                "Your test suite generates a false positive: therefore you have 0.0%.",
                True)
    else:
        # Cheating detected: zero grade
        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
def handle_verification(feedback_settings):
    """Verify required/prohibited statements in the student code; exit on error."""
    problems = helper.statement_verification(feedback_settings)
    # An empty result means the student code passed verification
    if not problems:
        return
    feedback.set_global_feedback(
        feedback_settings["status_message"].get(2, "Uncommon Failure"))
    # Point the student to each faulty sub-problem
    for check, problem_id in problems:
        middle_word = " found " if check == "prohibited" else " missing "
        message = check.capitalize() + " statement(s) " + middle_word + "HERE"
        feedback.set_problem_feedback(message, problem_id, True)
    feedback.set_global_result("failed")
    feedback.set_grade(0.0)
    sys.exit(0)
def main(result, feedback_settings):
    """Custom feedback entry point with detailed per-bug feedback.

    Args:
        result: completed process holding the grader's returncode/stdout.
        feedback_settings: task settings (status messages, quorum, ...).
    """
    # Top level message
    msg = "{}\n".format(feedback_settings["status_message"].get(
        result.returncode, "Uncommon Failure"))
    feedback.set_global_feedback(msg, True)
    print("INSIDE CUSTOM SCRIPT")

    # No runnable method(s): forward the grader's explanation to the student
    if result.returncode == 3:
        feedback.set_global_feedback(result.stdout, True)

    # if the student didn't cheat
    if result.returncode not in [2, 3]:
        # Extract "<found>\t<total>\t<NFP|FP>" from the grader output.
        # Renamed from `format` to avoid shadowing the builtin; raw string for regex.
        pattern = re.compile(
            r"(?P<nBugsFound>[0-9]+)\t(?P<nBugs>[0-9]+)\t(?P<status>N?FP)\n")
        regex_result = pattern.search(result.stdout)
        nBugsFound = int(regex_result.group("nBugsFound"))
        nBugs = int(regex_result.group("nBugs"))
        status = regex_result.group("status")
        score_ratio = nBugsFound / nBugs
        # Everything after the first line is per-bug detail for the student
        extraFeedback = result.stdout.split('\n')[1:]
        print("nBugsFound : {} , nBugs : {} , status : {}".format(
            nBugsFound, nBugs, status))

        # Notify the user of its progress. Success requires reaching the
        # quorum WITHOUT any false positive (FP).
        outcome = ("success"
                   if score_ratio >= feedback_settings["quorum"] and status == "NFP"
                   else "failed")
        # if false positive, give him zero
        updated_score_ratio = score_ratio if status == "NFP" else 0.0
        feedback.set_global_result(outcome)
        feedback.set_grade(updated_score_ratio * 100)

        # Give him some explanation: a summary line followed by the indented bug list
        details = extraFeedback[0]
        listBugs = extraFeedback[1:]
        bug_feedback = rst.indent_block(1, '\n'.join(listBugs), '\t')
        feedback.set_global_feedback(details + bug_feedback)
    else:
        # Cheating detected: zero grade
        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
def custom_result_feedback(result, feedback_settings):
    """Run the task's custom feedback script; zero the grade on any failure.

    Dynamically loads the module located at
    feedback_settings["custom_feedback_script"] (relative to CWD) and invokes
    its main(result, feedback_settings).
    """
    # Dynamically load modules we need
    # Credits to https://stackoverflow.com/a/67692/6149867
    # And for the explanation : http://www.blog.pythonlibrary.org/2016/05/27/python-201-an-intro-to-importlib/
    def dynamically_load_module(module, path):
        # Import a module from an arbitrary file path via importlib machinery
        spec = importlib.util.spec_from_file_location(module, path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod

    try:
        custom_feedback_path = str(CWD / feedback_settings["custom_feedback_script"])
        custom_feedback_module = dynamically_load_module("custom_feedback_module",
                                                         custom_feedback_path)
        custom_feedback_module.main(result, feedback_settings)
    except BaseException:
        # The former tuple (RuntimeError, ImportError, BaseException) was
        # equivalent to BaseException alone. The broad catch is deliberate:
        # a broken custom script must never crash the grader without feedback.
        traceback.print_exc()  # useful for debugging a custom script that failed
        feedback.set_global_feedback(
            "A technical problem has occurred in the custom feedback script: please report it !")
        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)
def compilation_feedback(result, full_log):
    """Report compilation failures (to the student or a TA) and exit with grade 0."""
    if result.returncode == 0:
        return
    errors = extract_compilation_errors(result.stderr)
    templates_folder = helper.relative_path(PATH_TEMPLATES)
    # Errors coming from the templates folder are blamed on the student
    student_to_blame = any(
        error.get("source", "templates") == templates_folder for error in errors)
    if student_to_blame:
        # Generate a custom RST report that the student will see
        report = generate_compilation_errors_table(errors, [PATH_TEMPLATES])
        feedback.set_global_feedback(report, True)
    elif full_log:
        # A teaching assistant wants to debug this task and no error is
        # coming from the templates folder
        report = generate_compilation_errors_table(errors, [PATH_SRC, PATH_FLAVOUR])
        feedback.set_global_feedback(report, True)
        # For debug purposes
        print(result.stderr)
    else:
        # Either the student has made mistake(s) eg in the signature(s) of method(s),
        # or a TA added code that doesn't compile
        feedback.set_global_feedback(
            "{}{}".format(
                "You modified the signature of at least a function or modified the content of a class.",
                "You should not; the tests fail to compile"
            ))
    # Final instructions common to every scenario
    feedback.set_global_result("failed")
    feedback.set_grade(0.0)
    sys.exit(0)
def result_feedback(result, feedback_settings):
    """Dispatch the grading result to the configured feedback strategy.

    Falls back to a binary 0/100 grade when the task declares no feedback.
    """
    # Top level message
    msg = "{}\n".format(feedback_settings["status_message"].get(
        result.returncode, "Uncommon Failure"))
    # if we have a feedback, use it
    if feedback_settings["has_feedback"]:
        try:
            strategy = FEEDBACK_STRATEGIES[feedback_settings["feedback_kind"]]
            strategy(result, feedback_settings, msg)
        except BaseException:
            # The former tuple (KeyError, RuntimeError, BaseException) was
            # equivalent to BaseException alone. The broad catch is deliberate:
            # a broken strategy must never crash the grader without feedback.
            traceback.print_exc()  # useful for debugging a custom script that failed
            feedback.set_global_result("failed")
            feedback.set_global_feedback(
                "A technical problem has occurred using the feedback strategy : {} . Please report it !"
                .format(feedback_settings["feedback_kind"]))
            feedback.set_grade(0.0)
            sys.exit(0)
    # For exercises with binary result : 0 or 100
    else:
        feedback.set_global_feedback(msg, True)
        score_ratio = 1.0 if result.returncode == 0 else 0.0
        feedback_result(score_ratio, feedback_settings)
# NOTE(review): this fragment looks truncated — the first feedback call below
# most likely belongs to a preceding `if check == ...:` branch that is not
# visible here. Confirm the enclosing control flow against the full script.
feedback.set_problem_feedback(
    "Votre réponse doit contenir des parenthèses pour délimiter le point",
    "q1")  # Here
if check == "lettre":
    feedback.set_problem_feedback(
        "Votre réponse ne doit contenir que des chiffres", "q1")  # Here
if check == "separateur":
    feedback.set_problem_feedback(
        "Les coordonnées doivent être séparées par une virgule", "q1")  # Here
elif check == True:
    # Replace '(2,2)' by the correct answer; e.g. if the expected answer to
    # the question is (5,6), write "(5,6)"
    if is_correct(answer, "(2,2)") == False:
        feedback.set_problem_result("failed", "q1")  # Here
        feedback.set_problem_feedback("Votre réponse est incorrect", "q1")  # Here
    elif is_correct(answer, "(2,2)") == True:
        feedback.set_problem_result("success", "q1")  # Here
        feedback.set_problem_feedback("Bravo!", "q1")  # Here
        grade += 100

feedback.set_grade(grade)
if grade == 100:
    feedback.set_global_result("success")
else:
    feedback.set_global_result("failed")
# Mark each pre-computed test result on its sub-problem
for pid, result in tests_result.items():
    feedback.set_problem_result("success" if result else "failed", pid)

# NOTE(review): `score`, `total` and `use_fifty` are presumably initialised
# earlier in the script — confirm against the full source.
with open("../task.yaml", 'r') as stream:
    # safe_load avoids arbitrary Python object construction; yaml.load
    # without an explicit Loader is deprecated and unsafe.
    problems = yaml.safe_load(stream)['problems']

# Grade every 'match' problem declared in the task description
for name, meta in problems.items():
    if meta['type'] == 'match':
        answer = input.get_input(name)
        if answer == meta['answer']:
            feedback.set_problem_result("success", name)
            feedback.set_problem_feedback(
                "Votre réponse est correcte. (1/1 pts)", name, True)
            score += 1
        else:
            feedback.set_problem_result("failed", name)
            feedback.set_problem_feedback(
                "Votre réponse est incorrecte. (0/1 pts)", name, True)
        total += 1

# Normalise to a percentage, guarding against division by zero
score = 100 * score / (total if not total == 0 else 1)
feedback.set_grade(score)
# With use_fifty, 50% is enough to pass; otherwise a perfect score is required
global_result = "success" if (score >= 50 and use_fifty) or (
    score == 100 and not use_fifty) else "failed"
feedback.set_global_result(global_result)
def compilation_feedback(result):
    """Report Java compilation errors as an RST csv-table and exit with grade 0.

    Errors originating from the templates folder are blamed on the student
    with a detailed table; otherwise a generic message is shown.
    """
    if result.returncode != 0:
        errors = extract_compilation_errors(result.stderr)
        # If any errors come from the templates, blame the student with this code
        templates_folder = helper.relative_path(PATH_TEMPLATES)
        if any(error.get("source", "templates") == templates_folder
               for error in errors):
            # Generate a custom RST report that the student will see
            msg = ""  # Don't add \r to that as it produces strange results
            next_line = "\n"
            indentation = 4
            headers = ["File", "Line", "Error Message", "Code"]

            # Headers. We need a quote symbol that isn't in the Java code: §
            msg += ".. csv-table:: " + next_line
            msg += " " * indentation + ":quote: §" + next_line
            msg += " " * indentation + ":header: " + ",".join(headers) + next_line
            msg += " " * indentation + ":widths: auto" + next_line * 2

            # Contents: one row per error coming from the templates folder
            for error in [error for error in errors
                          if error.get("source", "Unknown Source") == templates_folder]:
                # Print File, Line and Error message without problem
                msg += " " * indentation + "§{}§".format(error.get("file", "Unknown Filename"))
                msg += ",§{}§".format(error.get("line", "Unknown Line Number"))
                msg += ",§{}§".format(error.get("message", "Unknown Message"))

                # Print the code. INGINIOUS might truncate stderr if too long,
                # which would break the CSV table, hence the empty-cell fix.
                code_lines = error.get("code", [])
                if not code_lines:
                    # Might be confusing but these are the RST rules for an empty block
                    msg += ",§§" + next_line
                else:
                    msg += ",§.. code-block:: java" + next_line * 2
                    # Inside a code block we need one extra indent level for valid RST
                    indentation_for_code = indentation + 1
                    # BUGFIX: the former `code_line != code_lines[-1]` test
                    # dropped newline separators whenever a code line appeared
                    # twice; joining handles separators correctly.
                    msg += next_line.join(
                        " " * indentation_for_code + code_line
                        for code_line in code_lines)
                    # At the end, don't forget the quote symbol and the next line
                    msg += "§" + next_line

            # Send it to Inginious
            feedback.set_global_feedback(msg, True)
        else:
            # Either the student has made mistake(s) eg in the signature(s) of method(s),
            # or a TA added code that doesn't compile
            feedback.set_global_feedback(
                "{}{}".format(
                    "You modified the signature of at least a function or modified the content of a class.",
                    "You should not; the tests fail to compile"
                ))
            # For debug purposes; if the student code isn't to blame
            print(result.stderr)

        # Final instructions common to all scenarios
        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)