コード例 #1
0
def main(_):
    """Fail the submission if ``Math.min`` appears anywhere in the student file.

    Returns 1 (and sets a failed result) on a hit, 0 otherwise.
    """
    # Even occurrences inside comments are forbidden.
    hits = lcount("Math.min", "StudentCode/Etudiant.java")
    if hits == 0:
        return 0
    feedback.set_global_result('failed')
    feedback.set_global_feedback(
        _("Il est interdit d'utiliser Math.min, même en commentaires."))
    return 1
コード例 #2
0
def handle_prohibited_statements(feedback_settings):
    """Abort grading with a zero grade when a prohibited statement is found."""
    if helper.contains_prohibited_statement(feedback_settings):
        # Status code 2 is the "prohibited statement" message slot.
        feedback.set_global_feedback(
            feedback_settings["status_message"].get(2, "Uncommon Failure"))
        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)
コード例 #3
0
def compilation_feedback(result):
    """Zero the grade and stop the grader when compilation failed."""
    if result.returncode == 0:
        return
    # Keep the compiler output in the debug log for the grader.
    print(result.stderr)
    feedback.set_global_feedback(
        "Your file did not compile : please don't use INGINIOUS as an IDE ...")
    feedback.set_global_result("failed")
    feedback.set_grade(0.0)
    sys.exit(0)
コード例 #4
0
def feedback_result(score_ratio, feedback_settings):
    """Publish the global result and grade derived from ``score_ratio``."""
    passed = score_ratio >= feedback_settings["quorum"]
    feedback.set_global_result("success" if passed else "failed")
    # Coverage (JaCoCo) exercises are all-or-nothing: full marks once the
    # quorum is reached, otherwise the raw ratio.
    if passed and feedback_settings["feedback_kind"] == "JaCoCo":
        final_ratio = 1.0
    else:
        final_ratio = score_ratio
    feedback.set_grade(final_ratio * 100)
コード例 #5
0
def AC():
    """Report an accepted submission, locally or through INGINIOUS."""
    if local:
        # Local run: just echo the verdict.
        print('accepted')
    else:
        feedback.set_global_result("success")
        feedback.set_global_feedback('accepted')
    close()
コード例 #6
0
ファイル: feedback.py プロジェクト: Lutrix8/LEPL1402
def jar_feedback(result):
    """Flag a failed jar execution as a technical (grader-side) problem."""
    if result.returncode == 0:
        return
    print(result.stderr)  # keep details in the debug log
    feedback.set_global_feedback(
        "A technical problem has occurred: please report it !")
    feedback.set_global_result("failed")
    feedback.set_grade(0.0)
    sys.exit(0)
コード例 #7
0
ファイル: custom_translatable.py プロジェクト: elsys/CS1-Java
def main(_):
    """Reject submissions that use Java's xor operator (^) anywhere.

    Returns 1 (and sets a failed result) on a hit, 0 otherwise.
    """
    if lcount("^", "StudentCode/Etudiant.java") == 0:
        return 0
    feedback.set_global_result('failed')
    feedback.set_global_feedback(
        _("Il est interdit d'utiliser l'opérateur xor (^) de java, même en commentaires."
          ))
    return 1
コード例 #8
0
def main(_):
    """Reject any use of an already-implemented parsing method.

    Returns 1 (and sets a failed result) on a hit, 0 otherwise.
    """
    if lcount("parse", "StudentCode/Etudiant.java") == 0:
        return 0
    feedback.set_global_result('failed')
    feedback.set_global_feedback(
        _("Il est interdit d'utiliser une méthode de parsing déjà implémentée, même en commentaires."
          ))
    return 1
コード例 #9
0
def main(_):
    """Reject submissions where ``return `` occurs more than once.

    Returns 1 (and sets a failed result) when flagged, 0 otherwise.
    """
    # A single occurrence is tolerated — presumably the one in the
    # template; TODO confirm against the task template.
    if lcount("return ", "StudentCode/Etudiant.java") <= 1:
        return 0
    feedback.set_global_result('failed')
    feedback.set_global_feedback(
        _("Vous n'avez pas besoin de l'instruction return dans cet exercice ! Observez la signature de votre méthode."
          ))
    return 1
コード例 #10
0
ファイル: custom_translatable.py プロジェクト: elsys/CS1-Java
def main(_):
    """Reject submissions using ``contains`` more than once.

    Returns 1 (and sets a failed result) when flagged, 0 otherwise.
    """
    if lcount("contains", "StudentCode/Etudiant.java") <= 1:
        return 0
    feedback.set_global_result('failed')
    feedback.set_global_feedback(
        _("Il est interdit d'utiliser la méthode contains() de la classe String, même en commentaires."
          ))
    return 1
コード例 #11
0
def CE(msg):
    """Report a compile error; ``msg`` is only shown when running locally."""
    if not local:
        feedback.set_global_result('failed')
        feedback.set_global_feedback('compile error')
    else:
        print(msg)
        print('compile error')
    close()
コード例 #12
0
def WA(msg):
    """Report a wrong answer; ``msg`` is only shown when running locally."""
    if not local:
        feedback.set_global_result('failed')
        feedback.set_global_feedback('wrong answer')
    else:
        print(msg)
        print('wrong answer')
    close()
コード例 #13
0
def TLE(msg):
    """Report a time-limit-exceeded verdict; ``msg`` only shown locally."""
    if not local:
        feedback.set_global_result('failed')
        feedback.set_global_feedback('time limit exceeded')
    else:
        print(msg)
        print('time limit exceeded')
    close()
コード例 #14
0
def main(result, feedback_settings):
    """Grade a bug-finding exercise from the runner's output.

    The runner's stdout contains a tab-separated summary line of the form
    ``<bugs found> <total bugs> <NFP|FP>``.  Return codes 2 and 3 mark
    cheating / no runnable method respectively and yield a zero grade.
    """
    # Top level message keyed on the process return code.
    msg = "{}\n".format(feedback_settings["status_message"].get(
        result.returncode, "Uncommon Failure"))
    feedback.set_global_feedback(msg, True)

    print("INSIDE CUSTOM SCRIPT")

    # No runnable method(s): forward the runner's own explanation.
    if result.returncode == 3:
        feedback.set_global_feedback(result.stdout, True)

    # if the student didn't cheat
    if result.returncode not in [2, 3]:

        # Extract the data.  Raw string so the regex engine interprets the
        # escapes, and "pattern" avoids shadowing the builtin ``format``.
        pattern = re.compile(
            r"(?P<nBugsFound>[0-9]+)\t(?P<nBugs>[0-9]+)\t(?P<status>N?FP)\n")
        match = pattern.search(result.stdout)
        # NOTE(review): malformed stdout leaves ``match`` as None and raises
        # AttributeError below — kept as-is to preserve behavior.
        nBugsFound = int(match.group("nBugsFound"))
        nBugs = int(match.group("nBugs"))
        status = match.group("status")
        score_ratio = nBugsFound / nBugs

        print("nBugsFound : {} , nBugs : {} , status : {}".format(
            nBugsFound, nBugs, status))

        # Notify the user of their progress; a false positive always fails.
        # (Renamed from ``result``, which rebound the parameter mid-function.)
        verdict = ("success" if score_ratio >= feedback_settings["quorum"]
                   and status == "NFP" else "failed")

        # if false positive, give them zero
        updated_score_ratio = score_ratio if status == "NFP" else 0.0
        feedback.set_global_result(verdict)
        feedback.set_grade(updated_score_ratio * 100)

        # Give them some explanation
        msg2 = "You have found {} bug(s) on a total of {} bugs\n".format(
            nBugsFound, nBugs)
        feedback.set_global_feedback(msg2, True)
        if status == "FP":
            feedback.set_global_feedback(
                "Your test suite generates a false positive: therefore you have 0.0%.",
                True)

    else:

        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
コード例 #15
0
ファイル: feedback.py プロジェクト: Lutrix8/LEPL1402
def handle_verification(feedback_settings):
    """Verify required/prohibited statements and abort grading on any error.

    When ``helper.statement_verification`` returns a non-empty list of
    ``(check, problem_id)`` pairs, a per-problem message is attached, the
    grade is zeroed and the grader exits.
    """
    result = helper.statement_verification(feedback_settings)
    # If not empty, there is error(s) in student code
    if result:
        msg = feedback_settings["status_message"].get(2, "Uncommon Failure")
        feedback.set_global_feedback(msg)
        # Add message(s) to tell the student where the errors are.
        # (Fixed: the previous concatenation emitted a double space before
        # "found"/"missing".)
        for (check, problem_id) in result:
            verb = "found" if check == "prohibited" else "missing"
            message = "{} statement(s) {} HERE".format(check.capitalize(), verb)
            feedback.set_problem_feedback(message, problem_id, True)

        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)
コード例 #16
0
ファイル: custom_translatable.py プロジェクト: elsys/CS1-Java
def main(_):
    """Require Character.toUpperCase() and forbid any other toUpperCase use.

    Returns 1 (and sets a failed result) when flagged, 0 otherwise.
    """
    # The dedicated method must appear at least once.
    if lcount("Character.toUpperCase", "StudentCode/Etudiant.java") == 0:
        feedback.set_global_result('failed')
        feedback.set_global_feedback(
            _("Vous n'utilisez pas la méthode Character.toUpperCase()."))
        return 1

    # Any other toUpperCase variant is forbidden, even inside comments.
    with open("StudentCode/Etudiant.java", 'r') as fin:
        for line in fin:
            if "toUpperCase" in line and "Character.toUpperCase" not in line:
                feedback.set_global_result('failed')
                feedback.set_global_feedback(
                    _("Il est interdit d'utiliser autre chose que Character.toUpperCase(), même en commentaires."
                      ))
                return 1
    return 0
コード例 #17
0
def main(result, feedback_settings):
    """Grade a bug-finding exercise and relay the runner's detailed feedback.

    The runner's stdout starts with a tab-separated summary line (bugs
    found, total bugs, NFP/FP status) followed by per-bug details that are
    forwarded to the student.  Return codes 2 and 3 mark cheating / no
    runnable method and yield a zero grade.
    """
    # Top level message keyed on the process return code.
    msg = "{}\n".format(feedback_settings["status_message"].get(
        result.returncode, "Uncommon Failure"))
    feedback.set_global_feedback(msg, True)

    print("INSIDE CUSTOM SCRIPT")

    # No runnable method(s): forward the runner's own explanation.
    if result.returncode == 3:
        feedback.set_global_feedback(result.stdout, True)

    # if the student didn't cheat
    if result.returncode not in [2, 3]:
        # Extract the data.  Raw string so the regex engine interprets the
        # escapes, and "pattern" avoids shadowing the builtin ``format``.
        pattern = re.compile(
            r"(?P<nBugsFound>[0-9]+)\t(?P<nBugs>[0-9]+)\t(?P<status>N?FP)\n")
        match = pattern.search(result.stdout)
        # NOTE(review): malformed stdout leaves ``match`` as None and raises
        # AttributeError below — kept as-is to preserve behavior.
        nBugsFound = int(match.group("nBugsFound"))
        nBugs = int(match.group("nBugs"))
        status = match.group("status")
        score_ratio = nBugsFound / nBugs
        # Everything after the first stdout line is extra per-bug feedback.
        extraFeedback = result.stdout.split('\n')[1:]

        print("nBugsFound : {} , nBugs : {} , status : {}".format(
            nBugsFound, nBugs, status))

        # Notify the user of their progress; a false positive always fails.
        # (Renamed from ``result``, which rebound the parameter mid-function.)
        verdict = ("success" if score_ratio >= feedback_settings["quorum"]
                   and status == "NFP" else "failed")

        # if false positive, give them zero
        updated_score_ratio = score_ratio if status == "NFP" else 0.0
        feedback.set_global_result(verdict)
        feedback.set_grade(updated_score_ratio * 100)

        # Give them some explanation: first extra line is the summary, the
        # rest lists the bugs, indented for RST rendering.
        details = extraFeedback[0]
        listBugs = extraFeedback[1:]
        bug_feedback = rst.indent_block(1, '\n'.join(listBugs), '\t')
        global_feedback = details + bug_feedback
        feedback.set_global_feedback(global_feedback)
    else:
        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
コード例 #18
0
ファイル: feedback.py プロジェクト: Lutrix8/LEPL1402
def custom_result_feedback(result, feedback_settings):
    """Run the task's custom feedback script, failing the task on any error.

    Dynamically loads the module named by
    ``feedback_settings["custom_feedback_script"]`` (resolved against CWD)
    and calls its ``main(result, feedback_settings)``.
    """
    # Dynamically load modules we need
    # Credits to https://stackoverflow.com/a/67692/6149867
    # And for the explanation : http://www.blog.pythonlibrary.org/2016/05/27/python-201-an-intro-to-importlib/
    def dynamically_load_module(module, path):
        spec = importlib.util.spec_from_file_location(module, path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod

    try:
        custom_feedback_path = str(CWD / feedback_settings["custom_feedback_script"])
        custom_feedback_module = dynamically_load_module("custom_feedback_module", custom_feedback_path)
        custom_feedback_module.main(result, feedback_settings)
    # BaseException subsumed RuntimeError and ImportError, so the old
    # three-way tuple was redundant.  The catch-all is deliberate: a broken
    # custom script must never crash the grader.
    except BaseException:
        traceback.print_exc()  # useful for debugging a custom script that failed
        feedback.set_global_feedback("A technical problem has occurred in the custom feedback script: please report it !")
        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)
コード例 #19
0
ファイル: feedback.py プロジェクト: UCL-INGI/LEPL1402
def compilation_feedback(result, full_log):
    """Report a compilation failure, zero the grade and stop the grader.

    Errors originating in the templates folder are shown to the student as
    an RST table.  Otherwise ``full_log`` controls whether a TA-oriented
    full error table is emitted or only a generic student-facing message.
    """
    if result.returncode != 0:
        errors = extract_compilation_errors(result.stderr)

        # If any errors come from the templates, blame the student with this code
        templates_folder = helper.relative_path(PATH_TEMPLATES)
        if any(error.get("source", "templates") == templates_folder for error in errors):
            # Generate a custom RST report that the student will see
            msg = generate_compilation_errors_table(errors, [PATH_TEMPLATES])

            # Send it to Inginious
            feedback.set_global_feedback(msg, True)

        else:
            if full_log:
                # A teaching assistant wants to debug this task and no error is coming from the template folder
                msg = generate_compilation_errors_table(errors, [PATH_SRC, PATH_FLAVOUR])

                # Send it to Inginious
                feedback.set_global_feedback(msg, True)

                # For debug purposes
                print(result.stderr)
            else:

                # Either the student has made mistake(s) eg in the signature(s) of method(s) ....
                # Or a TA adds code that doesn't compile
                # (Fixed: a space was missing between the two sentences.)
                feedback.set_global_feedback(
                    "{} {}".format(
                        "You modified the signature of at least a function or modified the content of a class.",
                        "You should not; the tests fail to compile"
                    ))

        # Final instructions common to all scenarios

        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)
コード例 #20
0
ファイル: feedback.py プロジェクト: Lutrix8/LEPL1402
def result_feedback(result, feedback_settings):
    """Dispatch result feedback to the configured strategy or grade binarily.

    With ``has_feedback`` set, the strategy registered under
    ``feedback_kind`` handles the result; any error in it fails the task.
    Otherwise the exercise is graded 0 or 100 from the return code.
    """
    # Top level message
    msg = "{}\n".format(feedback_settings["status_message"].get(result.returncode, "Uncommon Failure"))

    # if we have a feedback, use it
    if feedback_settings["has_feedback"]:

        try:
            strategy = FEEDBACK_STRATEGIES[feedback_settings["feedback_kind"]]
            strategy(result, feedback_settings, msg)
        # BaseException subsumed KeyError and RuntimeError, so the old tuple
        # was redundant.  The catch-all is deliberate: a broken strategy
        # must never crash the grader.
        except BaseException:
            traceback.print_exc()  # useful for debugging a custom script that failed
            feedback.set_global_result("failed")
            msg = "A technical problem has occurred using the feedback strategy : {} . Please report it !" \
                .format(feedback_settings["feedback_kind"])
            feedback.set_global_feedback(msg)
            feedback.set_grade(0.0)
            sys.exit(0)

    # For exercises with binary result : 0 or 100
    else:
        feedback.set_global_feedback(msg, True)
        score_ratio = 1.0 if result.returncode == 0 else 0.0
        feedback_result(score_ratio, feedback_settings)
コード例 #21
0
def main():
    """Run the generated correction script and turn its output into feedback."""
    # Replace correction.py by your filename on this line and the next
    input.parse_template("correction.py")
    # Merge stderr into stdout so error details reach the student.
    proc = subprocess.Popen(shlex.split("python3 correction.py"),
                            stderr=subprocess.STDOUT,
                            stdout=subprocess.PIPE)
    make_output = proc.communicate()[0].decode('utf-8')
    if proc.returncode:
        # Non-zero exit: the generated program crashed.
        feedback.set_global_result("failed")
        feedback.set_global_feedback(
            "Your code could not be executed. Please verify that all your blocks are correctly connected."
        )
        exit(0)
    if make_output == "True\n":
        feedback.set_global_result("success")
        feedback.set_global_feedback("You solved the task !")
    else:
        feedback.set_global_result("failed")
        feedback.set_global_feedback("You made a mistake ! " + make_output)
コード例 #22
0
def run():
    """Run ``correction.py`` (generated from the student's blocks) and emit an
    RST-formatted French verdict: crash, correct answer, or wrong answer.

    NOTE: the multi-line feedback literals below are byte-sensitive RST
    (including blank and indented lines) — do not reformat them.
    """
    input.parse_template(
        "correction.py"
    )  #Replace correction.py by your filename on this line and the next
    # Run the correction script, merging stderr into stdout so that error
    # details can be echoed back to the student.
    p = subprocess.Popen(shlex.split("python3 correction.py"),
                         stderr=subprocess.STDOUT,
                         stdout=subprocess.PIPE)
    make_output = p.communicate()[0].decode('utf-8')
    if p.returncode:
        # Non-zero exit code: the generated program crashed.
        feedback.set_global_result("crash")
        feedback.set_global_feedback("""
CRASH
-----
Le code n'a pas pu être exécuté. Vérifie que tes blocs sont bien assemblés.

Détails::

    {}

""".format(make_output))
        exit(0)
    # The correction script prints exactly "True" (plus newline) on success.
    elif make_output == "True\n":
        feedback.set_global_result("success")
        feedback.set_global_feedback("""
BONNE REPONSE
-------------
Bravo, tu as résolu cet exercice !
""")
    else:
        feedback.set_global_result("failed")
        feedback.set_global_feedback("""
MAUVAISE REPONSE
----------------
Tu as fait une erreur !

Détails::

    {}
    
""".format(make_output))
コード例 #23
0
        feedback.set_problem_feedback(
            "Votre réponse doit contenir des parenthèses pour délimiter le point",
            "q1")  #Ici
    if check == "lettre":
        feedback.set_problem_feedback(
            "Votre réponse ne doit contenir que des chiffres", "q1")  #Ici
    if check == "separateur":
        feedback.set_problem_feedback(
            "Les coordonnées doivent être séparées par une virgule",
            "q1")  #Ici

elif check == True:
    if is_correct(
            answer, "(2,2)"
    ) == False:  #Remplacez '(2,2)' par la réponse correcte, si la réponse à la question est (5,6), écrivez "(5,6)"
        feedback.set_problem_result("failed", "q1")  #Ici
        feedback.set_problem_feedback("Votre réponse est incorrect",
                                      "q1")  #Ici
    elif is_correct(
            answer, "(2,2)"
    ) == True:  #Remplacez '(2,2)' par la réponse correcte, si la réponse à la question est (5,6), écrivez "(5,6)"
        feedback.set_problem_result("success", "q1")  #Ici
        feedback.set_problem_feedback("Bravo!", "q1")  #Ici
        grade += 100

feedback.set_grade(grade)
if grade == 100:
    feedback.set_global_result("success")
else:
    feedback.set_global_result("failed")
コード例 #24
0
def run(customscript, execcustom, nexercices, tests=[], runner='Runner'):
    """Parse the students' answers, compile and run the tests, and give
    the students their feedback.

    Keyword arguments:
    customscript -- name of a custom script
    execcustom -- whether (value != 0) or not (0) the custom script customscript must be executed
    nexercices -- the number of exercises in the task
    tests -- test files to run
    runner -- runner file (default 'Runner')
    """
    # Fetch the test files in case they were not supplied to this call
    if not tests:
        tests = get_test_files(runner)
    code_litteral = ".. code-block::\n\n"
    parsetemplate()  # Parse the student's answers
    if execcustom != 0:  # The custom script must be executed
        # If this is a python script we call the main() method with the _() function to transmit the translation mechanism
        if (customscript == "custom_translatable.py"):
            from custom_translatable import main
            outcustom = main(_)
        else:
            outcustom = subprocess.call(['./' + customscript],
                                        universal_newlines=True)
        if outcustom != 0:  # The script reported an error
            exit()

    # Compile the files. map applies a function (argument 1) to a list (argument 2)
    # The lambda defines an anonymous function that prefixes the src folder and appends the .java extension to the test file names
    anonymous_fun = lambda file: './src/' + file + '.java'  # Create anonymous function
    Log = compile_files([anonymous_fun(file) for file in tests + [runner]])
    if Log == "":  # Compilation succeeded
        with open('err.txt', 'w+', encoding="utf-8") as f:
            # Launch the runner
            os.chdir('./student')
            java_cmd = "run_student java -ea -cp " + librairies()
            # The test files are passed as arguments to the runner file (see runner documentation)
            resproc = subprocess.Popen(shlex.split(java_cmd) +
                                       ['src/' + runner] + tests,
                                       universal_newlines=True,
                                       stderr=f,
                                       stdout=subprocess.PIPE)
            resproc.communicate()
            resultat = resproc.returncode
            f.flush()
            f.seek(0)
            outerr = f.read()
            print(
                outerr
            )  # Echo the stderr output in the debug information
            if resultat == 127:  # The tests passed
                feedback.set_global_result('success')
            elif resultat == 252:  # Memory limit exceeded
                feedback.set_global_result('failed')
                feedback.set_global_feedback(
                    _("La limite de mémoire de votre programme est dépassée"))
            elif resultat == 253:  # timeout
                feedback.set_global_result('failed')
                feedback.set_global_feedback(
                    _("La limite de temps d'exécution de votre programme est dépassée"
                      ))
            else:  # The tests failed
                if nexercices == 1:
                    outerr = add_indentation_level(
                        outerr
                    )  # Indent the output so it is displayed in a grey box to the students
                    feedback.set_global_result('failed')
                    feedback.set_global_feedback(
                        _("Il semble que vous ayiez fait des erreurs dans votre code…\n\n"
                          ) + code_litteral + outerr + "\n")
                else:
                    i = 1
                    while i <= nexercices:
                        """
                        Cette regex va matcher tout ce qui se trouve entre
                            - @i (avec i le numéro du sous-problème que l'on considère
                            - @<un ou plusieurs chiffre>

                        et donc matcher tout les feedback associés au sous-problème que l'on considère
                        """
                        # (Translation of the above:) this regex matches
                        # everything between "@i :" — i being the current
                        # sub-problem number — and the next "@<digits> :",
                        # i.e. all feedback attached to this sub-problem.
                        regex = '@' + str(i) + ' :\n(.*?)(?=@\d+ :|$)'
                        regex_question = re.findall(regex, outerr, re.DOTALL)
                        if len(
                                regex_question
                        ) == 0:  # No match for the regex: the sub-problem was answered correctly
                            feedback.set_problem_feedback(
                                _("Vous avez bien répondu à cette question"),
                                "q" + str(i))
                        else:
                            outerr_question = ''.join(
                                regex_question
                            )  # Merge all collected feedback back into one string
                            outerr_question = add_indentation_level(
                                outerr_question)  # indent it
                            feed = _(
                                "Il semble que vous ayiez fait des erreurs dans votre code…\n\n"
                            ) + code_litteral + outerr_question + "\n"
                            feedback.set_problem_feedback(feed, "q" + str(i))
                        i += 1
    else:  # Compilation failed
        Log = add_indentation_level(Log)
        feed = _(
            "Le programme ne compile pas : \n\n") + code_litteral + Log + "\n"
        feedback.set_global_result('failed')
        feedback.set_global_feedback(feed)
コード例 #25
0
ファイル: feedback.py プロジェクト: Lutrix8/LEPL1402
def compilation_feedback(result):
    """On compilation failure, build an RST error report, zero the grade
    and stop the grader.

    Errors whose source is the templates folder are presented to the
    student as an RST CSV table; anything else is treated as a task-side
    problem, reported generically and logged for debugging.
    """
    if result.returncode != 0:
        errors = extract_compilation_errors(result.stderr)

        # If any errors come from the templates, blame the student with this code
        templates_folder = helper.relative_path(PATH_TEMPLATES)
        if any(error.get("source", "templates") == templates_folder for error in errors):
            # Generate a custom RST report that the student will see
            msg = ""
            # Don't add \r to that as it produces strange results
            next_line = "\n"
            indentation = 4
            headers = ["File", "Line", "Error Message", "Code"]

            # Headers
            msg += ".. csv-table:: " + next_line
            # Need a quote symbol that cannot appear in the Java code
            msg += " " * indentation + ":quote: §" + next_line
            msg += " " * indentation + ":header: " + ",".join(headers) + next_line
            msg += " " * indentation + ":widths: auto" + next_line * 2

            # Contents
            for error in [error for error in errors if error.get("source", "Unknown Source") == templates_folder]:
                # Print File , Line and Error message without problem
                msg += " " * indentation + "§{}§".format(error.get("file", "Unknown Filename"))
                msg += ",§{}§".format(error.get("line", "Unknown Line Number"))
                msg += ",§{}§".format(error.get("message", "Unknown Message"))

                # Print the code
                code_lines = error.get("code", [])

                # For whatever reason, INGINIOUS might truncate the stderr message if too long
                # It might break the CSV table ... so we need this fix
                if not code_lines:

                    # Might be confusing but these are the RST rules for an empty block
                    msg += ",§§" + next_line

                else:

                    msg += ",§.. code-block:: java" + next_line * 2
                    indentation_for_code = indentation + 1

                    for code_line in code_lines:
                        # as we are in a code block, we need indentation + 1 in order to create compilable code in RST
                        msg += " " * indentation_for_code + code_line

                        # needed test to correctly format things
                        if code_line != code_lines[-1]:
                            msg += next_line

                    # At the end , we should not forget the quote symbol and the next line
                    msg += "§" + next_line

            # Send it to Inginious
            feedback.set_global_feedback(msg, True)

        else:

            # Either the student has made mistake(s) eg in the signature(s) of method(s) ....
            # Or a TA adds code that doesn't compile
            # (Fixed: a space was missing between the two sentences.)
            feedback.set_global_feedback(
                "{} {}".format(
                    "You modified the signature of at least a function or modified the content of a class.",
                    "You should not; the tests fail to compile"
                ))

            # For debug purposes ; if the student code isn't to blame
            print(result.stderr)

        # Final instructions common to all scenarios

        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)
コード例 #26
0
def error(msg):
    """Mark the submission as crashed with feedback ``msg`` and stop the grader."""
    feedback.set_global_result("crash")
    feedback.set_global_feedback(msg)
    # exit(0): the grading pipeline itself succeeded; only the submission failed.
    exit(0)