Example #1
0
def main(_):
    if (lcount("Math.min", "StudentCode/Etudiant.java") > 0):
        feedback.set_global_result('failed')
        feedback.set_global_feedback(
            _("Il est interdit d'utiliser Math.min, même en commentaires."))
        return 1
    return 0
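Example #1 (and the similar checks in Examples #4, #7, #10, #11 and #16 further below) relies on an lcount helper that is not reproduced in this collection. A minimal sketch of what it might look like, assuming it simply counts occurrences of a token in a source file (the name and behaviour are inferred here, not taken from the original tasks):

def lcount(token, path):
    # Assumed behaviour: count every occurrence of `token` in the file at `path`,
    # including occurrences inside comments (which is why the checks above also
    # reject commented-out code).
    with open(path, 'r', encoding='utf-8') as fin:
        return sum(line.count(token) for line in fin)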
Example #2
0
def java_grading(result, feedback_settings, msg):
    score_ratio, msg2 = extract_java_grading_result(result, feedback_settings)
    # Prevent a submission that used a prohibited statement (returncode 2) from getting a passing grade
    score_ratio = 0.0 if result.returncode == 2 else score_ratio
    feedback_result(score_ratio, feedback_settings)
    feedback.set_global_feedback(msg, True)
    feedback.set_global_feedback(msg2, True)
Example #3
0
def result_feedback(result, feedback_settings):
    # Top level message
    msg = "{}\n".format(feedback_settings["status_message"].get(result.returncode, "Uncommon Failure"))

    # If we have feedback, use it
    if feedback_settings["has_feedback"]:

        # JavaGrading
        if feedback_settings["feedback_kind"] == "JavaGrading":
            score_ratio, msg = extract_java_grading_result(result, feedback_settings)
            # Prevent a submission that used a prohibited statement (returncode 2) from getting a passing grade
            score_ratio = 0.0 if result.returncode == 2 else score_ratio
            feedback_result(score_ratio, feedback_settings)
            feedback.set_global_feedback(msg, True)

        # JaCoCo
        if feedback_settings["feedback_kind"] == "JaCoCo":
            if result.returncode == 0:
                score_ratio, msg = extract_jacoco_result(feedback_settings)
                feedback_result(score_ratio, feedback_settings)
                message_index = 0 if score_ratio >= feedback_settings["quorum"] else 3
                msg2 = "{}\n".format(feedback_settings["status_message"].get(message_index, "Uncommon Failure"))
                feedback.set_global_feedback(msg2, True)
                feedback.set_global_feedback(rst.get_codeblock("java", msg), True)
            else:
                feedback.set_global_feedback(msg, True)
                feedback_result(0.0, feedback_settings)

    # For exercises with a binary result: 0 or 100
    else:
        feedback.set_global_feedback(msg, True)
        score_ratio = 1.0 if result.returncode == 0 else 0.0
        feedback_result(score_ratio, feedback_settings)
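Examples #2, #3, #19 and #20 delegate grading to a feedback_result helper that is not shown here. A minimal sketch, assuming it converts the score ratio to INGINIOUS's 0-100 grade scale and compares it against the configured quorum (the exact rule is an assumption):

def feedback_result(score_ratio, feedback_settings):
    # Assumed behaviour: set the grade from the ratio and mark the submission
    # as success/failed against the "quorum" threshold used in these scripts.
    feedback.set_grade(score_ratio * 100)
    passed = score_ratio >= feedback_settings.get("quorum", 1.0)
    feedback.set_global_result("success" if passed else "failed")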
Example #4
0
def main(_):
    if (lcount("return ", "StudentCode/Etudiant.java") > 1):
        feedback.set_global_result('failed')
        feedback.set_global_feedback(
            _("Vous n'avez pas besoin de l'instruction return dans cet exercice ! Observez la signature de votre méthode."
              ))
        return 1
    return 0
Example #5
0
def jar_feedback(result):
    if result.returncode != 0:
        msg = "A technical problem has occurred: please report it !"
        print(result.stderr)
        feedback.set_global_feedback(msg)
        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)
Example #6
0
def handle_prohibited_statements(feedback_settings):
    result = helper.contains_prohibited_statement(feedback_settings)
    if result:
        msg = feedback_settings["status_message"].get(2, "Uncommon Failure")
        feedback.set_global_feedback(msg)
        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)
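helper.contains_prohibited_statement in Example #6 is only referenced, never defined. A sketch of the kind of scan it could perform; the student_files and prohibited_statements keys are hypothetical names chosen for illustration:

def contains_prohibited_statement(feedback_settings):
    # Hypothetical implementation: look for any configured prohibited statement
    # in each student source file (the key names below are illustrative only).
    prohibited = feedback_settings.get("prohibited_statements", [])
    for path in feedback_settings.get("student_files", []):
        with open(path, 'r', encoding='utf-8') as fin:
            source = fin.read()
        if any(statement in source for statement in prohibited):
            return True
    return False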
Example #7
0
def main(_):
    if (lcount("contains", "StudentCode/Etudiant.java") > 1):
        feedback.set_global_result('failed')
        feedback.set_global_feedback(
            _("Il est interdit d'utiliser la méthode contains() de la classe String, même en commentaires."
              ))
        return 1
    return 0
Example #8
0
def compilation_feedback(result):
    if result.returncode != 0:
        msg = "Your file did not compile : please don't use INGINIOUS as an IDE ..."
        print(result.stderr)
        feedback.set_global_feedback(msg)
        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)
def AC():
    if local:
        print('accepted')
        close()
    else:
        feedback.set_global_result("success")
        feedback.set_global_feedback('accepted')
        close()
Example #10
0
def main(_):
    if (lcount("^", "StudentCode/Etudiant.java") > 0):
        feedback.set_global_result('failed')
        feedback.set_global_feedback(
            _("Il est interdit d'utiliser l'opérateur xor (^) de java, même en commentaires."
              ))
        return 1
    return 0
Example #11
0
def main(_):
    if (lcount("parse", "StudentCode/Etudiant.java") > 0):
        feedback.set_global_result('failed')
        feedback.set_global_feedback(
            _("Il est interdit d'utiliser une méthode de parsing déjà implémentée, même en commentaires."
              ))
        return 1
    return 0
def TLE(msg):
    if local:
        print(msg)
        print('time limit exceeded')
        close()
    else:
        feedback.set_global_result('failed')
        feedback.set_global_feedback('time limit exceeded')
        close()
def WA(msg):
    if local:
        print(msg)
        print('wrong answer')
        close()
    else:
        feedback.set_global_result('failed')
        feedback.set_global_feedback('wrong answer')
        close()
def CE(msg):
    if local:
        print(msg)
        print('compile error')
        close()
    else:
        feedback.set_global_result('failed')
        feedback.set_global_feedback('compile error')
        close()
Example #15
0
def handle_verification(feedback_settings):
    result = helper.statement_verification(feedback_settings)
    # If not empty, there are error(s) in the student code
    if result:
        msg = feedback_settings["status_message"].get(2, "Uncommon Failure")
        feedback.set_global_feedback(msg)
        # Add message(s) to tell the student where their errors are
        for (check, problem_id) in result:
            message = check.capitalize() + " statement(s)" \
                      + (" found " if check == "prohibited" else " missing ") + "HERE"
            feedback.set_problem_feedback(message, problem_id, True)

        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)
Example #16
0
def main(_):
    if (lcount("Character.toUpperCase", "StudentCode/Etudiant.java") == 0):
        feedback.set_global_result('failed')
        feedback.set_global_feedback(
            _("Vous n'utilisez pas la méthode Character.toUpperCase()."))
        return 1

    with open("StudentCode/Etudiant.java", 'r') as fin:
        for line in fin:
            if ("toUpperCase" in line and not "Character.toUpperCase" in line):
                feedback.set_global_result('failed')
                feedback.set_global_feedback(
                    _("Il est interdit d'utiliser autre chose que Character.toUpperCase(), même en commentaires."
                      ))
                return 1
    return 0
Example #17
0
def custom_result_feedback(result, feedback_settings):
    # Dynamically load modules we need
    # Credits to https://stackoverflow.com/a/67692/6149867
    # And for the explanation: http://www.blog.pythonlibrary.org/2016/05/27/python-201-an-intro-to-importlib/
    def dynamically_load_module(module, path):
        spec = importlib.util.spec_from_file_location(module, path)
        mod = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(mod)
        return mod
    try:
        custom_feedback_path = str(CWD / feedback_settings["custom_feedback_script"])
        custom_feedback_module = dynamically_load_module("custom_feedback_module", custom_feedback_path)
        custom_feedback_module.main(result, feedback_settings)
    except (RuntimeError, ImportError, BaseException):
        traceback.print_exc() # useful for debugging a custom script that failed
        feedback.set_global_feedback("A technical problem has occurred in the custom feedback script: please report it !")
        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)
Example #18
0
def main(result, feedback_settings):
    # Top level message
    msg = "{}\n".format(feedback_settings["status_message"].get(
        result.returncode, "Uncommon Failure"))
    feedback.set_global_feedback(msg, True)

    print("INSIDE CUSTOM SCRIPT")

    # No runnable method(s)
    if result.returncode == 3:
        feedback.set_global_feedback(result.stdout, True)

    # if the student didn't cheat
    if result.returncode not in [2, 3]:

        # Extract the data
        pattern = re.compile(
            r"(?P<nBugsFound>[0-9]+)\t(?P<nBugs>[0-9]+)\t(?P<status>N?FP)\n")
        regex_result = pattern.search(result.stdout)
        nBugsFound = int(regex_result.group("nBugsFound"))
        nBugs = int(regex_result.group("nBugs"))
        status = regex_result.group("status")
        score_ratio = nBugsFound / nBugs

        print("nBugsFound : {} , nBugs : {} , status : {}".format(
            nBugsFound, nBugs, status))

        # Notify the user of their progress
        result = "success" if score_ratio >= feedback_settings[
            "quorum"] and status == "NFP" else "failed"

        # If there is a false positive, give a grade of zero
        updated_score_ratio = score_ratio if status == "NFP" else 0.0
        feedback.set_global_result(result)
        feedback.set_grade(updated_score_ratio * 100)

        # Give some explanation
        msg2 = "You have found {} bug(s) on a total of {} bugs\n".format(
            nBugsFound, nBugs)
        feedback.set_global_feedback(msg2, True)
        if status == "FP":
            feedback.set_global_feedback(
                "Your test suite generates a false positive: therefore you have 0.0%.",
                True)

    else:

        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
Example #19
0
def result_feedback(result, feedback_settings):
    # Top level message
    msg = "{}\n".format(feedback_settings["status_message"].get(result.returncode, "Uncommon Failure"))

    # If we have feedback, use it
    if feedback_settings["has_feedback"]:

        try:
            strategy = FEEDBACK_STRATEGIES[feedback_settings["feedback_kind"]]
            strategy(result, feedback_settings, msg)
        except (KeyError, RuntimeError, BaseException):
            traceback.print_exc() # useful for debugging a custom script that failed
            feedback.set_global_result("failed")
            msg = "A technical problem has occurred using the feedback strategy : {} . Please report it !" \
                .format(feedback_settings["feedback_kind"])
            feedback.set_global_feedback(msg)
            feedback.set_grade(0.0)
            sys.exit(0)

    # For exercises with a binary result: 0 or 100
    else:
        feedback.set_global_feedback(msg, True)
        score_ratio = 1.0 if result.returncode == 0 else 0.0
        feedback_result(score_ratio, feedback_settings)
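Example #19 dispatches on a FEEDBACK_STRATEGIES table that is not shown. Given that java_grading (Example #2) and jacoco (Example #20, below) share the (result, feedback_settings, msg) signature expected by the strategy(...) call, the registry presumably looks something like this sketch rather than being the original definition:

FEEDBACK_STRATEGIES = {
    "JavaGrading": java_grading,  # see Example #2
    "JaCoCo": jacoco,             # see Example #20
}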
Example #20
0
def jacoco(result, feedback_settings, msg):
    if result.returncode == 0:
        score_ratio, msg = extract_jacoco_result(feedback_settings)
        feedback_result(score_ratio, feedback_settings)
        message_index = 0 if score_ratio >= feedback_settings["quorum"] else 3
        msg2 = "{}\n".format(feedback_settings["status_message"].get(message_index, "Uncommon Failure"))
        feedback.set_global_feedback(msg2, True)
        feedback.set_global_feedback(rst.get_codeblock("java", msg), True)
    else:
        feedback.set_global_feedback(msg, True)
        feedback_result(0.0, feedback_settings)
Example #21
0
def main(result, feedback_settings):
    # Top level message
    msg = "{}\n".format(feedback_settings["status_message"].get(
        result.returncode, "Uncommon Failure"))
    feedback.set_global_feedback(msg, True)

    print("INSIDE CUSTOM SCRIPT")

    # No runnable method(s)
    if result.returncode == 3:
        feedback.set_global_feedback(result.stdout, True)

    # if the student didn't cheat
    if result.returncode not in [2, 3]:
        # Extract the data
        pattern = re.compile(
            r"(?P<nBugsFound>[0-9]+)\t(?P<nBugs>[0-9]+)\t(?P<status>N?FP)\n")
        regex_result = pattern.search(result.stdout)
        nBugsFound = int(regex_result.group("nBugsFound"))
        nBugs = int(regex_result.group("nBugs"))
        status = regex_result.group("status")
        score_ratio = nBugsFound / nBugs
        extraFeedback = result.stdout.split('\n')[1:]

        print("nBugsFound : {} , nBugs : {} , status : {}".format(
            nBugsFound, nBugs, status))

        # Notify the user of their progress
        result = "success" if score_ratio >= feedback_settings[
            "quorum"] and status == "NFP" else "failed"

        # If there is a false positive, give a grade of zero
        updated_score_ratio = score_ratio if status == "NFP" else 0.0
        feedback.set_global_result(result)
        feedback.set_grade(updated_score_ratio * 100)

        # Give some explanation
        details = extraFeedback[0]
        listBugs = extraFeedback[1:]
        bug_feedback = rst.indent_block(1, '\n'.join(listBugs), '\t')
        global_feedback = details + bug_feedback
        feedback.set_global_feedback(global_feedback)
    else:
        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
Example #22
0
def main():
    input.parse_template(
        "correction.py"
    )  # Replace correction.py with your filename on this line and the next
    p = subprocess.Popen(shlex.split("python3 correction.py"),
                         stderr=subprocess.STDOUT,
                         stdout=subprocess.PIPE)
    make_output = p.communicate()[0].decode('utf-8')
    if p.returncode:
        feedback.set_global_result("failed")
        feedback.set_global_feedback(
            "Your code could not be executed. Please verify that all your blocks are correctly connected."
        )
        exit(0)
    elif make_output == "True\n":
        feedback.set_global_result("success")
        feedback.set_global_feedback("You solved the task !")
    else:
        feedback.set_global_result("failed")
        feedback.set_global_feedback("You made a mistake ! " + make_output)
Example #23
0
def run():
    input.parse_template(
        "correction.py"
    )  # Replace correction.py with your filename on this line and the next
    p = subprocess.Popen(shlex.split("python3 correction.py"),
                         stderr=subprocess.STDOUT,
                         stdout=subprocess.PIPE)
    make_output = p.communicate()[0].decode('utf-8')
    if p.returncode:
        feedback.set_global_result("crash")
        feedback.set_global_feedback("""
CRASH
-----
Le code n'a pas pu être exécuté. Vérifie que tes blocs sont bien assemblés.

Détails::

    {}

""".format(make_output))
        exit(0)
    elif make_output == "True\n":
        feedback.set_global_result("success")
        feedback.set_global_feedback("""
BONNE REPONSE
-------------
Bravo, tu as résolu cet exercice !
""")
    else:
        feedback.set_global_result("failed")
        feedback.set_global_feedback("""
MAUVAISE REPONSE
----------------
Tu as fait une erreur !

Détails::

    {}
    
""".format(make_output))
Example #24
0
def compilation_feedback(result, full_log):
    if result.returncode != 0:
        errors = extract_compilation_errors(result.stderr)

        # If any errors come from the templates, the student's code is to blame
        templates_folder = helper.relative_path(PATH_TEMPLATES)
        if any(error.get("source", "templates") == templates_folder for error in errors):
            # Generate a custom RST report that the student will see
            msg = generate_compilation_errors_table(errors, [PATH_TEMPLATES])

            # Send it to Inginious
            feedback.set_global_feedback(msg, True)

        else:
            if full_log:
                # A teaching assistant wants to debug this task and no error comes from the templates folder
                msg = generate_compilation_errors_table(errors, [PATH_SRC, PATH_FLAVOUR])

                # Send it to Inginious
                feedback.set_global_feedback(msg, True)

                # For debug purposes
                print(result.stderr)
            else:

                # Either the student has made mistake(s), e.g. in the signature(s) of method(s)...
                # or a TA added code that doesn't compile
                feedback.set_global_feedback(
                    "{} {}".format(
                        "You modified the signature of at least one function or modified the content of a class.",
                        "You should not; the tests fail to compile"
                    ))

        # Final instructions common to all scenarios

        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)
Example #25
0
def compilation_feedback(result):
    if result.returncode != 0:
        errors = extract_compilation_errors(result.stderr)

        # If any errors come from the templates, the student's code is to blame
        templates_folder = helper.relative_path(PATH_TEMPLATES)
        if any(error.get("source", "templates") == templates_folder for error in errors):
            # Generate a custom RST report that the student will see
            msg = ""
            # Don't add \r to that as it produces strange results
            next_line = "\n"
            indentation = 4
            headers = ["File", "Line", "Error Message", "Code"]

            # Headers
            msg += ".. csv-table:: " + next_line
            # Need a quote symbol that does not appear in Java code
            msg += " " * indentation + ":quote: §" + next_line
            msg += " " * indentation + ":header: " + ",".join(headers) + next_line
            msg += " " * indentation + ":widths: auto" + next_line * 2

            # Contents
            for error in [error for error in errors if error.get("source", "Unknown Source") == templates_folder]:
                # Print File, Line and Error Message without any problem
                msg += " " * indentation + "§{}§".format(error.get("file", "Unknown Filename"))
                msg += ",§{}§".format(error.get("line", "Unknown Line Number"))
                msg += ",§{}§".format(error.get("message", "Unknown Message"))

                # Print the code
                code_lines = error.get("code", [])

                # For whatever reason, INGINIOUS might truncate the stderr message if it is too long,
                # which might break the CSV table ... so we need this fix
                if not code_lines:

                    # Might be confusing, but these are the RST rules for an empty block
                    msg += ",§§" + next_line

                else:

                    msg += ",§.. code-block:: java" + next_line * 2
                    indentation_for_code = indentation + 1

                    for code_line in code_lines:
                        # As we are in a code block, we need one extra indentation level for the code to render correctly in RST
                        msg += " " * indentation_for_code + code_line

                        # Needed so that no newline is added after the last code line
                        if code_line != code_lines[-1]:
                            msg += next_line

                    # At the end, do not forget the quote symbol and the newline
                    msg += "§" + next_line

            # Send it to Inginious
            feedback.set_global_feedback(msg, True)

        else:

            # Either the student has made mistake(s), e.g. in the signature(s) of method(s)...
            # or a TA added code that doesn't compile
            feedback.set_global_feedback(
                "{} {}".format(
                    "You modified the signature of at least one function or modified the content of a class.",
                    "You should not; the tests fail to compile"
                ))

            # For debugging purposes; the student's code isn't to blame here
            print(result.stderr)

        # Final instructions common to all scenarios

        feedback.set_global_result("failed")
        feedback.set_grade(0.0)
        sys.exit(0)
Example #26
0
def run(customscript, execcustom, nexercices, tests=[], runner='Runner'):
    """ Parse les réponse des étudiant, compile et lance les tests et donne le feedback aux étudiant

    Keyword arguments:
    customscript -- nom d'un script personnalisé
    execcustom -- Si oui (valeur != 0) ou non (0) il faut exécuter le script personnalisé customscript
    nexercices -- la nombre d'exercice dans la tâche
    tests -- Fichiers de test à lancer
    runner -- Fichier runner (default 'Runner')
    """
    # Fetch the test files if they are not provided when the method is called
    if not tests:
        tests = get_test_files(runner)
    code_litteral = ".. code-block::\n\n"
    parsetemplate()  # Parse the student's answers
    if execcustom != 0:  # The custom script must be executed
        # If this is a python script we call the main() method with the _() function to transmit the translation mechanism
        if (customscript == "custom_translatable.py"):
            from custom_translatable import main
            outcustom = main(_)
        else:
            outcustom = subprocess.call(['./' + customscript],
                                        universal_newlines=True)
        if outcustom != 0:  # The script returned an error
            exit()

    # Compile the files. The lambda expression defines an anonymous function that
    # prepends the src folder and appends the .java extension to the test file names
    anonymous_fun = lambda file: './src/' + file + '.java'  # Create anonymous function
    Log = compile_files([anonymous_fun(file) for file in tests + [runner]])
    if Log == "":  # La compilation a réussie
        with open('err.txt', 'w+', encoding="utf-8") as f:
            # Launch the runner
            os.chdir('./student')
            java_cmd = "run_student java -ea -cp " + librairies()
            # Pass the test files as arguments to the runner file (see the runner documentation)
            resproc = subprocess.Popen(shlex.split(java_cmd) +
                                       ['src/' + runner] + tests,
                                       universal_newlines=True,
                                       stderr=f,
                                       stdout=subprocess.PIPE)
            resproc.communicate()
            resultat = resproc.returncode
            f.flush()
            f.seek(0)
            outerr = f.read()
            # Display the stderr output in the debug information
            print(outerr)
            if resultat == 127:  # The tests passed
                feedback.set_global_result('success')
            elif resultat == 252:  # Memory limit exceeded
                feedback.set_global_result('failed')
                feedback.set_global_feedback(
                    _("La limite de mémoire de votre programme est dépassée"))
            elif resultat == 253:  # timeout
                feedback.set_global_result('failed')
                feedback.set_global_feedback(
                    _("La limite de temps d'exécution de votre programme est dépassée"
                      ))
            else:  # The tests failed
                if nexercices == 1:
                    # Add indentation so that it is displayed in a grey box for the students
                    outerr = add_indentation_level(outerr)
                    feedback.set_global_result('failed')
                    feedback.set_global_feedback(
                        _("Il semble que vous ayiez fait des erreurs dans votre code…\n\n"
                          ) + code_litteral + outerr + "\n")
                else:
                    i = 1
                    while i <= nexercices:
                        """
                        Cette regex va matcher tout ce qui se trouve entre
                            - @i (avec i le numéro du sous-problème que l'on considère
                            - @<un ou plusieurs chiffre>

                        et donc matcher tout les feedback associés au sous-problème que l'on considère
                        """
                        regex = '@' + str(i) + ' :\n(.*?)(?=@\d+ :|$)'
                        regex_question = re.findall(regex, outerr, re.DOTALL)
                        if len(
                                regex_question
                        ) == 0:  # Il n'y a pas de match pour la regex, le sous-problème est bien répondu
                            feedback.set_problem_feedback(
                                _("Vous avez bien répondu à cette question"),
                                "q" + str(i))
                        else:
                            # Join all the feedback found into a single string
                            outerr_question = ''.join(regex_question)
                            # Indent it
                            outerr_question = add_indentation_level(outerr_question)
                            feed = _(
                                "Il semble que vous ayiez fait des erreurs dans votre code…\n\n"
                            ) + code_litteral + outerr_question + "\n"
                            feedback.set_problem_feedback(feed, "q" + str(i))
                        i += 1
    else:  # Compilation failed
        Log = add_indentation_level(Log)
        feed = _(
            "Le programme ne compile pas : \n\n") + code_litteral + Log + "\n"
        feedback.set_global_result('failed')
        feedback.set_global_feedback(feed)
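add_indentation_level is used in Example #26 (and in the last run function below) to push program output under the preceding ".. code-block::" directive. A minimal sketch, assuming it simply prefixes every line with four spaces (the indent width is an assumption):

def add_indentation_level(text, indent="    "):
    # Assumed behaviour: indent each line so the text renders inside the
    # RST code block opened by code_litteral.
    return "\n".join(indent + line for line in text.splitlines())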
Example #27
0
os.chdir("student")

# Fetch and save the student code into a file for compilation
input.parse_template("student_code.c.tpl", "student_code.c")

# Compilation
p = subprocess.Popen(shlex.split("make"),
                     stderr=subprocess.STDOUT,
                     stdout=subprocess.PIPE)
make_output = p.communicate()[0].decode('utf-8')
# If compilation failed, exit with "failed" result
if p.returncode:
    feedback.set_tag("not_compile", True)
    feedback.set_global_result("failed")
    feedback.set_global_feedback(
        "La compilation de votre code a échoué. Voici le message de sortie de la commande ``make`` :"
    )
    feedback.set_global_feedback(rst.get_codeblock('', make_output), True)
    exit(0)
else:
    # Cppcheck
    p = subprocess.Popen(shlex.split("make check"),
                         stderr=subprocess.STDOUT,
                         stdout=subprocess.PIPE)
    cppcheck_output = p.communicate()[0].decode('utf-8')
    if p.returncode:
        feedback.set_tag("cppcheck", True)
        feedback.set_global_result("failed")
        feedback.set_global_feedback(
            "La compilation de votre code avec ``cppcheck`` a échoué. Voici le message de sortie de la commande ``make check`` :"
        )
Example #28
0
def error(msg):
    feedback.set_global_result("crash")
    feedback.set_global_feedback(msg)
    exit(0)
Example #29
0
import os, re, hashlib, subprocess, shlex, shutil
import tempfile, tarfile
from inginious import feedback, rst, input

#################
# Check filename
#################

fname = input.get_input("p1archive:filename")

if re.match(r"^[0-9]{6}00\.tar\.gz$", fname) is None:
    feedback.set_global_result("failed")
    feedback.set_global_feedback(
        "Nom de l'archive incorrect. Format attendu: NOMA.tar.gz (exemple: 12341500.tar.gz)"
    )
    exit(1)

noma = fname.replace(".tar.gz", "")

##################
# Extracting file
##################

fp = tempfile.TemporaryFile()
fp.write(input.get_input("p1archive:value"))
fp.seek(0)

try:
    tar = tarfile.open(fileobj=fp)
    tar.extractall()
    tar.close()
Example #30
0
    else:
        clear_package(filename)
    judging = judge_java(name, CHECKER, TIMELIMIT)
    print('finished judging java')
elif (ext == 'cpp'):
    print('received cpp solution')
    if not local:
        os.system('getinput {0} > {1}.cpp'.format(TASKNAME, name))
    judging = judge_cpp(name, CHECKER, TIMELIMIT)
    print('finished judging cpp')
elif (ext == 'py'):
    print('received python solution')
    if not local:
        os.system('/bin/bash -c "getinput {0} > {1}.py"'.format(
            TASKNAME, name))
    judging = judge_py(name, CHECKER, TIMELIMIT)
    print('finished judging python')

if local:
    for fn in os.listdir('./'):
        if fn not in files_before:
            print('deleting {0}'.format(fn))
            os.system('rm ./{0}'.format(fn))
    print(judging.produce_feedback_message())
else:
    if judging.is_accepted():
        feedback.set_global_result("success")
    else:
        feedback.set_global_result("failed")
    feedback.set_global_feedback(judging.produce_feedback_message())
def run(exercice, customscript, corr, execcustom, nexercices, javac_args, java_args, code_litteral):
    run_custom(customscript=customscript, execcustom=execcustom)
    outother, output = compile_files(javac_args)
    outcorr = ""
    if corr != 0:
        outcorr = compile_corr(javac_args)
    erreur_enseignant = 0
    message_enseignant = ""
    if outcorr != "":
        outcorr = add_indentation_level(outcorr)
        erreur_enseignant = 1
        message_enseignant = outcorr

    if outother != "":
        # Indent the message so that it fits inside the rst code-block
        outother = add_indentation_level(outother)
        erreur_enseignant = 1
        message_enseignant = message_enseignant + "\n" + outother

    if erreur_enseignant != 0:
        feedback.set_result('failed')
        feedback.set_global_feedback("Le programme ne compile pas: \n " + code_litteral + message_enseignant + "\n")
        sys.exit(0)

    error = 0
    # If output is empty, there are no compilation errors
    if output == "":
        with open('err.txt', 'w+') as f:
            # Run exercise 1
            resproc = subprocess.Popen(java_args + ['student/' + exercice], universal_newlines=True, stderr=f,
                                       stdout=subprocess.PIPE)
            resproc.communicate()
            resultat = resproc.returncode
            f.flush()
            f.seek(0)
            outerr = f.read()
        # If the tests went well (return value = 127)
        if resultat == 127:
            if nexercices == 1:
                feedback.set_result('success')
                feedback.set_problem_feedback("Bravo, votre code est correct !", "q1")
            else:
                j = 1
                while j <= nexercices:
                    # Give a positive feedback for each question
                    feedback.set_result('success')
                    feedback.set_problem_feedback("Vous avez bien répondu à cette question", "q" + str(j))
                    j += 1
        elif resultat == 252:
            feedback.set_result('failed')
            feedback.set_global_feedback("La limite de mémoire de votre programme est dépassée")
            sys.exit(0)
        elif resultat == 253:
            feedback.set_result('timeout')
            feedback.set_global_feedback("La limite de temps d'exécution de votre programme est dépassée")
            exit()
        else:
            # Otherwise the tests failed: the program contains errors.
            if nexercices == 1:
                outerr = add_indentation_level(outerr).replace('%', '%%')
                feedback.set_result('failed')
                feedback.set_problem_feedback("Il semble que vous ayiez fait des erreurs dans votre code...\n " +
                                              code_litteral + outerr + "\n", "q1")
                error = 1
            else:
                i = 1
                while i <= nexercices:
                    # Retrieve one feedback per question from System.err, following the format imposed by convention
                    with open('question.out', 'w+') as f2:
                        proc = subprocess.Popen(['sed', '-e', "/Question " + str(i) + r" :/,/Question [^\D1] :/!d"],
                                                universal_newlines=True, stdout=f2, stdin=f)
                        proc.communicate()
                        f2.seek(0)
                        regex_question = re.findall('^Question ' + str(i) + r' :\n(.*?)^Question [^\D1] :',
                                                    outerr, re.DOTALL | re.MULTILINE)
                    if len(regex_question) == 0:
                        feedback.set_result('success')
                        feedback.set_problem_feedback("Vous avez bien répondu à cette question", "q" + str(i))
                    else:
                        # Join the regex matches into a single string
                        outerr_question = ''.join(regex_question)
                        outerr_question = add_indentation_level(outerr_question).replace('%', '%%')
                        feed = "Il semble que vous ayiez fait des erreurs dans votre code...\n " + code_litteral + outerr_question + "\n"
                        feedback.set_result('failed')
                        feedback.set_problem_feedback(feed, "q" + str(i))
                    i += 1
                error = 1

        # Check whether the task went well or whether there was a problem, and give feedback on the whole task
        if error == 0:
            feedback.set_result('success')
            feedback.set_global_feedback("Bravo, votre code est correct !")
        else:
            feedback.set_result('failed')
            feedback.set_global_feedback("Vous n'avez pas réussi tous les exercices")
    # Compilation error
    else:
        with open('outputglobal.out', 'w+') as f:
            output = add_indentation_level(output)
            feed = "Votre programme ne compile pas: \n " + code_litteral + output + "\n"
            feedback.set_result('failed')
            feedback.set_global_feedback(feed)