def get_result_details(semester, course, g_id, who_id, version):
    """
    Gets the result details for a particular version of a gradeable for the who (user or team).
    It returns a dictionary that contains a list of the testcases (that should have a 1-to-1
    correspondence with the testcases gotten through get_testcases() method) and the submission
    time for the particular version.

    :param semester:
    :param course:
    :param g_id:
    :param who_id:
    :param version:
    :return:
    """
    result_details = {'testcases': [], 'submission_time': None}
    result_dir = os.path.join(DATA_DIR, "courses", semester, course, "results",
                              g_id, who_id, str(version))
    if os.path.isfile(os.path.join(result_dir, "results.json")):
        with open(os.path.join(result_dir, "results.json")) as result_file:
            result_json = json.load(result_file)
            if result_json.get('testcases') is not None:
                for testcase in result_json['testcases']:
                    result_details['testcases'].append(
                        {'points': testcase['points_awarded']})

    if os.path.isfile(os.path.join(result_dir, "history.json")):
        with open(os.path.join(result_dir, "history.json")) as result_file:
            result_json = json.load(result_file)
            # a = datetime.strptime(result_json[-1]['submission_time'], "%a %b  %d %H:%M:%S %Z %Y")
            a = dateutils.read_submitty_date(
                result_json[-1]['submission_time'])
            result_details['submission_time'] = '{}-{:02d}-{:02d} {:02d}:{:02d}:{:02d}' \
                .format(a.year, a.month, a.day, a.hour, a.minute, a.second)
    return result_details
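
# A minimal usage sketch (not part of the original source): it assumes this module is
# importable and that DATA_DIR points at a real Submitty data directory. The semester,
# course, gradeable, user, and version values below are hypothetical placeholders.
def _example_print_result_details():
    # look up one (placeholder) submission and print its points per testcase
    details = get_result_details("f23", "sample_course", "homework_01", "student01", 3)
    print("submitted at:", details["submission_time"])
    for index, testcase in enumerate(details["testcases"], start=1):
        print("testcase {}: {} points".format(index, testcase["points"]))
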
def archive_autograding_results(working_directory, job_id, which_untrusted, is_batch_job, complete_config_obj, 
                                gradeable_config_obj, queue_obj, log_path, stack_trace_log_path, is_test_environment):
    """ After grading is finished, archive the results. """

    tmp_autograding = os.path.join(working_directory,"TMP_AUTOGRADING")
    tmp_submission = os.path.join(working_directory,"TMP_SUBMISSION")
    tmp_work = os.path.join(working_directory,"TMP_WORK")
    tmp_logs = os.path.join(working_directory,"TMP_SUBMISSION","tmp_logs")
    tmp_results = os.path.join(working_directory,"TMP_RESULTS")
    submission_path = os.path.join(tmp_submission, "submission")
    random_output_path = os.path.join(tmp_work, 'random_output')

    partial_path = os.path.join(queue_obj["gradeable"],queue_obj["who"],str(queue_obj["version"]))
    item_name = os.path.join(queue_obj["semester"],queue_obj["course"],"submissions",partial_path)
    results_public_dir = os.path.join(tmp_results,"results_public")
    results_details_dir = os.path.join(tmp_results, "details")
    patterns = complete_config_obj['autograding']

    # Copy work to details
    pattern_copy("work_to_details", patterns['work_to_details'], tmp_work, results_details_dir, tmp_logs)
    
    # Copy work to public
    if 'work_to_public' in patterns:
        pattern_copy("work_to_public", patterns['work_to_public'], tmp_work, results_public_dir, tmp_logs)

    if os.path.exists(random_output_path):
        pattern_copy("work_to_random_output", [os.path.join(random_output_path, 'test*', '**', '*.txt'),], tmp_work, tmp_results, tmp_logs)
    # grab the submission time
    with open(os.path.join(submission_path, ".submit.timestamp"), 'r') as submission_time_file:
        submission_string = submission_time_file.read().rstrip()

    history_file_tmp = os.path.join(tmp_submission,"history.json")
    history_file = os.path.join(tmp_results,"history.json")
    if os.path.isfile(history_file_tmp) and not is_test_environment:

        from . import CONFIG_PATH
        with open(os.path.join(CONFIG_PATH, 'submitty_users.json')) as open_file:
            OPEN_JSON = json.load(open_file)
        DAEMON_UID = OPEN_JSON['daemon_uid']

        shutil.move(history_file_tmp, history_file)
        # fix permissions
        ta_group_id = os.stat(tmp_results).st_gid
        os.chown(history_file, int(DAEMON_UID),ta_group_id)
        add_permissions(history_file, stat.S_IRGRP)
    grading_finished = dateutils.get_current_time()


    try:
        shutil.copy(os.path.join(tmp_work, "grade.txt"), tmp_results)
    except Exception:
        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
            print("\n\nERROR: Grading incomplete -- Could not copy", os.path.join(tmp_work, "grade.txt"), file=f)
        log_message(log_path, job_id, is_batch_job, which_untrusted, item_name, message="ERROR: grade.txt does not exist")
        log_stack_trace(stack_trace_log_path, job_id, is_batch_job, which_untrusted, item_name, trace=traceback.format_exc())

    grade_result = ""
    try:
        with open(os.path.join(tmp_work,"grade.txt")) as f:
            lines = f.readlines()
            for line in lines:
                line = line.rstrip('\n')
                if line.startswith("Automatic grading total:"):
                    grade_result = line
    except:
        with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
            print ("\n\nERROR: Grading incomplete -- Could not open ",os.path.join(tmp_work,"grade.txt"))
            log_message(job_id,is_batch_job,which_untrusted,item_name,message="ERROR: grade.txt does not exist")
            log_stack_trace(job_id,is_batch_job,which_untrusted,item_name,trace=traceback.format_exc())


    gradeable_deadline_string = gradeable_config_obj["date_due"]
    
    submission_datetime = dateutils.read_submitty_date(submission_string)
    gradeable_deadline_datetime = dateutils.read_submitty_date(gradeable_deadline_string)
    gradeable_deadline_longstring = dateutils.write_submitty_date(gradeable_deadline_datetime)
    submission_longstring = dateutils.write_submitty_date(submission_datetime)
    seconds_late = int((submission_datetime-gradeable_deadline_datetime).total_seconds())

    # note: negative = not late
    grading_finished_longstring = dateutils.write_submitty_date(grading_finished)

    with open(os.path.join(tmp_submission,".grading_began"), 'r') as f:
        grading_began_longstring = f.read()
    grading_began = dateutils.read_submitty_date(grading_began_longstring)

    gradingtime = (grading_finished - grading_began).total_seconds()

    queue_obj["gradingtime"]=gradingtime
    queue_obj["grade_result"]=grade_result
    queue_obj["which_untrusted"]=which_untrusted
    waittime = queue_obj["waittime"]

    with open(os.path.join(tmp_results,"queue_file.json"),'w') as outfile:
        json.dump(queue_obj,outfile,sort_keys=True,indent=4,separators=(',', ': '))

    try:
        shutil.move(os.path.join(tmp_work, "results.json"), os.path.join(tmp_results, "results.json"))
    except Exception:
        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
            print("\n\nERROR: Grading incomplete -- Could not open/write", os.path.join(tmp_work, "results.json"), file=f)
        log_message(log_path, job_id, is_batch_job, which_untrusted, item_name, message="ERROR: results.json read/write error")
        log_stack_trace(stack_trace_log_path, job_id, is_batch_job, which_untrusted, item_name, trace=traceback.format_exc())

    just_write_grade_history(history_file,
                             gradeable_deadline_longstring,
                             submission_longstring,
                             seconds_late,
                             queue_obj["queue_time"],
                             "BATCH" if is_batch_job else "INTERACTIVE",
                             grading_began_longstring,
                             int(waittime),
                             grading_finished_longstring,
                             int(gradingtime),
                             grade_result,
                             queue_obj.get("revision", None))

    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        f.write("FINISHED GRADING!\n")

    # save the logs!
    shutil.copytree(tmp_logs,os.path.join(tmp_results,"logs"))
    log_message(log_path, job_id,is_batch_job,which_untrusted,item_name,"grade:",gradingtime,grade_result)
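
# A hedged, illustrative call to archive_autograding_results() (not from the original
# source). The job id, untrusted user, log paths, and config/queue objects below are
# placeholders; in the real pipeline they come from the grading queue and the unzipped
# TMP_AUTOGRADING / TMP_SUBMISSION folders.
def _example_archive(working_directory, complete_config_obj, gradeable_config_obj, queue_obj):
    archive_autograding_results(
        working_directory=working_directory,
        job_id="abc123",                                     # placeholder job id
        which_untrusted="untrusted00",                       # placeholder untrusted user
        is_batch_job=False,
        complete_config_obj=complete_config_obj,
        gradeable_config_obj=gradeable_config_obj,
        queue_obj=queue_obj,
        log_path="/tmp/autograding_log.txt",                 # placeholder log path
        stack_trace_log_path="/tmp/autograding_traces.txt",  # placeholder log path
        is_test_environment=True)
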
def grade_from_zip(my_autograding_zip_file,my_submission_zip_file,which_untrusted):

    os.chdir(SUBMITTY_DATA_DIR)
    tmp = os.path.join("/var/local/submitty/autograding_tmp/",which_untrusted,"tmp")

    if os.path.exists(tmp):
        untrusted_grant_rwx_access(which_untrusted, tmp)
        add_permissions_recursive(tmp,
                  stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                  stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                  stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    # Remove any and all containers left over from past runs.
    old_containers = subprocess.check_output(['docker', 'ps', '-aq', '-f', 'name={0}'.format(which_untrusted)]).split()

    for old_container in old_containers:
        subprocess.call(['docker', 'rm', '-f', old_container.decode('utf8')])

    # clean up old usage of this directory
    shutil.rmtree(tmp,ignore_errors=True)
    os.mkdir(tmp)

    which_machine=socket.gethostname()

    # unzip autograding and submission folders
    tmp_autograding = os.path.join(tmp,"TMP_AUTOGRADING")
    tmp_submission = os.path.join(tmp,"TMP_SUBMISSION")
    unzip_this_file(my_autograding_zip_file, tmp_autograding)
    unzip_this_file(my_submission_zip_file, tmp_submission)
    os.remove(my_autograding_zip_file)
    os.remove(my_submission_zip_file)

    tmp_logs = os.path.join(tmp,"TMP_SUBMISSION","tmp_logs")

    queue_file = os.path.join(tmp_submission,"queue_file.json")
    with open(queue_file, 'r') as infile:
        queue_obj = json.load(infile)

    queue_time_longstring = queue_obj["queue_time"]
    waittime = queue_obj["waittime"]
    is_batch_job = queue_obj["regrade"]
    job_id = queue_obj["job_id"]
    is_batch_job_string = "BATCH" if is_batch_job else "INTERACTIVE"
    revision = queue_obj.get("revision", None)

    partial_path = os.path.join(queue_obj["gradeable"],queue_obj["who"],str(queue_obj["version"]))
    item_name = os.path.join(queue_obj["semester"],queue_obj["course"],"submissions",partial_path)

    grade_items_logging.log_message(job_id,is_batch_job,which_untrusted,item_name,"wait:",waittime,"")

    with open(os.path.join(tmp_submission,".grading_began"), 'r') as f:
        grading_began_longstring = f.read()
    grading_began = dateutils.read_submitty_date(grading_began_longstring)

    submission_path = os.path.join(tmp_submission, "submission")
    checkout_path = os.path.join(tmp_submission, "checkout")

    provided_code_path = os.path.join(tmp_autograding, "provided_code")
    test_input_path = os.path.join(tmp_autograding, "test_input")
    test_output_path = os.path.join(tmp_autograding, "test_output")
    custom_validation_code_path = os.path.join(tmp_autograding, "custom_validation_code")
    bin_path = os.path.join(tmp_autograding, "bin")
    form_json_config = os.path.join(tmp_autograding, "form.json")
    complete_config = os.path.join(tmp_autograding, "complete_config.json")

    with open(form_json_config, 'r') as infile:
        gradeable_config_obj = json.load(infile)
    gradeable_deadline_string = gradeable_config_obj["date_due"]

    with open(complete_config, 'r') as infile:
        complete_config_obj = json.load(infile)

    is_vcs = gradeable_config_obj["upload_type"] == "repository"
    checkout_subdirectory = complete_config_obj["autograding"].get("use_checkout_subdirectory","")
    checkout_subdir_path = os.path.join(checkout_path, checkout_subdirectory)

    if complete_config_obj.get('one_part_only', False):
        allow_only_one_part(submission_path, os.path.join(tmp_logs, "overall.txt"))
        if is_vcs:
            with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
                print("WARNING:  ONE_PART_ONLY OPTION DOES NOT MAKE SENSE WITH VCS SUBMISSION", file=f)


    # --------------------------------------------------------------------
    # START DOCKER

    # NOTE: DOCKER SUPPORT PRELIMINARY -- NEEDS MORE SECURITY BEFORE DEPLOYED ON LIVE SERVER
    complete_config = os.path.join(tmp_autograding,"complete_config.json")
    with open(complete_config, 'r') as infile:
        complete_config_obj = json.load(infile)

    # Save ourselves if autograding_method is None.
    autograding_method = complete_config_obj.get("autograding_method", "")
    USE_DOCKER = (autograding_method == "docker")

    # --------------------------------------------------------------------
    # COMPILE THE SUBMITTED CODE

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        print("====================================\nCOMPILATION STARTS", file=f)
    
    # copy submitted files to the tmp compilation directory
    tmp_compilation = os.path.join(tmp,"TMP_COMPILATION")
    os.mkdir(tmp_compilation)
    os.chdir(tmp_compilation)

    submission_path = os.path.join(tmp_submission,"submission")
    checkout_path = os.path.join(tmp_submission,"checkout")

    provided_code_path = os.path.join(tmp_autograding,"provided_code")
    test_input_path = os.path.join(tmp_autograding,"test_input")
    test_output_path = os.path.join(tmp_autograding,"test_output")
    custom_validation_code_path = os.path.join(tmp_autograding,"custom_validation_code")
    bin_path = os.path.join(tmp_autograding,"bin")
    form_json_config = os.path.join(tmp_autograding,"form.json")


    with open(form_json_config, 'r') as infile:
        gradeable_config_obj = json.load(infile)
    gradeable_deadline_string = gradeable_config_obj["date_due"]
    
    patterns_submission_to_compilation = complete_config_obj["autograding"]["submission_to_compilation"]

    add_permissions(tmp_logs,stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

    if USE_DOCKER:
        print("!!!!!!!!!!!!!!!!!!USING DOCKER!!!!!!!!!!!!!!!!!!!!!!!!")

    with open(complete_config, 'r') as infile:
        config = json.load(infile)
        my_testcases = config['testcases']

    # grab the submission time
    with open(os.path.join(submission_path,".submit.timestamp"), 'r') as submission_time_file:
        submission_string = submission_time_file.read().rstrip()

    with open(os.path.join(tmp_logs,"compilation_log.txt"), 'w') as logfile:
        # we start counting from one.
        executable_path_list = list()
        for testcase_num in range(1, len(my_testcases)+1):
            testcase_folder = os.path.join(tmp_compilation, "test{:02}".format(testcase_num))

            if 'type' in my_testcases[testcase_num-1]:
                if my_testcases[testcase_num-1]['type'] != 'FileCheck' and my_testcases[testcase_num-1]['type'] != 'Compilation':
                    continue

                if my_testcases[testcase_num-1]['type'] == 'Compilation':
                    if 'executable_name' in my_testcases[testcase_num-1]:
                        provided_executable_list = my_testcases[testcase_num-1]['executable_name']
                        if not isinstance(provided_executable_list, list):
                            provided_executable_list = [provided_executable_list]
                        for executable_name in provided_executable_list:
                            if executable_name.strip() == '':
                                continue
                            executable_path = os.path.join(testcase_folder, executable_name)
                            executable_path_list.append((executable_path, executable_name))
            else:
                continue

            os.makedirs(testcase_folder)
            
            pattern_copy("submission_to_compilation",patterns_submission_to_compilation,submission_path,testcase_folder,tmp_logs)

            if is_vcs:
                pattern_copy("checkout_to_compilation",patterns_submission_to_compilation,checkout_subdir_path,testcase_folder,tmp_logs)

            # copy any instructor provided code files to tmp compilation directory
            copy_contents_into(job_id,provided_code_path,testcase_folder,tmp_logs)
            
            # copy compile.out into the testcase folder as my_compile.out
            shutil.copy(os.path.join(bin_path, "compile.out"), os.path.join(testcase_folder, "my_compile.out"))
            add_permissions(os.path.join(testcase_folder,"my_compile.out"), stat.S_IXUSR | stat.S_IXGRP |stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
            #untrusted_grant_rwx_access(which_untrusted, tmp_compilation)          
            untrusted_grant_rwx_access(which_untrusted, testcase_folder)
            add_permissions_recursive(testcase_folder,
                      stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                      stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                      stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

            if USE_DOCKER:
                try:
                    #There can be only one container for a compilation step, so grab its container image
                    #TODO: set default in load_config_json.cpp
                    if my_testcases[testcase_num-1]['type'] == 'FileCheck':
                        print("performing filecheck in default ubuntu:custom container")
                        container_image = "ubuntu:custom"
                    else:
                        container_image = my_testcases[testcase_num-1]["containers"][0]["container_image"]
                        print('creating a compilation container with image {0}'.format(container_image))
                    untrusted_uid = str(getpwnam(which_untrusted).pw_uid)

                    compilation_container = None
                    compilation_container = subprocess.check_output(['docker', 'create', '-i', '-u', untrusted_uid, '--network', 'none',
                                               '-v', testcase_folder + ':' + testcase_folder,
                                               '-w', testcase_folder,
                                               container_image,
                                               #The command to be run.
                                               os.path.join(testcase_folder, 'my_compile.out'), 
                                               queue_obj['gradeable'],
                                               queue_obj['who'], 
                                               str(queue_obj['version']), 
                                               submission_string, 
                                               '--testcase', str(testcase_num)
                                               ]).decode('utf8').strip()
                    print("starting container")
                    compile_success = subprocess.call(['docker', 'start', '-i', compilation_container],
                                                   stdout=logfile,
                                                   cwd=testcase_folder)
                except Exception as e:
                    print('An error occurred when compiling with docker.')
                    grade_items_logging.log_stack_trace(job_id,is_batch_job,which_untrusted,item_name,trace=traceback.format_exc())
                finally:
                    if compilation_container is not None:
                        subprocess.call(['docker', 'rm', '-f', compilation_container])
                        print("cleaned up compilation container.")
            else:
                compile_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR, "sbin", "untrusted_execute"),
                                                   which_untrusted,
                                                   os.path.join(testcase_folder,"my_compile.out"),
                                                   queue_obj["gradeable"],
                                                   queue_obj["who"],
                                                   str(queue_obj["version"]),
                                                   submission_string,
                                                   '--testcase', str(testcase_num)],
                                                   stdout=logfile, 
                                                   cwd=testcase_folder)
            # remove the compilation program
            untrusted_grant_rwx_access(which_untrusted, testcase_folder)
            os.remove(os.path.join(testcase_folder,"my_compile.out"))

    if compile_success == 0:
        print (which_machine,which_untrusted,"COMPILATION OK")
    else:
        print (which_machine,which_untrusted,"COMPILATION FAILURE")
        grade_items_logging.log_message(job_id,is_batch_job,which_untrusted,item_name,message="COMPILATION FAILURE")
    add_permissions_recursive(tmp_compilation,
                      stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                      stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                      stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)


    # return to the main tmp directory
    os.chdir(tmp)


    # --------------------------------------------------------------------
    # make the runner directory

    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nRUNNER STARTS", file=f)
        
    tmp_work = os.path.join(tmp,"TMP_WORK")
    tmp_work_test_input = os.path.join(tmp_work, "test_input")
    tmp_work_submission = os.path.join(tmp_work, "submitted_files")
    tmp_work_compiled = os.path.join(tmp_work, "compiled_files")
    tmp_work_checkout = os.path.join(tmp_work, "checkout")
    
    os.mkdir(tmp_work)

    os.mkdir(tmp_work_test_input)
    os.mkdir(tmp_work_submission)
    os.mkdir(tmp_work_compiled)
    os.mkdir(tmp_work_checkout)

    os.chdir(tmp_work)

    # move all executable files from the compilation directory to the main tmp directory
    # Note: Must preserve the directory structure of compiled files (esp for Java)

    patterns_submission_to_runner = complete_config_obj["autograding"]["submission_to_runner"]

    pattern_copy("submission_to_runner",patterns_submission_to_runner,submission_path,tmp_work_submission,tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_runner",patterns_submission_to_runner,checkout_subdir_path,tmp_work_checkout,tmp_logs)

    # move the compiled files into the tmp_work_compiled directory
    for path, name in executable_path_list:
        if not os.path.isfile(path): 
            continue
        target_path = os.path.join(tmp_work_compiled, name)
        if not os.path.exists(target_path):
            os.makedirs(os.path.dirname(target_path), exist_ok=True)
        shutil.copy(path, target_path)
        print('copied over {0}'.format(target_path))
        with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
            print('grade_item: copied over {0}'.format(target_path), file=f)

    patterns_compilation_to_runner = complete_config_obj["autograding"]["compilation_to_runner"]
    #copy into the actual tmp_work directory for archiving/validating
    pattern_copy("compilation_to_runner",patterns_compilation_to_runner,tmp_compilation,tmp_work,tmp_logs)
    #copy into tmp_work_compiled, which is provided to each testcase
    # TODO change this as our methodology for declaring testcase dependencies becomes more robust
    pattern_copy("compilation_to_runner",patterns_compilation_to_runner,tmp_compilation,tmp_work_compiled,tmp_logs)

    # copy input files to tmp_work directory
    copy_contents_into(job_id,test_input_path,tmp_work_test_input,tmp_logs)

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        subprocess.call(['ls', '-lR', '.'], stdout=f)

    # copy runner.out to the current directory
    shutil.copy (os.path.join(bin_path,"run.out"),os.path.join(tmp_work,"my_runner.out"))

    # set the appropriate permissions for the newly created directories
    # TODO: this replaces the commented-out code below

    add_permissions(os.path.join(tmp_work,"my_runner.out"), stat.S_IXUSR | stat.S_IXGRP |stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
    add_permissions(tmp_work_submission, stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
    add_permissions(tmp_work_compiled, stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
    add_permissions(tmp_work_checkout, stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    #TODO this is how permissions used to be set. It was removed because of the way it interacts with the sticky bit.
    ## give the untrusted user read/write/execute permissions on the tmp directory & files
    # os.system('ls -al {0}'.format(tmp_work))
    # add_permissions_recursive(tmp_work,
    #                           stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
    #                           stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
    #                           stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    ##################################################################################################
    #call grade_item_main_runner.py
    runner_success = grade_item_main_runner.executeTestcases(complete_config_obj, tmp_logs, tmp_work, queue_obj, submission_string, 
                                                                                    item_name, USE_DOCKER, None, which_untrusted,
                                                                                    job_id, grading_began)
    ##################################################################################################

    if runner_success == 0:
        print (which_machine,which_untrusted, "RUNNER OK")
    else:
        print (which_machine,which_untrusted, "RUNNER FAILURE")
        grade_items_logging.log_message(job_id, is_batch_job, which_untrusted, item_name, message="RUNNER FAILURE")

    add_permissions_recursive(tmp_work,
                          stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                          stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                          stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH) 
    add_permissions_recursive(tmp_compilation,
                          stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                          stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                          stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH) 

    # --------------------------------------------------------------------
    # RUN VALIDATOR
    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nVALIDATION STARTS", file=f)

    # copy results files from compilation...
    patterns_submission_to_validation = complete_config_obj["autograding"]["submission_to_validation"]
    pattern_copy("submission_to_validation",patterns_submission_to_validation,submission_path,tmp_work,tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_validation",patterns_submission_to_validation,checkout_subdir_path,tmp_work,tmp_logs)
    patterns_compilation_to_validation = complete_config_obj["autograding"]["compilation_to_validation"]
    pattern_copy("compilation_to_validation",patterns_compilation_to_validation,tmp_compilation,tmp_work,tmp_logs)

    # remove the compilation directory
    shutil.rmtree(tmp_compilation)

    # copy output files to tmp_work directory
    copy_contents_into(job_id,test_output_path,tmp_work,tmp_logs)

    # copy any instructor custom validation code into the tmp work directory
    copy_contents_into(job_id,custom_validation_code_path,tmp_work,tmp_logs)

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        subprocess.call(['ls', '-lR', '.'], stdout=f)

    # copy validator.out to the current directory
    shutil.copy (os.path.join(bin_path,"validate.out"),os.path.join(tmp_work,"my_validator.out"))

    # give the untrusted user read/write/execute permissions on the tmp directory & files
    add_permissions_recursive(tmp_work,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    add_permissions(os.path.join(tmp_work,"my_validator.out"), stat.S_IXUSR | stat.S_IXGRP |stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    # TODO: remove debugging prints
    print("VALIDATING")
    # run my_validator.out as the untrusted user
    with open(os.path.join(tmp_logs,"validator_log.txt"), 'w') as logfile:
        if USE_DOCKER:
            # WIP: This option file facilitated testing...
            #USE_DOCKER = os.path.isfile("/tmp/use_docker")
            #use_docker_string="grading begins, using DOCKER" if USE_DOCKER else "grading begins (not using docker)"
            #grade_items_logging.log_message(job_id,is_batch_job,which_untrusted,submission_path,message=use_docker_string)
            container = subprocess.check_output(['docker', 'run', '-t', '-d',
                                                 '-v', tmp + ':' + tmp,
                                                 'ubuntu:custom']).decode('utf8').strip()
            dockerlaunch_done=dateutils.get_current_time()
            dockerlaunch_time = (dockerlaunch_done-grading_began).total_seconds()
            grade_items_logging.log_message(job_id,is_batch_job,which_untrusted,item_name,"dcct:",dockerlaunch_time,"docker container created")

            validator_success = subprocess.call(['docker', 'exec', '-w', tmp_work, container,
                                                 os.path.join(tmp_work, 'my_validator.out'), queue_obj['gradeable'],
                                                 queue_obj['who'], str(queue_obj['version']), submission_string], stdout=logfile)
        else:
            validator_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"sbin","untrusted_execute"),
                                                 which_untrusted,
                                                 os.path.join(tmp_work,"my_validator.out"),
                                                 queue_obj["gradeable"],
                                                 queue_obj["who"],
                                                 str(queue_obj["version"]),
                                                 submission_string],
                                                stdout=logfile)

    if validator_success == 0:
        print (which_machine,which_untrusted,"VALIDATOR OK")
    else:
        print (which_machine,which_untrusted,"VALIDATOR FAILURE")
        grade_items_logging.log_message(job_id,is_batch_job,which_untrusted,item_name,message="VALIDATION FAILURE")

    untrusted_grant_rwx_access(which_untrusted,tmp_work)

    # grab the result of autograding
    grade_result = ""
    try:
        with open(os.path.join(tmp_work, "grade.txt")) as f:
            for line in f:
                line = line.rstrip('\n')
                if line.startswith("Automatic grading total:"):
                    grade_result = line
    except Exception:
        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
            print("\n\nERROR: Grading incomplete -- Could not open", os.path.join(tmp_work, "grade.txt"), file=f)
        grade_items_logging.log_message(job_id, is_batch_job, which_untrusted, item_name, message="ERROR: grade.txt does not exist")
        grade_items_logging.log_stack_trace(job_id, is_batch_job, which_untrusted, item_name, trace=traceback.format_exc())

    # --------------------------------------------------------------------
    # MAKE RESULTS DIRECTORY & COPY ALL THE FILES THERE
    tmp_results = os.path.join(tmp,"TMP_RESULTS")

    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nARCHIVING STARTS", file=f)

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        subprocess.call(['ls', '-lR', '.'], stdout=f)

    os.makedirs(os.path.join(tmp_results,"details"))

    # remove the test_input directory, so we don't archive it!
    shutil.rmtree(os.path.join(tmp_work,"test_input"))

    # loop over the test case directories, and remove any files that are also in the test_input folder
    for testcase_num in range(1, len(my_testcases)+1):
        testcase_folder = os.path.join(tmp_work, "test{:02}".format(testcase_num))
        remove_test_input_files(os.path.join(tmp_logs,"overall.txt"),test_input_path,testcase_folder)

    patterns_work_to_details = complete_config_obj["autograding"]["work_to_details"]
    pattern_copy("work_to_details",patterns_work_to_details,tmp_work,os.path.join(tmp_results,"details"),tmp_logs)

    if ("work_to_public" in complete_config_obj["autograding"] and
        len(complete_config_obj["autograding"]["work_to_public"]) > 0):
        # create the directory
        os.makedirs(os.path.join(tmp_results,"results_public"))
        # copy the files
        patterns_work_to_public = complete_config_obj["autograding"]["work_to_public"]
        pattern_copy("work_to_public",patterns_work_to_public,tmp_work,os.path.join(tmp_results,"results_public"),tmp_logs)

    history_file_tmp = os.path.join(tmp_submission,"history.json")
    history_file = os.path.join(tmp_results,"history.json")
    if os.path.isfile(history_file_tmp):
        shutil.move(history_file_tmp,history_file)
        # fix permissions
        ta_group_id = os.stat(tmp_results).st_gid
        os.chown(history_file,int(DAEMON_UID),ta_group_id)
        add_permissions(history_file,stat.S_IRGRP)
    grading_finished = dateutils.get_current_time()

    try:
        shutil.copy(os.path.join(tmp_work, "grade.txt"), tmp_results)
    except Exception:
        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
            print("\n\nERROR: Grading incomplete -- Could not copy", os.path.join(tmp_work, "grade.txt"), file=f)
        grade_items_logging.log_message(job_id, is_batch_job, which_untrusted, item_name, message="ERROR: grade.txt does not exist")
        grade_items_logging.log_stack_trace(job_id, is_batch_job, which_untrusted, item_name, trace=traceback.format_exc())

    # -------------------------------------------------------------
    # create/append to the results history

    # grab the submission time
    with open (os.path.join(submission_path,".submit.timestamp")) as submission_time_file:
        submission_string = submission_time_file.read().rstrip()
    submission_datetime = dateutils.read_submitty_date(submission_string)

    gradeable_deadline_datetime = dateutils.read_submitty_date(gradeable_deadline_string)
    gradeable_deadline_longstring = dateutils.write_submitty_date(gradeable_deadline_datetime)
    submission_longstring = dateutils.write_submitty_date(submission_datetime)
    
    seconds_late = int((submission_datetime-gradeable_deadline_datetime).total_seconds())
    # note: negative = not late

    grading_finished_longstring = dateutils.write_submitty_date(grading_finished)

    gradingtime = (grading_finished-grading_began).total_seconds()

    with open(os.path.join(tmp_submission,"queue_file.json"), 'r') as infile:
        queue_obj = json.load(infile)
    queue_obj["gradingtime"]=gradingtime
    queue_obj["grade_result"]=grade_result
    queue_obj["which_untrusted"]=which_untrusted

    with open(os.path.join(tmp_results,"queue_file.json"),'w') as outfile:
        json.dump(queue_obj,outfile,sort_keys=True,indent=4,separators=(',', ': '))

    try:
        shutil.move(os.path.join(tmp_work, "results.json"), os.path.join(tmp_results, "results.json"))
    except Exception:
        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
            print("\n\nERROR: Grading incomplete -- Could not open/write", os.path.join(tmp_work, "results.json"), file=f)
        grade_items_logging.log_message(job_id, is_batch_job, which_untrusted, item_name, message="ERROR: results.json read/write error")
        grade_items_logging.log_stack_trace(job_id, is_batch_job, which_untrusted, item_name, trace=traceback.format_exc())

    write_grade_history.just_write_grade_history(history_file,
                                                 gradeable_deadline_longstring,
                                                 submission_longstring,
                                                 seconds_late,
                                                 queue_time_longstring,
                                                 is_batch_job_string,
                                                 grading_began_longstring,
                                                 int(waittime),
                                                 grading_finished_longstring,
                                                 int(gradingtime),
                                                 grade_result,
                                                 revision)

    os.chdir(SUBMITTY_DATA_DIR)

    if USE_DOCKER:
        with open(os.path.join(tmp_logs,"overall_log.txt"), 'w') as logfile:
            chmod_success = subprocess.call(['docker', 'exec', '-w', tmp_work, container,
                                             'chmod', '-R', 'ugo+rwx', '.'], stdout=logfile)

    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        f.write("FINISHED GRADING!\n")

    # save the logs!
    shutil.copytree(tmp_logs,os.path.join(tmp_results,"logs"))

    # zip up results folder
    filehandle, my_results_zip_file=tempfile.mkstemp()
    zip_my_directory(tmp_results,my_results_zip_file)
    os.close(filehandle)
    shutil.rmtree(tmp_autograding)
    shutil.rmtree(tmp_submission)
    shutil.rmtree(tmp_results)
    shutil.rmtree(tmp_work)
    shutil.rmtree(tmp)

    # WIP: extra logging for testing
    #grade_items_logging.log_message(job_id,is_batch_job,which_untrusted,submission_path,message="done grading")

    # --------------------------------------------------------------------
    # CLEAN UP DOCKER
    if USE_DOCKER:
        subprocess.call(['docker', 'rm', '-f', container])
        dockerdestroy_done=dateutils.get_current_time()
        dockerdestroy_time = (dockerdestroy_done-grading_finished).total_seconds()
        grade_items_logging.log_message(job_id,is_batch_job,which_untrusted,item_name,"ddt:",dockerdestroy_time,"docker container destroyed")
        
    grade_items_logging.log_message(job_id,is_batch_job,which_untrusted,item_name,"grade:",gradingtime,grade_result)

    return my_results_zip_file
def prepare_autograding_and_submission_zip(which_machine, which_untrusted,
                                           next_directory, next_to_grade):
    os.chdir(SUBMITTY_DATA_DIR)

    # generate a random id to be used to track this job in the autograding logs
    job_id = ''.join(
        random.choice(string.ascii_letters + string.digits) for _ in range(6))

    # --------------------------------------------------------
    # figure out what we're supposed to grade & error checking
    obj = load_queue_file_obj(job_id, next_directory, next_to_grade)

    partial_path = os.path.join(obj["gradeable"], obj["who"],
                                str(obj["version"]))
    item_name = os.path.join(obj["semester"], obj["course"], "submissions",
                             partial_path)
    submission_path = os.path.join(SUBMITTY_DATA_DIR, "courses", item_name)
    if not os.path.isdir(submission_path):
        grade_items_logging.log_message(
            job_id,
            message="ERROR: the submission directory does not exist " +
            submission_path)
        raise RuntimeError("ERROR: the submission directory does not exist",
                           submission_path)
    print(which_machine, which_untrusted, "prepare zip", submission_path)
    is_vcs, vcs_type, vcs_base_url, vcs_subdirectory = get_vcs_info(
        SUBMITTY_DATA_DIR, obj["semester"], obj["course"], obj["gradeable"],
        obj["who"], obj["team"])

    is_batch_job = "regrade" in obj and obj["regrade"]
    is_batch_job_string = "BATCH" if is_batch_job else "INTERACTIVE"

    queue_time = get_queue_time(next_directory, next_to_grade)
    queue_time_longstring = dateutils.write_submitty_date(queue_time)
    grading_began = dateutils.get_current_time()
    waittime = (grading_began - queue_time).total_seconds()
    grade_items_logging.log_message(job_id, is_batch_job, "zip", item_name,
                                    "wait:", waittime, "")

    # --------------------------------------------------------
    # various paths
    provided_code_path = os.path.join(SUBMITTY_DATA_DIR, "courses",
                                      obj["semester"], obj["course"],
                                      "provided_code", obj["gradeable"])
    test_input_path = os.path.join(SUBMITTY_DATA_DIR, "courses",
                                   obj["semester"], obj["course"],
                                   "test_input", obj["gradeable"])
    test_output_path = os.path.join(SUBMITTY_DATA_DIR, "courses",
                                    obj["semester"], obj["course"],
                                    "test_output", obj["gradeable"])
    custom_validation_code_path = os.path.join(SUBMITTY_DATA_DIR, "courses",
                                               obj["semester"], obj["course"],
                                               "custom_validation_code",
                                               obj["gradeable"])
    bin_path = os.path.join(SUBMITTY_DATA_DIR, "courses", obj["semester"],
                            obj["course"], "bin", obj["gradeable"])
    form_json_config = os.path.join(SUBMITTY_DATA_DIR, "courses",
                                    obj["semester"], obj["course"], "config",
                                    "form",
                                    "form_" + obj["gradeable"] + ".json")
    complete_config = os.path.join(
        SUBMITTY_DATA_DIR, "courses", obj["semester"], obj["course"], "config",
        "complete_config", "complete_config_" + obj["gradeable"] + ".json")

    if not os.path.exists(form_json_config):
        grade_items_logging.log_message(
            job_id,
            message="ERROR: the form json file does not exist " +
            form_json_config)
        raise RuntimeError("ERROR: the form json file does not exist ",
                           form_json_config)
    if not os.path.exists(complete_config):
        grade_items_logging.log_message(
            job_id,
            message="ERROR: the complete config file does not exist " +
            complete_config)
        raise RuntimeError("ERROR: the complete config file does not exist ",
                           complete_config)

    # --------------------------------------------------------------------
    # MAKE TEMPORARY DIRECTORY & COPY THE NECESSARY FILES THERE
    tmp = tempfile.mkdtemp()
    tmp_autograding = os.path.join(tmp, "TMP_AUTOGRADING")
    os.mkdir(tmp_autograding)
    tmp_submission = os.path.join(tmp, "TMP_SUBMISSION")
    os.mkdir(tmp_submission)

    copytree_if_exists(provided_code_path,
                       os.path.join(tmp_autograding, "provided_code"))
    copytree_if_exists(test_input_path,
                       os.path.join(tmp_autograding, "test_input"))
    copytree_if_exists(test_output_path,
                       os.path.join(tmp_autograding, "test_output"))
    copytree_if_exists(custom_validation_code_path,
                       os.path.join(tmp_autograding, "custom_validation_code"))
    copytree_if_exists(bin_path, os.path.join(tmp_autograding, "bin"))
    shutil.copy(form_json_config, os.path.join(tmp_autograding, "form.json"))
    shutil.copy(complete_config,
                os.path.join(tmp_autograding, "complete_config.json"))

    checkout_path = os.path.join(SUBMITTY_DATA_DIR, "courses", obj["semester"],
                                 obj["course"], "checkout", partial_path)
    results_path = os.path.join(SUBMITTY_DATA_DIR, "courses", obj["semester"],
                                obj["course"], "results", partial_path)

    # grab a copy of the current history.json file (if it exists)
    history_file = os.path.join(results_path, "history.json")
    history_file_tmp = ""
    if os.path.isfile(history_file):
        shutil.copy(history_file, os.path.join(tmp_submission, "history.json"))
    # get info from the gradeable config file
    with open(complete_config, 'r') as infile:
        complete_config_obj = json.load(infile)

    checkout_subdirectory = complete_config_obj["autograding"].get(
        "use_checkout_subdirectory", "")
    checkout_subdir_path = os.path.join(checkout_path, checkout_subdirectory)
    queue_file = os.path.join(next_directory, next_to_grade)

    # switch to tmp directory
    os.chdir(tmp)

    # make the logs directory
    tmp_logs = os.path.join(tmp, "TMP_SUBMISSION", "tmp_logs")
    os.makedirs(tmp_logs)
    # 'touch' a file in the logs folder
    open(os.path.join(tmp_logs, "overall.txt"), 'a').close()

    # grab the submission time
    with open(os.path.join(submission_path,
                           ".submit.timestamp")) as submission_time_file:
        submission_string = submission_time_file.read().rstrip()
    submission_datetime = dateutils.read_submitty_date(submission_string)

    # --------------------------------------------------------------------
    # CHECKOUT THE STUDENT's REPO
    if is_vcs:

        # cleanup the previous checkout (if it exists)
        shutil.rmtree(checkout_path, ignore_errors=True)
        os.makedirs(checkout_path, exist_ok=True)

        try:
            # If we are public or private github, we will have an empty vcs_subdirectory
            if vcs_subdirectory == '':
                with open(os.path.join(
                        submission_path,
                        ".submit.VCS_CHECKOUT")) as submission_vcs_file:
                    VCS_JSON = json.load(submission_vcs_file)
                    git_user_id = VCS_JSON["git_user_id"]
                    git_repo_id = VCS_JSON["git_repo_id"]
                    if not valid_github_user_id(git_user_id):
                        raise Exception(
                            "Invalid GitHub user/organization name: '" +
                            git_user_id + "'")
                    if not valid_github_repo_id(git_repo_id):
                        raise Exception("Invalid GitHub repository name: '" +
                                        git_repo_id + "'")
                    # construct path for GitHub
                    vcs_path = "https://www.github.com/" + git_user_id + "/" + git_repo_id

            # is vcs_subdirectory standalone or should it be combined with base_url?
            elif vcs_subdirectory[0] == '/' or '://' in vcs_subdirectory:
                vcs_path = vcs_subdirectory
            else:
                if '://' in vcs_base_url:
                    vcs_path = urllib.parse.urljoin(vcs_base_url,
                                                    vcs_subdirectory)
                else:
                    vcs_path = os.path.join(vcs_base_url, vcs_subdirectory)

            with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
                print("====================================\nVCS CHECKOUT",
                      file=f)
                print('vcs_base_url', vcs_base_url, file=f)
                print('vcs_subdirectory', vcs_subdirectory, file=f)
                print('vcs_path', vcs_path, file=f)
                print(['/usr/bin/git', 'clone', vcs_path, checkout_path],
                      file=f)

            # git clone may fail -- because repository does not exist,
            # or because we don't have appropriate access credentials
            try:
                subprocess.check_call(
                    ['/usr/bin/git', 'clone', vcs_path, checkout_path])
                os.chdir(checkout_path)

                # determine which version we need to checkout
                # if the repo is empty or the master branch does not exist, this command will fail
                try:
                    what_version = subprocess.check_output([
                        'git', 'rev-list', '-n', '1',
                        '--before="' + submission_string + '"', 'master'
                    ])
                    what_version = str(what_version.decode('utf-8')).rstrip()
                    if what_version == "":
                        # oops, pressed the grade button before a valid commit
                        shutil.rmtree(checkout_path, ignore_errors=True)
                    else:
                        # and check out the right version
                        subprocess.call(
                            ['git', 'checkout', '-b', 'grade', what_version])
                    os.chdir(tmp)
                    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
                        subprocess.call(['ls', '-lR', checkout_path], stdout=f)
                    obj['revision'] = what_version

                # exception on git rev-list
                except subprocess.CalledProcessError as error:
                    grade_items_logging.log_message(
                        job_id,
                        message=
                        "ERROR: failed to determine version on master branch "
                        + str(error))
                    os.chdir(checkout_path)
                    with open(
                            os.path.join(
                                checkout_path,
                                "failed_to_determine_version_on_master_branch.txt"
                            ), 'w') as f:
                        print(str(error), file=f)
                        print("\n", file=f)
                        print(
                            "Check to be sure the repository is not empty.\n",
                            file=f)
                        print(
                            "Check to be sure the repository has a master branch.\n",
                            file=f)
                        print(
                            "And check to be sure the timestamps on the master branch are reasonable.\n",
                            file=f)

            # exception on git clone
            except subprocess.CalledProcessError as error:
                grade_items_logging.log_message(
                    job_id,
                    message="ERROR: failed to clone repository " + str(error))
                os.chdir(checkout_path)
                with open(
                        os.path.join(checkout_path,
                                     "failed_to_clone_repository.txt"),
                        'w') as f:
                    print(str(error), file=f)
                    print("\n", file=f)
                    print("Check to be sure the repository exists.\n", file=f)
                    print(
                        "And check to be sure the submitty_daemon user has appropriate access credentials.\n",
                        file=f)

        # exception in constructing full git repository url/path
        except Exception as error:
            grade_items_logging.log_message(
                job_id,
                message="ERROR: failed to construct valid repository url/path"
                + str(error))
            os.chdir(checkout_path)
            with open(
                    os.path.join(
                        checkout_path,
                        "failed_to_construct_valid_repository_url.txt"),
                    'w') as f:
                print(str(error), file=f)
                print("\n", file=f)
                print("Check to be sure the repository exists.\n", file=f)
                print(
                    "And check to be sure the submitty_daemon user has appropriate access credentials.\n",
                    file=f)

    copytree_if_exists(submission_path,
                       os.path.join(tmp_submission, "submission"))
    copytree_if_exists(checkout_path, os.path.join(tmp_submission, "checkout"))
    obj["queue_time"] = queue_time_longstring
    obj["regrade"] = is_batch_job
    obj["waittime"] = waittime
    obj["job_id"] = job_id

    with open(os.path.join(tmp_submission, "queue_file.json"), 'w') as outfile:
        json.dump(obj,
                  outfile,
                  sort_keys=True,
                  indent=4,
                  separators=(',', ': '))

    grading_began_longstring = dateutils.write_submitty_date(grading_began)
    with open(os.path.join(tmp_submission, ".grading_began"), 'w') as f:
        print(grading_began_longstring, file=f)

    # zip up autograding & submission folders
    filehandle1, my_autograding_zip_file = tempfile.mkstemp()
    filehandle2, my_submission_zip_file = tempfile.mkstemp()
    grade_item.zip_my_directory(tmp_autograding, my_autograding_zip_file)
    grade_item.zip_my_directory(tmp_submission, my_submission_zip_file)
    os.close(filehandle1)
    os.close(filehandle2)
    # cleanup
    shutil.rmtree(tmp_autograding)
    shutil.rmtree(tmp_submission)
    shutil.rmtree(tmp)

    #grade_items_logging.log_message(job_id,is_batch_job,"done zip",item_name)

    return (my_autograding_zip_file, my_submission_zip_file)
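
# A hedged sketch (not from the original source) of how the two stages above might be
# chained: the autograding and submission zips are prepared for a queue item, handed to
# grade_from_zip(), and the path to the resulting results zip is returned. All argument
# values are placeholders supplied by the caller.
def _example_grade_one_item(which_machine, which_untrusted, next_directory, next_to_grade):
    autograding_zip, submission_zip = prepare_autograding_and_submission_zip(
        which_machine, which_untrusted, next_directory, next_to_grade)
    return grade_from_zip(autograding_zip, submission_zip, which_untrusted)
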
def anon_log(in_filename, out_filename):
    count = 0
    last_hour = 0

    wait_count = 0
    total_wait = 0

    cs1_grade_count = 0
    ds_grade_count = 0
    cs1_total_grade = 0
    ds_total_grade = 0

    with open(in_filename, 'r') as infile:
        with open(out_filename, 'w') as outfile:
            for line in infile:
                line = line.strip()
                tokens = line.split('|')
                if len(tokens) == 6:
                    # pre f17
                    timestamp = tokens[0]
                    process = tokens[1]
                    batch = tokens[2]
                    untrusted = "           "
                    which = tokens[3].strip()
                    waitgrade = tokens[4]
                    result = tokens[5]

                    things = which.split('__')
                    if len(things) != 5:
                        # discard unparseable things (only errors)
                        continue
                    semester = things[0]
                    course = things[1]
                    assignment = things[2]
                    user = things[3]
                    version = things[4]

                elif len(tokens) == 7:
                    # f17 or later
                    timestamp = tokens[0]
                    process = tokens[1]
                    batch = tokens[2]
                    untrusted = tokens[3]
                    which = tokens[4].strip()
                    waitgrade = tokens[5]
                    result = tokens[6]

                    things = which.split('/')
                    if len(things) != 6:
                        # discard unparseable things (only errors)
                        continue
                    semester = things[0]
                    course = things[1]
                    assignment = things[3]
                    user = things[4]
                    version = things[5]

                else:
                    # discard lines with bad format (usually errors)
                    continue

                if batch.strip() == "BATCH":
                    continue

                cs1 = course == "csci1100"
                ds = course == "csci1200"
                cs1ords = cs1 or ds

                #print("which ",waitgrade)
                info = waitgrade.split()
                if len(info) == 0:
                    continue
                val = float(info[1])
                if info[0] == "wait:":
                    count += 1

                if info[0] == "wait:" and cs1ords and val < 600:
                    total_wait += val
                    wait_count += 1
                if info[0] == "grade:" and cs1ords and val < 600:
                    if cs1:
                        cs1_total_grade += float(val)
                        cs1_grade_count += 1
                    if ds:
                        ds_total_grade += float(val)
                        ds_grade_count += 1

                when = dateutils.read_submitty_date(timestamp)
                if when.hour != last_hour and (
                        wait_count + cs1_grade_count + ds_grade_count > 0):
                    my_stats(in_filename, last_hour, count, wait_count,
                             total_wait, cs1_grade_count, cs1_total_grade,
                             ds_grade_count, ds_total_grade)
                    last_hour = when.hour
                    wait_count = 0
                    total_wait = 0
                    cs1_grade_count = 0
                    cs1_total_grade = 0
                    ds_grade_count = 0
                    ds_total_grade = 0
                    count = 0

        if (wait_count + cs1_grade_count + ds_grade_count > 0):
            my_stats(in_filename, last_hour, count, wait_count, total_wait,
                     cs1_grade_count, cs1_total_grade, ds_grade_count,
                     ds_total_grade)
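# Illustrative usage (hypothetical file names; the log path format matches
# the one used in replay() below):
#   anon_log('/var/local/submitty/logs/autograding/20200901.txt',
#            '/tmp/20200901_summary.txt')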
Example #6
def main():
    args = arg_parse()
    data_dir = os.path.join(SUBMITTY_DATA_DIR, "courses")
    data_dirs = data_dir.split(os.sep)
    grade_queue = []
    if args.times is not None:
        starttime = dateutils.read_submitty_date(args.times[0])
        endtime = dateutils.read_submitty_date(args.times[1])
        replay(starttime,endtime)
        exit()
    if len(args.path) == 0:
        print ("ERROR! Must specify at least one path")
        exit()
    for input_path in args.path:
        print ('input path',input_path)
        # handle relative path
        if input_path == '.':
            input_path = os.getcwd()
        if input_path[0] != '/':
            input_path = os.getcwd() + '/' + input_path
        # remove trailing slash (if any)
        input_path = input_path.rstrip('/')
        # split the path into directories
        dirs = input_path.split(os.sep)

        # must be in the known submitty base data directory
        if dirs[0:len(data_dirs)] != data_dirs:
            print("ERROR: BAD REGRADE SUBMISSIONS PATH",input_path)
            raise SystemExit("You need to point to a directory within {}".format(data_dir))

        # Extract directories from provided pattern path (path may be incomplete)
        pattern_semester="*"
        if len(dirs) > len(data_dirs):
            pattern_semester=dirs[len(data_dirs)]
        pattern_course="*"
        if len(dirs) > len(data_dirs)+1:
            pattern_course=dirs[len(data_dirs)+1]
        if len(dirs) > len(data_dirs)+2:
            if (dirs[len(data_dirs)+2] != "submissions"):
                raise SystemExit("You must specify the submissions directory within the course")
        pattern_gradeable="*"
        if len(dirs) > len(data_dirs)+3:
            pattern_gradeable=dirs[len(data_dirs)+3]
        pattern_who="*"
        if len(dirs) > len(data_dirs)+4:
            pattern_who=dirs[len(data_dirs)+4]
        pattern_version="*"
        if len(dirs) > len(data_dirs)+5:
            pattern_version=dirs[len(data_dirs)+5]

        # full pattern may include wildcards!
        pattern = os.path.join(data_dir,pattern_semester,pattern_course,"submissions",pattern_gradeable,pattern_who,pattern_version)
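        # Illustrative example: pointing at <data_dir>/f23/csci1200 yields
        #   <data_dir>/f23/csci1200/submissions/*/*/*
        # (hypothetical semester/course names)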

        print("pattern: ",pattern)

        # Find all matching submissions
        for d in glob.glob(pattern):
            if os.path.isdir(d):
                my_dirs = d.split(os.sep)
                if len(my_dirs) != len(data_dirs)+6:
                    raise SystemExit("ERROR: directory length not as expected")
                # if requested, only regrade the currently active versions
                if args.active_only and not is_active_version(d):
                    continue
                print("match: ",d)
                my_semester=my_dirs[len(data_dirs)]
                my_course=my_dirs[len(data_dirs)+1]
                my_gradeable=my_dirs[len(data_dirs)+3]
                gradeable_config = os.path.join(data_dir, my_semester, my_course, "config", "build", "build_" + my_gradeable + ".json")
                with open(gradeable_config, 'r') as build_configuration:
                    datastore = json.load(build_configuration)
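                    # Fall back to the 'default' capability and an unlimited (-1)
                    # max grading time when the build config omits these keys.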
                    required_capabilities = datastore.get('required_capabilities', 'default')
                    max_grading_time = datastore.get('max_possible_grading_time', -1)

                #get the current time
                queue_time = dateutils.write_submitty_date()
                my_who=my_dirs[len(data_dirs)+4]
                my_version=my_dirs[len(data_dirs)+5]
                my_path=os.path.join(data_dir,my_semester,my_course,"submissions",my_gradeable,my_who,my_version)
                if my_path != d:
                    raise SystemExit("ERROR: path reconstruction failed")
                # add them to the queue

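                # Heuristic: "who" values containing an underscore are treated as
                # team ids; everything else is an individual user id.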
                if '_' not in my_who:
                    my_user = my_who
                    my_team = ""
                    my_is_team = False
                else:
                    my_user = ""
                    my_team = my_who
                    my_is_team = True

                grade_queue.append({"semester": my_semester,
                                    "course": my_course,
                                    "gradeable": my_gradeable,
                                    "user": my_user,
                                    "team": my_team,
                                    "who": my_who,
                                    "is_team": my_is_team,
                                    "version": my_version,
                                    "required_capabilities" : required_capabilities,
                                    "queue_time":queue_time,
                                    "regrade":True,
                                    "max_possible_grading_time" : max_grading_time})

    # Check before adding a very large number of systems to the queue
    if len(grade_queue) > 50 and not args.no_input:
        inp = input("Found {:d} matching submissions. Add to queue? [y/n]".format(len(grade_queue)))
        if inp.lower() not in ["yes", "y"]:
            raise SystemExit("Aborting...")

    for item in grade_queue:
        file_name = "__".join([item['semester'], item['course'], item['gradeable'], item['who'], item['version']])
        file_name = os.path.join(SUBMITTY_DATA_DIR, "to_be_graded_queue", file_name)
        with open(file_name, "w") as open_file:
            json.dump(item, open_file, sort_keys=True, indent=4)
        os.system("chmod o+rw {}".format(file_name))

    print("Added {:d} to the queue for regrading.".format(len(grade_queue)))
Example #7
def replay(starttime,endtime):
    replay_starttime=datetime.datetime.now()
    print (replay_starttime,"replay start: ",starttime)

    # error checking
    if not (starttime.year == endtime.year and
            starttime.month == endtime.month and
            starttime.day == endtime.day):
        print ("ERROR!  invalid replay range ",starttime,"->",endtime, " (must be same day)")
        exit()
    if starttime >= endtime:
        print ("ERROR!  invalid replay range ",starttime,"->",endtime, " (invalid times)")
        exit()

    # find the correct log file
    file = '/var/local/submitty/logs/autograding/{:d}{:02d}{:02d}.txt'.format(starttime.year,starttime.month,starttime.day)
    with open(file,'r') as lines:
        for line in lines:
            things = line.split('|')
            original_time = dateutils.read_submitty_date(things[0])
            # skip items outside of this time range
            if (original_time < starttime or
                original_time > endtime):
                continue
            # skip batch items
            if (things[2].strip() == "BATCH"):
                continue
            # only process the "wait" time (when we started grading the item)
            iswait=things[5].strip()[0:5]
            if (iswait != "wait:"):
                continue
            waittime=float(things[5].split()[1])
            # grab the job name
            my_job = things[4].strip()
            if my_job == "":
                continue
            what = my_job.split('/')
            # for now, only interested in Data Structures and Computer Science 1
            if not (what[1]=="csci1200" or what[1]=="csci1100"):
                continue
            # calculate when this job should be relaunched
            time_multiplier = 1.0
            pause_time = replay_starttime + (time_multiplier * (original_time - starttime))
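            # time_multiplier scales the replay clock: 1.0 reproduces the original
            # spacing between jobs; values below 1.0 would replay the log faster.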
            pause.until(pause_time)
            queue_time = dateutils.write_submitty_date()
            print(datetime.datetime.now(),"      REPLAY: ",original_time," ",my_job)
            # FIXME : This will need to be adjusted for team assignments
            # and assignments with special required capabilities!
            item = {"semester": what[0],
                    "course": what[1],
                    "gradeable": what[3],
                    "user": what[4],
                    "team": "",
                    "who": what[4],
                    "is_team": False,
                    "version": what[5],
                    "required_capabilities": "default",
                    "queue_time": queue_time,
                    "regrade": True,
                    "max_possible_grading_time" : -1 }
            file_name = "__".join([item['semester'], item['course'], item['gradeable'], item['who'], item['version']])
            file_name = os.path.join(SUBMITTY_DATA_DIR, "to_be_graded_queue", file_name)
            with open(file_name, "w") as open_file:
                json.dump(item, open_file, sort_keys=True, indent=4)
                os.system("chmod o+rw {}".format(file_name))  
    print (datetime.datetime.now(),"replay end: ",endtime)
Example #8
def archive_autograding_results(config, working_directory: os.PathLike,
                                job_id: str, which_untrusted: str,
                                is_batch_job: bool, complete_config_obj: dict,
                                gradeable_config_obj: dict, queue_obj: dict,
                                is_test_environment: bool):
    """ After grading is finished, archive the results. """

    tmp_autograding = os.path.join(working_directory, "TMP_AUTOGRADING")
    tmp_submission = os.path.join(working_directory, "TMP_SUBMISSION")
    tmp_work = os.path.join(working_directory, "TMP_WORK")
    tmp_logs = os.path.join(working_directory, "TMP_SUBMISSION", "tmp_logs")
    tmp_results = os.path.join(working_directory, "TMP_RESULTS")
    submission_path = os.path.join(tmp_submission, "submission")
    random_output_path = os.path.join(tmp_work, 'random_output')

    if "generate_output" not in queue_obj:
        partial_path = os.path.join(queue_obj["gradeable"], queue_obj["who"],
                                    str(queue_obj["version"]))
        item_name = os.path.join(queue_obj["semester"], queue_obj["course"],
                                 "submissions", partial_path)
    elif queue_obj["generate_output"]:
        item_name = os.path.join(queue_obj["semester"], queue_obj["course"],
                                 "generated_output", queue_obj["gradeable"])
    results_public_dir = os.path.join(tmp_results, "results_public")
    results_details_dir = os.path.join(tmp_results, "details")
    patterns = complete_config_obj['autograding']

    # Copy work to details
    pattern_copy("work_to_details", patterns['work_to_details'], tmp_work,
                 results_details_dir, tmp_logs)

    # Copy work to public
    if 'work_to_public' in patterns:
        pattern_copy("work_to_public", patterns['work_to_public'], tmp_work,
                     results_public_dir, tmp_logs)

    if os.path.exists(random_output_path):
        pattern_copy("work_to_random_output", [
            os.path.join(random_output_path, '**', '*.txt'),
        ], tmp_work, tmp_results, tmp_logs)
    # timestamp of first access to the gradeable page
    first_access_string = ""
    # grab the submission time
    if "generate_output" in queue_obj and queue_obj["generate_output"]:
        submission_string = ""
    else:
        with open(
                os.path.join(tmp_submission, 'submission',
                             ".submit.timestamp"),
                'r') as submission_time_file:
            submission_string = submission_time_file.read().rstrip()
        # grab the first access to the gradeable page (if it exists)
        user_assignment_access_filename = os.path.join(
            tmp_submission, ".user_assignment_access.json")
        if os.path.exists(user_assignment_access_filename):
            with open(user_assignment_access_filename, 'r') as access_file:
                obj = json.load(access_file)
                first_access_string = obj[0]["timestamp"]

    history_file_tmp = os.path.join(tmp_submission, "history.json")
    history_file = os.path.join(tmp_results, "history.json")
    if os.path.isfile(history_file_tmp) and not is_test_environment:
        shutil.move(history_file_tmp, history_file)
        # fix permissions
        ta_group_id = os.stat(tmp_results).st_gid
        os.chown(history_file, int(config.submitty_users['daemon_uid']),
                 ta_group_id)
        add_permissions(history_file, stat.S_IRGRP)
    grading_finished = dateutils.get_current_time()
    grade_result = ""
    if "generate_output" not in queue_obj:
        try:
            shutil.copy(os.path.join(tmp_work, "grade.txt"), tmp_results)
            with open(os.path.join(tmp_work, "grade.txt")) as f:
                lines = f.readlines()
                for line in lines:
                    line = line.rstrip('\n')
                    if line.startswith("Automatic grading total:"):
                        grade_result = line
        except Exception as e:
            with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
                f.write(
                    f"\n\nERROR: Grading incomplete -- Could not process {os.path.join(tmp_work,'grade.txt')}"
                )
            config.logger.log_message(
                "ERROR: could not process grade.txt. See stack trace entry for more details.",
                job_id=job_id,
                is_batch=is_batch_job,
                which_untrusted=which_untrusted,
                jobname=item_name,
            )
            config.logger.log_stack_trace(
                traceback.format_exc(),
                job_id=job_id,
                is_batch=is_batch_job,
                which_untrusted=which_untrusted,
                jobname=item_name,
            )

        submission_datetime = dateutils.read_submitty_date(submission_string)
        submission_longstring = dateutils.write_submitty_date(
            submission_datetime)

        # compute lateness (if there is a due date / submission deadline)
        gradeable_deadline_string = gradeable_config_obj["date_due"]
        if gradeable_deadline_string is None:
            print("NO DEADLINE")
            gradeable_deadline_longstring = "None"
            seconds_late = 0
        else:
            print("DEADLINE IS '" + str(gradeable_deadline_string) + "'")
            gradeable_deadline_datetime = dateutils.read_submitty_date(
                gradeable_deadline_string)
            gradeable_deadline_longstring = dateutils.write_submitty_date(
                gradeable_deadline_datetime)
            seconds_late = int((submission_datetime -
                                gradeable_deadline_datetime).total_seconds())

        # compute the access duration in seconds (if it exists)
        access_duration = -1
        if first_access_string != "":
            first_access_datetime = dateutils.read_submitty_date(
                first_access_string)
            access_duration = int(
                (submission_datetime - first_access_datetime).total_seconds())

        # note: negative = not late
        grading_finished_longstring = dateutils.write_submitty_date(
            grading_finished)

        with open(os.path.join(tmp_submission, ".grading_began"), 'r') as f:
            grading_began_longstring = f.read()
        grading_began = dateutils.read_submitty_date(grading_began_longstring)

        gradingtime = (grading_finished - grading_began).total_seconds()

        queue_obj["gradingtime"] = gradingtime
        queue_obj["grade_result"] = grade_result
        queue_obj["which_untrusted"] = which_untrusted
        waittime = queue_obj["waittime"]

        try:

            # Make certain results.json is utf-8 encoded.
            results_json_path = os.path.join(tmp_work, 'results.json')
            with codecs.open(results_json_path,
                             'r',
                             encoding='utf-8',
                             errors='ignore') as infile:
                results_str = "".join(line.rstrip() for line in infile)
                results_obj = json.loads(results_str)
            with open(results_json_path, 'w') as outfile:
                json.dump(results_obj, outfile, indent=4)

            shutil.move(results_json_path,
                        os.path.join(tmp_results, "results.json"))
        except Exception as e:
            with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
                f.write(
                    f"\n\nERROR: Grading incomplete -- Could not open/write {os.path.join(tmp_work,'results.json')}"
                )
            config.logger.log_message(
                "ERROR: results.json read/write error",
                job_id=job_id,
                is_batch=is_batch_job,
                which_untrusted=which_untrusted,
                jobname=item_name,
            )
            config.logger.log_stack_trace(
                traceback.format_exc(),
                job_id=job_id,
                is_batch=is_batch_job,
                which_untrusted=which_untrusted,
                jobname=item_name,
            )

        # Rescue custom validator files
        custom_validator_output_directory = os.path.join(
            tmp_results, "custom_validator_output")
        pattern_copy("rescue_custom_validator_validation_jsons", [
            os.path.join(tmp_work, 'validation_results_*.json'),
        ], tmp_work, custom_validator_output_directory, tmp_logs)
        pattern_copy("rescue_custom_validator_logs", [
            os.path.join(tmp_work, 'validation_logfile_*.txt'),
        ], tmp_work, custom_validator_output_directory, tmp_logs)
        pattern_copy("rescue_custom_validator_errors", [
            os.path.join(tmp_work, 'validation_stderr_*.txt'),
        ], tmp_work, custom_validator_output_directory, tmp_logs)

        just_write_grade_history(history_file, gradeable_deadline_longstring,
                                 submission_longstring, seconds_late,
                                 first_access_string, access_duration,
                                 queue_obj["queue_time"],
                                 "BATCH" if is_batch_job else "INTERACTIVE",
                                 grading_began_longstring,
                                 int(waittime), grading_finished_longstring,
                                 int(gradingtime), grade_result,
                                 queue_obj.get("revision", None))

        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
            f.write("FINISHED GRADING!\n")

        config.logger.log_message(grade_result,
                                  job_id=job_id,
                                  is_batch=is_batch_job,
                                  which_untrusted=which_untrusted,
                                  jobname=item_name,
                                  timelabel="grade:",
                                  elapsed_time=gradingtime)

    with open(os.path.join(tmp_results, "queue_file.json"), 'w') as outfile:
        json.dump(queue_obj,
                  outfile,
                  sort_keys=True,
                  indent=4,
                  separators=(',', ': '))

    # save the logs!
    shutil.copytree(tmp_logs, os.path.join(tmp_results, "logs"))

    # Save the .submit.notebook
    # Copy the .submit.notebook to tmp_work for validation
    submit_notebook_path = os.path.join(tmp_submission, 'submission',
                                        ".submit.notebook")
    if os.path.exists(submit_notebook_path):
        shutil.copy(submit_notebook_path,
                    os.path.join(tmp_results, ".submit.notebook"))
Example #9
def grade_from_zip(my_autograding_zip_file, my_submission_zip_file,
                   which_untrusted):

    os.chdir(SUBMITTY_DATA_DIR)
    tmp = os.path.join("/var/local/submitty/autograding_tmp/", which_untrusted,
                       "tmp")

    if os.path.exists(tmp):
        untrusted_grant_rwx_access(which_untrusted, tmp)
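        # The three mask arguments passed below are each the full
        # rwxrwxrwx (0o777) permission bit set.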
        add_permissions_recursive(
            tmp, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP
            | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH
            | stat.S_IXOTH, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
            | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH
            | stat.S_IWOTH | stat.S_IXOTH, stat.S_IRUSR | stat.S_IWUSR
            | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
            | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    # Remove any and all containers left over from past runs.
    old_containers = subprocess.check_output(
        ['docker', 'ps', '-aq', '-f',
         'name={0}'.format(which_untrusted)]).split()

    for old_container in old_containers:
        subprocess.call(['docker', 'rm', '-f', old_container.decode('utf8')])

    # clean up old usage of this directory
    shutil.rmtree(tmp, ignore_errors=True)
    os.mkdir(tmp)

    which_machine = socket.gethostname()

    # unzip autograding and submission folders
    tmp_autograding = os.path.join(tmp, "TMP_AUTOGRADING")
    tmp_submission = os.path.join(tmp, "TMP_SUBMISSION")
    unzip_this_file(my_autograding_zip_file, tmp_autograding)
    unzip_this_file(my_submission_zip_file, tmp_submission)
    os.remove(my_autograding_zip_file)
    os.remove(my_submission_zip_file)

    tmp_logs = os.path.join(tmp, "TMP_SUBMISSION", "tmp_logs")

    queue_file = os.path.join(tmp_submission, "queue_file.json")
    with open(queue_file, 'r') as infile:
        queue_obj = json.load(infile)

    queue_time_longstring = queue_obj["queue_time"]
    waittime = queue_obj["waittime"]
    is_batch_job = queue_obj["regrade"]
    job_id = queue_obj["job_id"]
    is_batch_job_string = "BATCH" if is_batch_job else "INTERACTIVE"
    revision = queue_obj.get("revision", None)

    partial_path = os.path.join(queue_obj["gradeable"], queue_obj["who"],
                                str(queue_obj["version"]))
    item_name = os.path.join(queue_obj["semester"], queue_obj["course"],
                             "submissions", partial_path)

    grade_items_logging.log_message(job_id, is_batch_job, which_untrusted,
                                    item_name, "wait:", waittime, "")

    with open(os.path.join(tmp_submission, ".grading_began"), 'r') as f:
        grading_began_longstring = f.read()
    grading_began = dateutils.read_submitty_date(grading_began_longstring)

    submission_path = os.path.join(tmp_submission, "submission")
    checkout_path = os.path.join(tmp_submission, "checkout")

    provided_code_path = os.path.join(tmp_autograding, "provided_code")
    test_input_path = os.path.join(tmp_autograding, "test_input")
    test_output_path = os.path.join(tmp_autograding, "test_output")
    custom_validation_code_path = os.path.join(tmp_autograding,
                                               "custom_validation_code")
    bin_path = os.path.join(tmp_autograding, "bin")
    form_json_config = os.path.join(tmp_autograding, "form.json")
    complete_config = os.path.join(tmp_autograding, "complete_config.json")

    with open(form_json_config, 'r') as infile:
        gradeable_config_obj = json.load(infile)
    gradeable_deadline_string = gradeable_config_obj["date_due"]

    with open(complete_config, 'r') as infile:
        complete_config_obj = json.load(infile)

    is_vcs = gradeable_config_obj["upload_type"] == "repository"
    checkout_subdirectory = complete_config_obj["autograding"].get(
        "use_checkout_subdirectory", "")
    checkout_subdir_path = os.path.join(checkout_path, checkout_subdirectory)

    if complete_config_obj.get('one_part_only', False):
        allow_only_one_part(submission_path,
                            os.path.join(tmp_logs, "overall.txt"))
        if is_vcs:
            with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
                print(
                    "WARNING:  ONE_PART_ONLY OPTION DOES NOT MAKE SENSE WITH VCS SUBMISSION",
                    file=f)

    # --------------------------------------------------------------------
    # START DOCKER

    # NOTE: DOCKER SUPPORT PRELIMINARY -- NEEDS MORE SECURITY BEFORE DEPLOYED ON LIVE SERVER
    complete_config = os.path.join(tmp_autograding, "complete_config.json")
    with open(complete_config, 'r') as infile:
        complete_config_obj = json.load(infile)

    # Save ourselves if autograding_method is None.
    autograding_method = complete_config_obj.get("autograding_method", "")
    USE_DOCKER = autograding_method == "docker"

    # --------------------------------------------------------------------
    # COMPILE THE SUBMITTED CODE

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        print("====================================\nCOMPILATION STARTS",
              file=f)

    # copy submitted files to the tmp compilation directory
    tmp_compilation = os.path.join(tmp, "TMP_COMPILATION")
    os.mkdir(tmp_compilation)
    os.chdir(tmp_compilation)

    submission_path = os.path.join(tmp_submission, "submission")
    checkout_path = os.path.join(tmp_submission, "checkout")

    provided_code_path = os.path.join(tmp_autograding, "provided_code")
    test_input_path = os.path.join(tmp_autograding, "test_input")
    test_output_path = os.path.join(tmp_autograding, "test_output")
    custom_validation_code_path = os.path.join(tmp_autograding,
                                               "custom_validation_code")
    bin_path = os.path.join(tmp_autograding, "bin")
    form_json_config = os.path.join(tmp_autograding, "form.json")

    with open(form_json_config, 'r') as infile:
        gradeable_config_obj = json.load(infile)
    gradeable_deadline_string = gradeable_config_obj["date_due"]

    patterns_submission_to_compilation = complete_config_obj["autograding"][
        "submission_to_compilation"]

    add_permissions(tmp_logs, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

    if USE_DOCKER:
        print("!!!!!!!!!!!!!!!!!!USING DOCKER!!!!!!!!!!!!!!!!!!!!!!!!")

    with open(complete_config, 'r') as infile:
        config = json.load(infile)
        my_testcases = config['testcases']

    # grab the submission time
    with open(os.path.join(submission_path, ".submit.timestamp"),
              'r') as submission_time_file:
        submission_string = submission_time_file.read().rstrip()

    with open(os.path.join(tmp_logs, "compilation_log.txt"), 'w') as logfile:
        # we start counting from one.
        executable_path_list = list()
        for testcase_num in range(1, len(my_testcases) + 1):
            testcase_folder = os.path.join(tmp_compilation,
                                           "test{:02}".format(testcase_num))

            if 'type' in my_testcases[testcase_num - 1]:
                if my_testcases[testcase_num - 1]['type'] not in ('FileCheck', 'Compilation'):
                    continue

                if my_testcases[testcase_num - 1]['type'] == 'Compilation':
                    if 'executable_name' in my_testcases[testcase_num - 1]:
                        provided_executable_list = my_testcases[
                            testcase_num - 1]['executable_name']
                        if not isinstance(provided_executable_list, (list, )):
                            provided_executable_list = list(
                                [provided_executable_list])
                        for executable_name in provided_executable_list:
                            if executable_name.strip() == '':
                                continue
                            executable_path = os.path.join(
                                testcase_folder, executable_name)
                            executable_path_list.append(
                                (executable_path, executable_name))
            else:
                continue

            os.makedirs(testcase_folder)

            pattern_copy("submission_to_compilation",
                         patterns_submission_to_compilation, submission_path,
                         testcase_folder, tmp_logs)

            if is_vcs:
                pattern_copy("checkout_to_compilation",
                             patterns_submission_to_compilation,
                             checkout_subdir_path, testcase_folder, tmp_logs)

            # copy any instructor provided code files to tmp compilation directory
            copy_contents_into(job_id, provided_code_path, testcase_folder,
                               tmp_logs)

            # copy compile.out to the current directory
            shutil.copy(os.path.join(bin_path, "compile.out"),
                        os.path.join(testcase_folder, "my_compile.out"))
            add_permissions(
                os.path.join(testcase_folder, "my_compile.out"), stat.S_IXUSR
                | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
            #untrusted_grant_rwx_access(which_untrusted, tmp_compilation)
            untrusted_grant_rwx_access(which_untrusted, testcase_folder)
            add_permissions_recursive(
                testcase_folder, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
                | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH
                | stat.S_IWOTH | stat.S_IXOTH, stat.S_IRUSR | stat.S_IWUSR
                | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
                | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH, stat.S_IRUSR
                | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP
                | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

            if USE_DOCKER:
                try:
                    #There can be only one container for a compilation step, so grab its container image
                    #TODO: set default in load_config_json.cpp
                    if my_testcases[testcase_num - 1]['type'] == 'FileCheck':
                        print(
                            "performing filecheck in default ubuntu:custom container"
                        )
                        container_image = "ubuntu:custom"
                    else:
                        container_image = my_testcases[
                            testcase_num -
                            1]["containers"][0]["container_image"]
                        print(
                            'creating a compilation container with image {0}'.
                            format(container_image))
                    untrusted_uid = str(getpwnam(which_untrusted).pw_uid)

                    compilation_container = None
                    compilation_container = subprocess.check_output([
                        'docker',
                        'create',
                        '-i',
                        '-u',
                        untrusted_uid,
                        '--network',
                        'none',
                        '-v',
                        testcase_folder + ':' + testcase_folder,
                        '-w',
                        testcase_folder,
                        container_image,
                        #The command to be run.
                        os.path.join(testcase_folder, 'my_compile.out'),
                        queue_obj['gradeable'],
                        queue_obj['who'],
                        str(queue_obj['version']),
                        submission_string,
                        '--testcase',
                        str(testcase_num)
                    ]).decode('utf8').strip()
                    print("starting container")
                    compile_success = subprocess.call(
                        ['docker', 'start', '-i', compilation_container],
                        stdout=logfile,
                        cwd=testcase_folder)
                except Exception as e:
                    print('An error occurred when compiling with docker.')
                    grade_items_logging.log_stack_trace(
                        job_id,
                        is_batch_job,
                        which_untrusted,
                        item_name,
                        trace=traceback.format_exc())
                finally:
                    if compilation_container is not None:
                        subprocess.call(
                            ['docker', 'rm', '-f', compilation_container])
                        print("cleaned up compilation container.")
            else:
                compile_success = subprocess.call([
                    os.path.join(SUBMITTY_INSTALL_DIR, "sbin",
                                 "untrusted_execute"), which_untrusted,
                    os.path.join(testcase_folder, "my_compile.out"),
                    queue_obj["gradeable"], queue_obj["who"],
                    str(queue_obj["version"]), submission_string, '--testcase',
                    str(testcase_num)
                ],
                                                  stdout=logfile,
                                                  cwd=testcase_folder)
            # remove the compilation program
            untrusted_grant_rwx_access(which_untrusted, testcase_folder)
            os.remove(os.path.join(testcase_folder, "my_compile.out"))

    if compile_success == 0:
        print(which_machine, which_untrusted, "COMPILATION OK")
    else:
        print(which_machine, which_untrusted, "COMPILATION FAILURE")
        grade_items_logging.log_message(job_id,
                                        is_batch_job,
                                        which_untrusted,
                                        item_name,
                                        message="COMPILATION FAILURE")
    add_permissions_recursive(
        tmp_compilation, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
        | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH
        | stat.S_IWOTH | stat.S_IXOTH, stat.S_IRUSR | stat.S_IWUSR
        | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
        | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH, stat.S_IRUSR
        | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP
        | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    # return to the main tmp directory
    os.chdir(tmp)

    # --------------------------------------------------------------------
    # make the runner directory

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        print("====================================\nRUNNER STARTS", file=f)

    tmp_work = os.path.join(tmp, "TMP_WORK")
    tmp_work_test_input = os.path.join(tmp_work, "test_input")
    tmp_work_submission = os.path.join(tmp_work, "submitted_files")
    tmp_work_compiled = os.path.join(tmp_work, "compiled_files")
    tmp_work_checkout = os.path.join(tmp_work, "checkout")

    os.mkdir(tmp_work)

    os.mkdir(tmp_work_test_input)
    os.mkdir(tmp_work_submission)
    os.mkdir(tmp_work_compiled)
    os.mkdir(tmp_work_checkout)

    os.chdir(tmp_work)

    # move all executable files from the compilation directory to the main tmp directory
    # Note: Must preserve the directory structure of compiled files (esp for Java)

    patterns_submission_to_runner = complete_config_obj["autograding"][
        "submission_to_runner"]

    pattern_copy("submission_to_runner", patterns_submission_to_runner,
                 submission_path, tmp_work_submission, tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_runner", patterns_submission_to_runner,
                     checkout_subdir_path, tmp_work_checkout, tmp_logs)

    # move the compiled files into the tmp_work_compiled directory
    for path, name in executable_path_list:
        if not os.path.isfile(path):
            continue
        target_path = os.path.join(tmp_work_compiled, name)
        if not os.path.exists(target_path):
            os.makedirs(os.path.dirname(target_path), exist_ok=True)
        shutil.copy(path, target_path)
        print('copied over {0}'.format(target_path))
        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
            print('grade_item: copied over {0}'.format(target_path), file=f)

    patterns_compilation_to_runner = complete_config_obj["autograding"][
        "compilation_to_runner"]
    #copy into the actual tmp_work directory for archiving/validating
    pattern_copy("compilation_to_runner", patterns_compilation_to_runner,
                 tmp_compilation, tmp_work, tmp_logs)
    #copy into tmp_work_compiled, which is provided to each testcase
    # TODO change this as our methodology for declaring testcase dependencies becomes more robust
    pattern_copy("compilation_to_runner", patterns_compilation_to_runner,
                 tmp_compilation, tmp_work_compiled, tmp_logs)

    # copy input files to tmp_work directory
    copy_contents_into(job_id, test_input_path, tmp_work_test_input, tmp_logs)

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        subprocess.call(['ls', '-lR', '.'], stdout=f)

    # copy runner.out to the current directory
    shutil.copy(os.path.join(bin_path, "run.out"),
                os.path.join(tmp_work, "my_runner.out"))

    #set the appropriate permissions for the newly created directories
    #TODO replaces commented out code below

    add_permissions(
        os.path.join(tmp_work, "my_runner.out"), stat.S_IXUSR | stat.S_IXGRP
        | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
    add_permissions(tmp_work_submission,
                    stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
    add_permissions(tmp_work_compiled,
                    stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
    add_permissions(tmp_work_checkout,
                    stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    #TODO this is how permissions used to be set. It was removed because of the way it interacts with the sticky bit.
    ## give the untrusted user read/write/execute permissions on the tmp directory & files
    # os.system('ls -al {0}'.format(tmp_work))
    # add_permissions_recursive(tmp_work,
    #                           stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
    #                           stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
    #                           stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    ##################################################################################################
    #call grade_item_main_runner.py
    runner_success = grade_item_main_runner.executeTestcases(
        complete_config_obj, tmp_logs, tmp_work, queue_obj, submission_string,
        item_name, USE_DOCKER, None, which_untrusted, job_id, grading_began)
    ##################################################################################################

    if runner_success == 0:
        print(which_machine, which_untrusted, "RUNNER OK")
    else:
        print(which_machine, which_untrusted, "RUNNER FAILURE")
        grade_items_logging.log_message(job_id,
                                        is_batch_job,
                                        which_untrusted,
                                        item_name,
                                        message="RUNNER FAILURE")

    add_permissions_recursive(
        tmp_work, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP
        | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH
        | stat.S_IXOTH, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
        | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH
        | stat.S_IWOTH | stat.S_IXOTH, stat.S_IRUSR | stat.S_IWUSR
        | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
        | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)
    add_permissions_recursive(
        tmp_compilation, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
        | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP | stat.S_IROTH
        | stat.S_IWOTH | stat.S_IXOTH, stat.S_IRUSR | stat.S_IWUSR
        | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP
        | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH, stat.S_IRUSR
        | stat.S_IWUSR | stat.S_IXUSR | stat.S_IRGRP | stat.S_IWGRP
        | stat.S_IXGRP | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    # --------------------------------------------------------------------
    # RUN VALIDATOR
    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        print("====================================\nVALIDATION STARTS",
              file=f)

    # copy results files from compilation...
    patterns_submission_to_validation = complete_config_obj["autograding"][
        "submission_to_validation"]
    pattern_copy("submission_to_validation", patterns_submission_to_validation,
                 submission_path, tmp_work, tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_validation",
                     patterns_submission_to_validation, checkout_subdir_path,
                     tmp_work, tmp_logs)
    patterns_compilation_to_validation = complete_config_obj["autograding"][
        "compilation_to_validation"]
    pattern_copy("compilation_to_validation",
                 patterns_compilation_to_validation, tmp_compilation, tmp_work,
                 tmp_logs)

    # remove the compilation directory
    shutil.rmtree(tmp_compilation)

    # copy output files to tmp_work directory
    copy_contents_into(job_id, test_output_path, tmp_work, tmp_logs)

    # copy any instructor custom validation code into the tmp work directory
    copy_contents_into(job_id, custom_validation_code_path, tmp_work, tmp_logs)

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        subprocess.call(['ls', '-lR', '.'], stdout=f)

    # copy validator.out to the current directory
    shutil.copy(os.path.join(bin_path, "validate.out"),
                os.path.join(tmp_work, "my_validator.out"))

    # give the untrusted user read/write/execute permissions on the tmp directory & files
    add_permissions_recursive(tmp_work,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    add_permissions(
        os.path.join(tmp_work, "my_validator.out"), stat.S_IXUSR | stat.S_IXGRP
        | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    # TODO: remove debugging prints.
    print("VALIDATING")
    # run my_validator.out as the untrusted user
    with open(os.path.join(tmp_logs, "validator_log.txt"), 'w') as logfile:
        if USE_DOCKER:
            # WIP: This option file facilitated testing...
            #USE_DOCKER = os.path.isfile("/tmp/use_docker")
            #use_docker_string="grading begins, using DOCKER" if USE_DOCKER else "grading begins (not using docker)"
            #grade_items_logging.log_message(job_id,is_batch_job,which_untrusted,submission_path,message=use_docker_string)
            container = subprocess.check_output([
                'docker', 'run', '-t', '-d', '-v', tmp + ':' + tmp,
                'ubuntu:custom'
            ]).decode('utf8').strip()
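            # The per-grader tmp tree is bind-mounted into a detached
            # ubuntu:custom container; the validator is then run via `docker exec`.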
            dockerlaunch_done = dateutils.get_current_time()
            dockerlaunch_time = (dockerlaunch_done -
                                 grading_began).total_seconds()
            grade_items_logging.log_message(job_id, is_batch_job,
                                            which_untrusted, item_name,
                                            "dcct:", dockerlaunch_time,
                                            "docker container created")

            validator_success = subprocess.call([
                'docker', 'exec', '-w', tmp_work, container,
                os.path.join(tmp_work, 'my_validator.out'),
                queue_obj['gradeable'], queue_obj['who'],
                str(queue_obj['version']), submission_string
            ],
                                                stdout=logfile)
        else:
            validator_success = subprocess.call([
                os.path.join(SUBMITTY_INSTALL_DIR, "sbin",
                             "untrusted_execute"), which_untrusted,
                os.path.join(tmp_work, "my_validator.out"),
                queue_obj["gradeable"], queue_obj["who"],
                str(queue_obj["version"]), submission_string
            ],
                                                stdout=logfile)

    if validator_success == 0:
        print(which_machine, which_untrusted, "VALIDATOR OK")
    else:
        print(which_machine, which_untrusted, "VALIDATOR FAILURE")
        grade_items_logging.log_message(job_id,
                                        is_batch_job,
                                        which_untrusted,
                                        item_name,
                                        message="VALIDATION FAILURE")

    untrusted_grant_rwx_access(which_untrusted, tmp_work)

    # grab the result of autograding
    grade_result = ""
    try:
        with open(os.path.join(tmp_work, "grade.txt")) as f:
            lines = f.readlines()
            for line in lines:
                line = line.rstrip('\n')
                if line.startswith("Automatic grading total:"):
                    grade_result = line
    except:
        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
            print("\n\nERROR: Grading incomplete -- Could not open ",
                  os.path.join(tmp_work, "grade.txt"))
            grade_items_logging.log_message(
                job_id,
                is_batch_job,
                which_untrusted,
                item_name,
                message="ERROR: grade.txt does not exist")
            grade_items_logging.log_stack_trace(job_id,
                                                is_batch_job,
                                                which_untrusted,
                                                item_name,
                                                trace=traceback.format_exc())

    # --------------------------------------------------------------------
    # MAKE RESULTS DIRECTORY & COPY ALL THE FILES THERE
    tmp_results = os.path.join(tmp, "TMP_RESULTS")

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        print("====================================\nARCHIVING STARTS", file=f)

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        subprocess.call(['ls', '-lR', '.'], stdout=f)

    os.makedirs(os.path.join(tmp_results, "details"))

    # remove the test_input directory, so we don't archive it!
    shutil.rmtree(os.path.join(tmp_work, "test_input"))

    # loop over the test case directories, and remove any files that are also in the test_input folder
    for testcase_num in range(1, len(my_testcases) + 1):
        testcase_folder = os.path.join(tmp_work,
                                       "test{:02}".format(testcase_num))
        remove_test_input_files(os.path.join(tmp_logs, "overall.txt"),
                                test_input_path, testcase_folder)

    patterns_work_to_details = complete_config_obj["autograding"][
        "work_to_details"]
    pattern_copy("work_to_details", patterns_work_to_details, tmp_work,
                 os.path.join(tmp_results, "details"), tmp_logs)

    if ("work_to_public" in complete_config_obj["autograding"]
            and len(complete_config_obj["autograding"]["work_to_public"]) > 0):
        # create the directory
        os.makedirs(os.path.join(tmp_results, "results_public"))
        # copy the files
        patterns_work_to_public = complete_config_obj["autograding"][
            "work_to_public"]
        pattern_copy("work_to_public", patterns_work_to_public, tmp_work,
                     os.path.join(tmp_results, "results_public"), tmp_logs)

    history_file_tmp = os.path.join(tmp_submission, "history.json")
    history_file = os.path.join(tmp_results, "history.json")
    if os.path.isfile(history_file_tmp):
        shutil.move(history_file_tmp, history_file)
        # fix permissions
        ta_group_id = os.stat(tmp_results).st_gid
        os.chown(history_file, int(DAEMON_UID), ta_group_id)
        add_permissions(history_file, stat.S_IRGRP)
    grading_finished = dateutils.get_current_time()

    try:
        shutil.copy(os.path.join(tmp_work, "grade.txt"), tmp_results)
    except:
        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
            print("\n\nERROR: Grading incomplete -- Could not copy ",
                  os.path.join(tmp_work, "grade.txt"))
        grade_items_logging.log_message(
            job_id,
            is_batch_job,
            which_untrusted,
            item_name,
            message="ERROR: grade.txt does not exist")
        grade_items_logging.log_stack_trace(job_id,
                                            is_batch_job,
                                            which_untrusted,
                                            item_name,
                                            trace=traceback.format_exc())

    # -------------------------------------------------------------
    # create/append to the results history

    # grab the submission time
    with open(os.path.join(submission_path,
                           ".submit.timestamp")) as submission_time_file:
        submission_string = submission_time_file.read().rstrip()
    submission_datetime = dateutils.read_submitty_date(submission_string)

    gradeable_deadline_datetime = dateutils.read_submitty_date(
        gradeable_deadline_string)
    gradeable_deadline_longstring = dateutils.write_submitty_date(
        gradeable_deadline_datetime)
    submission_longstring = dateutils.write_submitty_date(submission_datetime)

    seconds_late = int(
        (submission_datetime - gradeable_deadline_datetime).total_seconds())
    # note: negative = not late

    grading_finished_longstring = dateutils.write_submitty_date(
        grading_finished)

    gradingtime = (grading_finished - grading_began).total_seconds()

    with open(os.path.join(tmp_submission, "queue_file.json"), 'r') as infile:
        queue_obj = json.load(infile)
    queue_obj["gradingtime"] = gradingtime
    queue_obj["grade_result"] = grade_result
    queue_obj["which_untrusted"] = which_untrusted

    with open(os.path.join(tmp_results, "queue_file.json"), 'w') as outfile:
        json.dump(queue_obj,
                  outfile,
                  sort_keys=True,
                  indent=4,
                  separators=(',', ': '))

    try:
        shutil.move(os.path.join(tmp_work, "results.json"),
                    os.path.join(tmp_results, "results.json"))
    except:
        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
            print("\n\nERROR: Grading incomplete -- Could not open/write ",
                  os.path.join(tmp_work, "results.json"))
            grade_items_logging.log_message(
                job_id,
                is_batch_job,
                which_untrusted,
                item_name,
                message="ERROR: results.json read/write error")
            grade_items_logging.log_stack_trace(job_id,
                                                is_batch_job,
                                                which_untrusted,
                                                item_name,
                                                trace=traceback.format_exc())

    write_grade_history.just_write_grade_history(
        history_file, gradeable_deadline_longstring, submission_longstring,
        seconds_late, queue_time_longstring, is_batch_job_string,
        grading_began_longstring, int(waittime), grading_finished_longstring,
        int(gradingtime), grade_result, revision)

    os.chdir(SUBMITTY_DATA_DIR)

    if USE_DOCKER:
        with open(os.path.join(tmp_logs, "overall_log.txt"), 'w') as logfile:
            chmod_success = subprocess.call([
                'docker', 'exec', '-w', tmp_work, container, 'chmod', '-R',
                'ugo+rwx', '.'
            ],
                                            stdout=logfile)

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        f.write("FINISHED GRADING!\n")

    # save the logs!
    shutil.copytree(tmp_logs, os.path.join(tmp_results, "logs"))

    # zip up results folder
    filehandle, my_results_zip_file = tempfile.mkstemp()
    zip_my_directory(tmp_results, my_results_zip_file)
    os.close(filehandle)
    shutil.rmtree(tmp_autograding)
    shutil.rmtree(tmp_submission)
    shutil.rmtree(tmp_results)
    shutil.rmtree(tmp_work)
    shutil.rmtree(tmp)

    # WIP: extra logging for testing
    #grade_items_logging.log_message(job_id,is_batch_job,which_untrusted,submission_path,message="done grading")

    # --------------------------------------------------------------------
    # CLEAN UP DOCKER
    if USE_DOCKER:
        subprocess.call(['docker', 'rm', '-f', container])
        dockerdestroy_done = dateutils.get_current_time()
        dockerdestroy_time = (dockerdestroy_done -
                              grading_finished).total_seconds()
        grade_items_logging.log_message(job_id, is_batch_job, which_untrusted,
                                        item_name, "ddt:", dockerdestroy_time,
                                        "docker container destroyed")

    grade_items_logging.log_message(job_id, is_batch_job, which_untrusted,
                                    item_name, "grade:", gradingtime,
                                    grade_result)

    return my_results_zip_file
Example #10
def archive_autograding_results(working_directory, job_id, which_untrusted, is_batch_job, complete_config_obj, 
                                gradeable_config_obj, queue_obj, log_path, stack_trace_log_path, is_test_environment):
    """ After grading is finished, archive the results. """

    tmp_autograding = os.path.join(working_directory,"TMP_AUTOGRADING")
    tmp_submission = os.path.join(working_directory,"TMP_SUBMISSION")
    tmp_work = os.path.join(working_directory,"TMP_WORK")
    tmp_logs = os.path.join(working_directory,"TMP_SUBMISSION","tmp_logs")
    tmp_results = os.path.join(working_directory,"TMP_RESULTS")
    submission_path = os.path.join(tmp_submission, "submission")
    random_output_path = os.path.join(tmp_work, 'random_output')

    if "generate_output" not in queue_obj:
        partial_path = os.path.join(queue_obj["gradeable"],queue_obj["who"],str(queue_obj["version"]))
        item_name = os.path.join(queue_obj["semester"],queue_obj["course"],"submissions",partial_path)
    elif queue_obj["generate_output"]:
        item_name = os.path.join(queue_obj["semester"],queue_obj["course"],"generated_output",queue_obj["gradeable"])
    results_public_dir = os.path.join(tmp_results,"results_public")
    results_details_dir = os.path.join(tmp_results, "details")
    patterns = complete_config_obj['autograding']

    # Copy work to details
    pattern_copy("work_to_details", patterns['work_to_details'], tmp_work, results_details_dir, tmp_logs)
    
    # Copy work to public
    if 'work_to_public' in patterns:
        pattern_copy("work_to_public", patterns['work_to_public'], tmp_work, results_public_dir, tmp_logs)

    if os.path.exists(random_output_path):
        pattern_copy("work_to_random_output", [os.path.join(random_output_path, 'test*', '**', '*.txt'),], tmp_work, tmp_results, tmp_logs)
    # timestamp of first access to the gradeable page
    first_access_string = ""
    # grab the submission time
    if "generate_output" in queue_obj and queue_obj["generate_output"]:
        submission_string = ""
    else:
        with open(os.path.join(tmp_submission, 'submission' ,".submit.timestamp"), 'r') as submission_time_file:
            submission_string = submission_time_file.read().rstrip()
        # grab the first access to the gradeable page (if it exists)
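        # user_assignment_access.json is assumed to hold a chronologically
        # ordered "page_load_history" list; the first entry's "time" field is
        # taken as the student's first access to the gradeable page.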
        user_assignment_access_filename = os.path.join(tmp_submission, "user_assignment_access.json")
        if os.path.exists(user_assignment_access_filename):
            with open(user_assignment_access_filename, 'r') as access_file:
                obj = json.load(access_file, object_pairs_hook=collections.OrderedDict)
                first_access_string = obj["page_load_history"][0]["time"]

    history_file_tmp = os.path.join(tmp_submission,"history.json")
    history_file = os.path.join(tmp_results,"history.json")
    if os.path.isfile(history_file_tmp) and not is_test_environment:

        from . import CONFIG_PATH
        with open(os.path.join(CONFIG_PATH, 'submitty_users.json')) as open_file:
            OPEN_JSON = json.load(open_file)
        DAEMON_UID = OPEN_JSON['daemon_uid']

        shutil.move(history_file_tmp, history_file)
        # fix permissions
        ta_group_id = os.stat(tmp_results).st_gid
        os.chown(history_file, int(DAEMON_UID),ta_group_id)
        add_permissions(history_file, stat.S_IRGRP)
    grading_finished = dateutils.get_current_time()

    if "generate_output" not in queue_obj:
        try:
            shutil.copy(os.path.join(tmp_work, "grade.txt"), tmp_results)
        except:
            with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
                print ("\n\nERROR: Grading incomplete -- Could not copy ",os.path.join(tmp_work,"grade.txt"))
            log_message(log_path, job_id, is_batch_job, which_untrusted, item_name, message="ERROR: grade.txt does not exist")
            log_stack_trace(stack_trace_log_path, job_id, is_batch_job, which_untrusted, item_name, trace=traceback.format_exc())

        grade_result = ""
        try:
            with open(os.path.join(tmp_work,"grade.txt")) as f:
                lines = f.readlines()
                for line in lines:
                    line = line.rstrip('\n')
                    if line.startswith("Automatic grading total:"):
                        grade_result = line
        except:
            with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
                print ("\n\nERROR: Grading incomplete -- Could not open ",os.path.join(tmp_work,"grade.txt"))
                log_message(job_id,is_batch_job,which_untrusted,item_name,message="ERROR: grade.txt does not exist")
                log_stack_trace(job_id,is_batch_job,which_untrusted,item_name,trace=traceback.format_exc())


        gradeable_deadline_string = gradeable_config_obj["date_due"]

        # FIXME: The access date string is currently misformatted
        #    mm-dd-yyyy, but we want yyyy-mm-dd.  Also it is missing
        #    the common name timezone string, e.g., "America/New_York".
        #    We should standardize this logging eventually, but
        #    keeping it as is because we are mid-semester with this
        #    new feature and I don't want to break things.
        first_access_string = dateutils.normalize_submitty_date(first_access_string)
        
        submission_datetime = dateutils.read_submitty_date(submission_string)
        gradeable_deadline_datetime = dateutils.read_submitty_date(gradeable_deadline_string)
        gradeable_deadline_longstring = dateutils.write_submitty_date(gradeable_deadline_datetime)
        submission_longstring = dateutils.write_submitty_date(submission_datetime)
        seconds_late = int((submission_datetime-gradeable_deadline_datetime).total_seconds())
        # compute the access duration in seconds (if it exists)
        access_duration = -1
        if first_access_string != "":
            first_access_datetime = dateutils.read_submitty_date(first_access_string)
            access_duration = int((submission_datetime-first_access_datetime).total_seconds())

        # note: negative = not late
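        # e.g., submitting one hour before the deadline gives seconds_late = -3600,
        # one hour after gives +3600; access_duration stays -1 when no page-load
        # history was recorded.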
        grading_finished_longstring = dateutils.write_submitty_date(grading_finished)

        with open(os.path.join(tmp_submission,".grading_began"), 'r') as f:
            grading_began_longstring = f.read()
        grading_began = dateutils.read_submitty_date(grading_began_longstring)

        gradingtime = (grading_finished - grading_began).total_seconds()

        queue_obj["gradingtime"]=gradingtime
        queue_obj["grade_result"]=grade_result
        queue_obj["which_untrusted"]=which_untrusted
        waittime = queue_obj["waittime"]

        try:

            # Make certain results.json is utf-8 encoded.
            results_json_path = os.path.join(tmp_work, 'results.json')
            with codecs.open(results_json_path, 'r', encoding='utf-8', errors='ignore') as infile:
                results_str = "".join(line.rstrip() for line in infile)
                results_obj = json.loads(results_str)
            with open(results_json_path, 'w') as outfile:
                json.dump(results_obj, outfile, indent=4)

            shutil.move(results_json_path, os.path.join(tmp_results, "results.json"))
        except:
            with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
                print ("\n\nERROR: Grading incomplete -- Could not open/write ",os.path.join(tmp_work,"results.json"))
                log_message(log_path, job_id,is_batch_job,which_untrusted,item_name,message="ERROR: results.json read/write error")
                log_stack_trace(stack_trace_log_path, job_id,is_batch_job,which_untrusted,item_name,trace=traceback.format_exc())

        # Rescue custom validator files
        custom_validator_output_directory = os.path.join(tmp_results, "custom_validator_output")
        pattern_copy("rescue_custom_validator_validation_jsons", [os.path.join(tmp_work, 'validation_results_*.json'),], tmp_work, custom_validator_output_directory, tmp_logs)
        pattern_copy("rescue_custom_validator_logs", [os.path.join(tmp_work, 'validation_logfile_*.txt'),], tmp_work, custom_validator_output_directory, tmp_logs)
        pattern_copy("rescue_custom_validator_errors", [os.path.join(tmp_work, 'validation_stderr_*.txt'),], tmp_work, custom_validator_output_directory, tmp_logs)

        just_write_grade_history(history_file,
                                gradeable_deadline_longstring,
                                submission_longstring,
                                seconds_late,
                                first_access_string,
                                access_duration,
                                queue_obj["queue_time"],
                                "BATCH" if is_batch_job else "INTERACTIVE",
                                grading_began_longstring,
                                int(waittime),
                                grading_finished_longstring,
                                int(gradingtime),
                                grade_result,
                                queue_obj.get("revision", None))

        with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
            f.write("FINISHED GRADING!\n")
        
        log_message(log_path, job_id,is_batch_job,which_untrusted,item_name,"grade:",gradingtime,grade_result)

    with open(os.path.join(tmp_results,"queue_file.json"),'w') as outfile:
        json.dump(queue_obj,outfile,sort_keys=True,indent=4,separators=(',', ': '))

    # save the logs!
    shutil.copytree(tmp_logs,os.path.join(tmp_results,"logs"))
def just_grade_item(next_directory,next_to_grade,which_untrusted):

    my_pid = os.getpid()

    # verify the hwcron user is running this script
    if not int(os.getuid()) == int(HWCRON_UID):
        grade_items_logging.log_message("ERROR: must be run by hwcron")
        raise SystemExit("ERROR: the grade_item.py script must be run by the hwcron user")

    # --------------------------------------------------------
    # figure out what we're supposed to grade & error checking
    obj = get_submission_path(next_directory,next_to_grade)
    submission_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],
                                   "submissions",obj["gradeable"],obj["who"],str(obj["version"]))
    if not os.path.isdir(submission_path):
        grade_items_logging.log_message("ERROR: the submission directory does not exist" + submission_path)
        raise SystemExit("ERROR: the submission directory does not exist",submission_path)
    print ("pid",my_pid,"GRADE THIS", submission_path)

    is_vcs,vcs_type,vcs_base_url,vcs_subdirectory = get_vcs_info(SUBMITTY_DATA_DIR,obj["semester"],obj["course"],obj["gradeable"],obj["who"],obj["team"])

    is_batch_job = next_directory==BATCH_QUEUE
    is_batch_job_string = "BATCH" if is_batch_job else "INTERACTIVE"

    queue_time = get_queue_time(next_directory,next_to_grade)
    queue_time_longstring = dateutils.write_submitty_date(queue_time)
    grading_began = dateutils.get_current_time()
    waittime = int((grading_began-queue_time).total_seconds())
    grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,"wait:",waittime,"")

    # --------------------------------------------------------
    # various paths
    provided_code_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"provided_code",obj["gradeable"])
    test_input_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"test_input",obj["gradeable"])
    test_output_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"test_output",obj["gradeable"])
    custom_validation_code_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"custom_validation_code",obj["gradeable"])
    bin_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"bin")

    checkout_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"checkout",obj["gradeable"],obj["who"],str(obj["version"]))
    results_path = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"results",obj["gradeable"],obj["who"],str(obj["version"]))

    # grab a copy of the current history.json file (if it exists)
    history_file = os.path.join(results_path,"history.json")
    history_file_tmp = ""
    if os.path.isfile(history_file):
        filehandle,history_file_tmp = tempfile.mkstemp()
        shutil.copy(history_file,history_file_tmp)

    # get info from the gradeable config file
    json_config = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"config","form","form_"+obj["gradeable"]+".json")
    with open(json_config, 'r') as infile:
        gradeable_config_obj = json.load(infile)

    # get info from the complete config file
    complete_config = os.path.join(SUBMITTY_DATA_DIR,"courses",obj["semester"],obj["course"],"config","complete_config","complete_config_"+obj["gradeable"]+".json")
    with open(complete_config, 'r') as infile:
        complete_config_obj = json.load(infile)

    checkout_subdirectory = complete_config_obj["autograding"].get("use_checkout_subdirectory","")
    checkout_subdir_path = os.path.join(checkout_path,checkout_subdirectory)

    # --------------------------------------------------------------------
    # MAKE TEMPORARY DIRECTORY & COPY THE NECESSARY FILES THERE
    tmp = os.path.join("/var/local/submitty/autograding_tmp/",which_untrusted,"tmp")
    shutil.rmtree(tmp,ignore_errors=True)
    os.makedirs(tmp)
    
    # switch to tmp directory
    os.chdir(tmp)

    # make the logs directory
    tmp_logs = os.path.join(tmp,"tmp_logs")
    os.makedirs(tmp_logs)

    # grab the submission time
    with open (os.path.join(submission_path,".submit.timestamp")) as submission_time_file:
        submission_string = submission_time_file.read().rstrip()
    
    submission_datetime = dateutils.read_submitty_date(submission_string)


    # --------------------------------------------------------------------
    # CHECKOUT THE STUDENT's REPO
    if is_vcs:
        with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
            print ("====================================\nVCS CHECKOUT", file=f)
            print ("vcs_subdirectory",vcs_subdirectory, file=f)
        # cleanup the previous checkout (if it exists)
        shutil.rmtree(checkout_path,ignore_errors=True)
        os.makedirs(checkout_path, exist_ok=True)
        subprocess.call (['/usr/bin/git', 'clone', vcs_subdirectory, checkout_path])
        os.chdir(checkout_path)

        # determine which version we need to checkout
        what_version = subprocess.check_output(['git', 'rev-list', '-n', '1', '--before="'+submission_string+'"', 'master'])
        what_version = str(what_version.decode('utf-8')).rstrip()
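        # rev-list yields the newest commit on master at or before the
        # submission timestamp, or an empty string if no such commit exists yet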
        if what_version == "":
            # oops, pressed the grade button before a valid commit
            shutil.rmtree(checkout_path,ignore_errors=True)
        else:
            # and check out the right version
            subprocess.call (['git', 'checkout', '-b', 'grade', what_version])
        os.chdir(tmp)
        subprocess.call(['ls', '-lR', checkout_path], stdout=open(tmp_logs + "/overall.txt", 'a'))


    # --------------------------------------------------------------------
    # START DOCKER

    container = None
    if USE_DOCKER:
        container = subprocess.check_output(['docker', 'run', '-t', '-d',
                                             '-v', tmp + ':' + tmp,
                                             'ubuntu:custom']).decode('utf8').strip()

    # --------------------------------------------------------------------
    # COMPILE THE SUBMITTED CODE

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        print("====================================\nCOMPILATION STARTS", file=f)
    
    # copy submitted files to the tmp compilation directory
    tmp_compilation = os.path.join(tmp,"TMP_COMPILATION")
    os.mkdir(tmp_compilation)
    os.chdir(tmp_compilation)
    
    gradeable_deadline_string = gradeable_config_obj["date_due"]
    
    patterns_submission_to_compilation = complete_config_obj["autograding"]["submission_to_compilation"]
    pattern_copy("submission_to_compilation",patterns_submission_to_compilation,submission_path,tmp_compilation,tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_compilation",patterns_submission_to_compilation,checkout_subdir_path,tmp_compilation,tmp_logs)
    
    # copy any instructor provided code files to tmp compilation directory
    copy_contents_into(provided_code_path,tmp_compilation,tmp_logs)

    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))

    # copy compile.out to the current directory
    shutil.copy (os.path.join(bin_path,obj["gradeable"],"compile.out"),os.path.join(tmp_compilation,"my_compile.out"))

    # give the untrusted user read/write/execute permissions on the tmp directory & files
    add_permissions_recursive(tmp_compilation,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)

    add_permissions(tmp,stat.S_IROTH | stat.S_IXOTH)
    add_permissions(tmp_logs,stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

    with open(os.path.join(tmp_logs,"compilation_log.txt"), 'w') as logfile:
        if USE_DOCKER:
            compile_success = subprocess.call(['docker', 'exec', '-w', tmp_compilation, container,
                                               os.path.join(tmp_compilation, 'my_compile.out'), obj['gradeable'],
                                               obj['who'], str(obj['version']), submission_string], stdout=logfile)
        else:
            compile_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                               which_untrusted,
                                               os.path.join(tmp_compilation,"my_compile.out"),
                                               obj["gradeable"],
                                               obj["who"],
                                               str(obj["version"]),
                                               submission_string],
                                              stdout=logfile)

    if compile_success == 0:
        print ("pid",my_pid,"COMPILATION OK")
    else:
        print ("pid",my_pid,"COMPILATION FAILURE")
        grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,"","","COMPILATION FAILURE")
    #raise SystemExit()

    untrusted_grant_rwx_access(which_untrusted,tmp_compilation)
        
    # remove the compilation program
    os.remove(os.path.join(tmp_compilation,"my_compile.out"))

    # return to the main tmp directory
    os.chdir(tmp)


    # --------------------------------------------------------------------
    # make the runner directory

    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nRUNNER STARTS", file=f)
        
    tmp_work = os.path.join(tmp,"TMP_WORK")
    os.makedirs(tmp_work)
    os.chdir(tmp_work)

    # move all executable files from the compilation directory to the main tmp directory
    # Note: Must preserve the directory structure of compiled files (esp for Java)

    patterns_submission_to_runner = complete_config_obj["autograding"]["submission_to_runner"]
    pattern_copy("submission_to_runner",patterns_submission_to_runner,submission_path,tmp_work,tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_runner",patterns_submission_to_runner,checkout_subdir_path,tmp_work,tmp_logs)

    patterns_compilation_to_runner = complete_config_obj["autograding"]["compilation_to_runner"]
    pattern_copy("compilation_to_runner",patterns_compilation_to_runner,tmp_compilation,tmp_work,tmp_logs)
        
    # copy input files to tmp_work directory
    copy_contents_into(test_input_path,tmp_work,tmp_logs)

    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))

    # copy runner.out to the current directory
    shutil.copy (os.path.join(bin_path,obj["gradeable"],"run.out"),os.path.join(tmp_work,"my_runner.out"))

    # give the untrusted user read/write/execute permissions on the tmp directory & files
    add_permissions_recursive(tmp_work,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    # raise SystemExit()
    # run the run.out as the untrusted user
    with open(os.path.join(tmp_logs,"runner_log.txt"), 'w') as logfile:
        if USE_DOCKER:
            runner_success = subprocess.call(['docker', 'exec', '-w', tmp_work, container,
                                              os.path.join(tmp_work, 'my_runner.out'), obj['gradeable'],
                                              obj['who'], str(obj['version']), submission_string], stdout=logfile)
        else:
            runner_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                              which_untrusted,
                                              os.path.join(tmp_work,"my_runner.out"),
                                              obj["gradeable"],
                                              obj["who"],
                                              str(obj["version"]),
                                              submission_string],
                                              stdout=logfile)

    if runner_success == 0:
        print ("pid",my_pid,"RUNNER OK")
    else:
        print ("pid",my_pid,"RUNNER FAILURE")
        grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,"","","RUNNER FAILURE")

    untrusted_grant_rwx_access(which_untrusted,tmp_work)
    untrusted_grant_rwx_access(which_untrusted,tmp_compilation)

    # --------------------------------------------------------------------
    # RUN VALIDATOR

    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nVALIDATION STARTS", file=f)

    # copy results files from compilation...
    patterns_submission_to_validation = complete_config_obj["autograding"]["submission_to_validation"]
    pattern_copy("submission_to_validation",patterns_submission_to_validation,submission_path,tmp_work,tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_validation",patterns_submission_to_validation,checkout_subdir_path,tmp_work,tmp_logs)
    patterns_compilation_to_validation = complete_config_obj["autograding"]["compilation_to_validation"]
    pattern_copy("compilation_to_validation",patterns_compilation_to_validation,tmp_compilation,tmp_work,tmp_logs)

    # remove the compilation directory
    shutil.rmtree(tmp_compilation)

    # copy output files to tmp_work directory
    copy_contents_into(test_output_path,tmp_work,tmp_logs)

    # copy any instructor custom validation code into the tmp work directory
    copy_contents_into(custom_validation_code_path,tmp_work,tmp_logs)

    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))

    # copy validator.out to the current directory
    shutil.copy (os.path.join(bin_path,obj["gradeable"],"validate.out"),os.path.join(tmp_work,"my_validator.out"))

    # give the untrusted user read/write/execute permissions on the tmp directory & files
    add_permissions_recursive(tmp_work,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    add_permissions(os.path.join(tmp_work,"my_validator.out"),stat.S_IROTH | stat.S_IXOTH)

    # run the validator.out as the untrusted user
    with open(os.path.join(tmp_logs,"validator_log.txt"), 'w') as logfile:
        if USE_DOCKER:
            validator_success = subprocess.call(['docker', 'exec', '-w', tmp_work, container,
                                                 os.path.join(tmp_work, 'my_validator.out'), obj['gradeable'],
                                                 obj['who'], str(obj['version']), submission_string], stdout=logfile)
        else:
            validator_success = subprocess.call([os.path.join(SUBMITTY_INSTALL_DIR,"bin","untrusted_execute"),
                                                 which_untrusted,
                                                 os.path.join(tmp_work,"my_validator.out"),
                                                 obj["gradeable"],
                                                 obj["who"],
                                                 str(obj["version"]),
                                                 submission_string],
                                                stdout=logfile)

    if validator_success == 0:
        print ("pid",my_pid,"VALIDATOR OK")
    else:
        print ("pid",my_pid,"VALIDATOR FAILURE")
        grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,"","","VALIDATION FAILURE")

    untrusted_grant_rwx_access(which_untrusted,tmp_work)

    # grab the result of autograding
    grade_result = ""
    with open(os.path.join(tmp_work,"grade.txt")) as f:
        lines = f.readlines()
        for line in lines:
            line = line.rstrip('\n')
            if line.startswith("Automatic grading total:"):
                grade_result = line

    # --------------------------------------------------------------------
    # MAKE RESULTS DIRECTORY & COPY ALL THE FILES THERE

    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        print ("====================================\nARCHIVING STARTS", file=f)

    subprocess.call(['ls', '-lR', '.'], stdout=open(tmp_logs + "/overall.txt", 'a'))

    os.chdir(bin_path)

    # save the old results path!
    if os.path.isdir(os.path.join(results_path,"OLD")):
        shutil.move(os.path.join(results_path,"OLD"),
                    os.path.join(tmp,"OLD_RESULTS"))

    # clean out all of the old files if this is a re-run
    shutil.rmtree(results_path,ignore_errors=True)

    # create the directory (and the full path if it doesn't already exist)
    os.makedirs(results_path)

    # bring back the old results!
    if os.path.isdir(os.path.join(tmp,"OLD_RESULTS")):
        shutil.move(os.path.join(tmp,"OLD_RESULTS"),
                    os.path.join(results_path,"OLD"))

    os.makedirs(os.path.join(results_path,"details"))

    patterns_work_to_details = complete_config_obj["autograding"]["work_to_details"]
    pattern_copy("work_to_details",patterns_work_to_details,tmp_work,os.path.join(results_path,"details"),tmp_logs)

    if not history_file_tmp == "":
        shutil.move(history_file_tmp,history_file)
        # fix permissions
        ta_group_id = os.stat(results_path).st_gid
        os.chown(history_file,int(HWCRON_UID),ta_group_id)
        add_permissions(history_file,stat.S_IRGRP)
        
    grading_finished = dateutils.get_current_time()

    shutil.copy(os.path.join(tmp_work,"results.json"),results_path)
    shutil.copy(os.path.join(tmp_work,"grade.txt"),results_path)

    # -------------------------------------------------------------
    # create/append to the results history

    gradeable_deadline_datetime = dateutils.read_submitty_date(gradeable_deadline_string)
    gradeable_deadline_longstring = dateutils.write_submitty_date(gradeable_deadline_datetime)
    submission_longstring = dateutils.write_submitty_date(submission_datetime)
    
    seconds_late = int((submission_datetime-gradeable_deadline_datetime).total_seconds())
    # note: negative = not late

    grading_began_longstring = dateutils.write_submitty_date(grading_began)
    grading_finished_longstring = dateutils.write_submitty_date(grading_finished)

    gradingtime = int((grading_finished-grading_began).total_seconds())

    write_grade_history.just_write_grade_history(history_file,
                                                 gradeable_deadline_longstring,
                                                 submission_longstring,
                                                 seconds_late,
                                                 queue_time_longstring,
                                                 is_batch_job_string,
                                                 grading_began_longstring,
                                                 waittime,
                                                 grading_finished_longstring,
                                                 gradingtime,
                                                 grade_result)

    #---------------------------------------------------------------------
    # WRITE OUT VERSION DETAILS
    if WRITE_DATABASE:
        insert_database_version_data.insert_to_database(
            obj["semester"],
            obj["course"],
            obj["gradeable"],
            obj["user"],
            obj["team"],
            obj["who"],
            True if obj["is_team"] else False,
            str(obj["version"]))

    print ("pid",my_pid,"finished grading ", next_to_grade, " in ", gradingtime, " seconds")

    grade_items_logging.log_message(is_batch_job,which_untrusted,submission_path,"grade:",gradingtime,grade_result)

    with open(os.path.join(tmp_logs,"overall.txt"),'a') as f:
        f.write("FINISHED GRADING!")

    # save the logs!
    shutil.copytree(tmp_logs,os.path.join(results_path,"logs"))

    # --------------------------------------------------------------------
    # REMOVE TEMP DIRECTORY
    shutil.rmtree(tmp)

    # --------------------------------------------------------------------
    # CLEAN UP DOCKER
    if USE_DOCKER:
        subprocess.call(['docker', 'rm', '-f', container])
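# A minimal usage sketch (assumptions: INTERACTIVE_QUEUE is a queue-directory
# constant alongside BATCH_QUEUE above, and the queue file follows the
# semester__course__gradeable__who__version naming convention used by main() below):
#
#   just_grade_item(INTERACTIVE_QUEUE, "f17__csci1200__hw01__smithj__3", "untrusted00")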
Example #12
def replay(starttime,endtime):
    replay_starttime=datetime.datetime.now()
    print (replay_starttime,"replay start: ",starttime)

    # error checking
    if not (starttime.year == endtime.year and
            starttime.month == endtime.month and
            starttime.day == endtime.day):
        print ("ERROR!  invalid replay range ",starttime,"->",endtime, " (must be same day)")
        exit()
    if starttime >= endtime:
        print ("ERROR!  invalid replay range ",starttime,"->",endtime, " (invalid times)")
        exit()

    # find the correct log file
    file = '/var/local/submitty/logs/autograding/{:d}{:02d}{:02d}.txt'.format(starttime.year,starttime.month,starttime.day)
    with open(file,'r') as lines:
        for line in lines:
            things = line.split('|')
            original_time = dateutils.read_submitty_date(things[0])
            # skip items outside of this time range
            if (original_time < starttime or
                original_time > endtime):
                continue
            # skip batch items
            if (things[2].strip() == "BATCH"):
                continue
            # only process the "wait" time (when we started grading the item)
            iswait=things[5].strip()[0:5]
            if (iswait != "wait:"):
                continue
            waittime=float(things[5].split()[1])
            # grab the job name
            my_job = things[4].strip()
            if my_job == "":
                continue
            what = my_job.split('/')
            # for now, only interested in Data Structures and Computer Science 1
            if not (what[1]=="csci1200" or what[1]=="csci1100"):
                continue
            # calculate when this job should be relaunched
            time_multiplier=1.0
            pause_time=replay_starttime+(time_multiplier*(original_time-starttime))
            pause.until(pause_time)
            queue_time = dateutils.write_submitty_date()
            print(datetime.datetime.now(),"      REPLAY: ",original_time," ",my_job)
            # FIXME : This will need to be adjusted for team assignments
            # and assignments with special required capabilities!
            item = {"semester": what[0],
                    "course": what[1],
                    "gradeable": what[3],
                    "user": what[4],
                    "team": "",
                    "who": what[4],
                    "is_team": False,
                    "version": what[5],
                    "required_capabilities": "default",
                    "queue_time": queue_time,
                    "regrade": True,
                    "max_possible_grading_time" : -1 }
            file_name = "__".join([item['semester'], item['course'], item['gradeable'], item['who'], item['version']])
            file_name = os.path.join(SUBMITTY_DATA_DIR, "to_be_graded_queue", file_name)
            with open(file_name, "w") as open_file:
                json.dump(item, open_file, sort_keys=True, indent=4)
                os.system("chmod o+rw {}".format(file_name))  
    print (datetime.datetime.now(),"replay end: ",endtime)
Example #13
def main():
    args = arg_parse()
    data_dir = os.path.join(SUBMITTY_DATA_DIR, "courses")
    data_dirs = data_dir.split(os.sep)
    grade_queue = []
    if args.times is not None:
        starttime = dateutils.read_submitty_date(args.times[0])
        endtime = dateutils.read_submitty_date(args.times[1])
        replay(starttime,endtime)
        exit()
    if len(args.path) == 0:
        print ("ERROR! Must specify at least one path")
        exit()
    for input_path in args.path:
        print ('input path',input_path)
        # handle relative path
        if input_path == '.':
            input_path = os.getcwd()
        if input_path[0] != '/':
            input_path = os.getcwd() + '/' + input_path
        # remove trailing slash (if any)
        input_path = input_path.rstrip('/')
        # split the path into directories
        dirs = input_path.split(os.sep)

        # must be in the known submitty base data directory
        if dirs[0:len(data_dirs)] != data_dirs:
            print("ERROR: BAD REGRADE SUBMISSIONS PATH",input_path)
            raise SystemExit("You need to point to a directory within {}".format(data_dir))

        # Extract directories from provided pattern path (path may be incomplete)
        pattern_semester="*"
        if len(dirs) > len(data_dirs):
            pattern_semester=dirs[len(data_dirs)]
        pattern_course="*"
        if len(dirs) > len(data_dirs)+1:
            pattern_course=dirs[len(data_dirs)+1]
        if len(dirs) > len(data_dirs)+2:
            if (dirs[len(data_dirs)+2] != "submissions"):
                raise SystemExit("You must specify the submissions directory within the course")
        pattern_gradeable="*"
        if len(dirs) > len(data_dirs)+3:
            pattern_gradeable=dirs[len(data_dirs)+3]
        pattern_who="*"
        if len(dirs) > len(data_dirs)+4:
            pattern_who=dirs[len(data_dirs)+4]
        pattern_version="*"
        if len(dirs) > len(data_dirs)+5:
            pattern_version=dirs[len(data_dirs)+5]

        # full pattern may include wildcards!
        pattern = os.path.join(pattern_semester,pattern_course,"submissions",pattern_gradeable,pattern_who,pattern_version)

        print("pattern: ",pattern)

        # Find all matching submissions
        for d in Path(data_dir).glob(pattern):
            d = str(d)
            if os.path.isdir(d):
                my_dirs = d.split(os.sep)
                if len(my_dirs) != len(data_dirs)+6:
                    raise SystemExit("ERROR: directory length not as expected")
                # if requested, only regrade the currently active versions
                if args.active_only and not is_active_version(d):
                    continue
                print("match: ",d)
                my_semester=my_dirs[len(data_dirs)]
                my_course=my_dirs[len(data_dirs)+1]
                my_gradeable=my_dirs[len(data_dirs)+3]
                gradeable_config = os.path.join(data_dir,my_semester,my_course,"config/build/"+"build_"+my_gradeable+".json")
                with open(gradeable_config, 'r') as build_configuration:
                    datastore = json.load(build_configuration)
                    required_capabilities = datastore.get('required_capabilities', 'default')
                    max_grading_time = datastore.get('max_possible_grading_time', -1)

                #get the current time
                queue_time = dateutils.write_submitty_date()
                my_who=my_dirs[len(data_dirs)+4]
                my_version=my_dirs[len(data_dirs)+5]
                my_path=os.path.join(data_dir,my_semester,my_course,"submissions",my_gradeable,my_who,my_version)
                if my_path != d:
                    raise SystemExit("ERROR: path reconstruction failed")
                # add them to the queue

                if '_' not in my_who:
                    my_user = my_who
                    my_team = ""
                    my_is_team = False
                else:
                    my_user = ""
                    my_team = my_who
                    my_is_team = True

                # FIXME: Set this value appropriately
                is_vcs_checkout = False

                grade_queue.append({"semester": my_semester,
                                    "course": my_course,
                                    "gradeable": my_gradeable,
                                    "user": my_user,
                                    "team": my_team,
                                    "who": my_who,
                                    "is_team": my_is_team,
                                    "version": my_version,
                                    "vcs_checkout": is_vcs_checkout,
                                    "required_capabilities" : required_capabilities,
                                    "queue_time":queue_time,
                                    "regrade":True,
                                    "max_possible_grading_time" : max_grading_time})

    # Check before adding a very large number of submissions to the queue
    if len(grade_queue) > 50 and not args.no_input:
        inp = input("Found {:d} matching submissions. Add to queue? [y/n]".format(len(grade_queue)))
        if inp.lower() not in ["yes", "y"]:
            raise SystemExit("Aborting...")

    for item in grade_queue:
        file_name = "__".join([item['semester'], item['course'], item['gradeable'], item['who'], item['version']])
        file_name = os.path.join(SUBMITTY_DATA_DIR, "to_be_graded_queue", file_name)
        with open(file_name, "w") as open_file:
            json.dump(item, open_file, sort_keys=True, indent=4)
        os.system("chmod o+rw {}".format(file_name))

    print("Added {:d} to the queue for regrading.".format(len(grade_queue)))
def anon_log(in_filename,out_filename):
    count=0
    last_hour=0

    wait_count=0
    total_wait=0

    cs1_grade_count=0
    ds_grade_count=0
    cs1_total_grade=0
    ds_total_grade=0

    with open(in_filename,'r') as infile:
        with open (out_filename,'w') as outfile:
            for line in infile:
                line = line.strip()
                tokens = line.split('|')
                if len(tokens) == 6:
                    # pre f17
                    timestamp = tokens[0]
                    process = tokens[1]
                    batch = tokens[2]
                    untrusted = "           "
                    which = tokens[3].strip()
                    waitgrade = tokens[4]
                    result =tokens[5]

                    things=which.split('__')
                    if len(things) != 5:
                        # discard unparseable things (only errors)
                        continue
                    semester = things[0]
                    course = things[1]
                    assignment = things[2]
                    user = things[3]
                    version = things[4]
                    
                elif len(tokens) == 7:
                    # f17 or later
                    timestamp = tokens[0]
                    process = tokens[1]
                    batch = tokens[2]
                    untrusted = tokens[3]                
                    which=tokens[4].strip()
                    waitgrade =tokens[5]
                    result =tokens[6]
                    
                    things=which.split('/')
                    if len(things) != 6:
                        # discard unparseable things (only errors)
                        continue
                    semester = things[0]
                    course = things[1]
                    assignment = things[3]
                    user = things[4]
                    version = things[5]

                else:
                    # discard lines with bad format (usually errors)
                    continue

                if batch.strip()=="BATCH":
                    continue
                
                cs1=course=="csci1100"
                ds=course=="csci1200"
                cs1ords=cs1 or ds
                
                #print("which ",waitgrade)
                info=waitgrade.split()
                if len(info)==0:
                    continue
                val=float(info[1])
                if info[0]=="wait:":
                    count+=1
                
                if info[0]=="wait:" and cs1ords and val<600:
                    total_wait+=val
                    wait_count+=1
                if info[0]=="grade:" and cs1ords and val<600:
                    if cs1:
                        cs1_total_grade+=float(val)
                        cs1_grade_count+=1
                    if ds:
                        ds_total_grade+=float(val)
                        ds_grade_count+=1
                    
                when = dateutils.read_submitty_date(timestamp)
                if when.hour!=last_hour and (wait_count+cs1_grade_count+ds_grade_count>0):
                    my_stats(in_filename,last_hour,count,wait_count,total_wait,cs1_grade_count,cs1_total_grade,ds_grade_count,ds_total_grade)
                    last_hour=when.hour
                    wait_count=0
                    total_wait=0
                    cs1_grade_count=0
                    cs1_total_grade=0
                    ds_grade_count=0
                    ds_total_grade=0
                    count=0
                    
        if (wait_count+cs1_grade_count+ds_grade_count>0):
            my_stats(in_filename,last_hour,count,wait_count,total_wait,cs1_grade_count,cs1_total_grade,ds_grade_count,ds_total_grade)
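# Usage sketch (file names illustrative): summarize one day's autograding log,
# emitting per-hour wait/grade statistics through my_stats().
#
#   anon_log("/var/local/submitty/logs/autograding/20171002.txt",
#            "20171002_anon.txt")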
Example #15
def grade_from_zip(my_autograding_zip_file, my_submission_zip_file,
                   which_untrusted):
    os.chdir(SUBMITTY_DATA_DIR)
    tmp = os.path.join("/var/local/submitty/autograding_tmp/", which_untrusted,
                       "tmp")

    # clean up old usage of this directory
    shutil.rmtree(tmp, ignore_errors=True)
    os.makedirs(tmp)

    which_machine = socket.gethostname()

    # unzip autograding and submission folders
    tmp_autograding = os.path.join(tmp, "TMP_AUTOGRADING")
    tmp_submission = os.path.join(tmp, "TMP_SUBMISSION")
    unzip_this_file(my_autograding_zip_file, tmp_autograding)
    unzip_this_file(my_submission_zip_file, tmp_submission)
    os.remove(my_autograding_zip_file)
    os.remove(my_submission_zip_file)

    tmp_logs = os.path.join(tmp, "TMP_SUBMISSION", "tmp_logs")

    queue_file = os.path.join(tmp_submission, "queue_file.json")
    with open(queue_file, 'r') as infile:
        queue_obj = json.load(infile)

    queue_time_longstring = queue_obj["queue_time"]
    waittime = queue_obj["waittime"]
    is_batch_job = queue_obj["regrade"]
    job_id = queue_obj["job_id"]
    is_batch_job_string = "BATCH" if is_batch_job else "INTERACTIVE"

    partial_path = os.path.join(queue_obj["gradeable"], queue_obj["who"],
                                str(queue_obj["version"]))
    item_name = os.path.join(queue_obj["semester"], queue_obj["course"],
                             "submissions", partial_path)

    grade_items_logging.log_message(job_id, is_batch_job, which_untrusted,
                                    item_name, "wait:", waittime, "")

    # --------------------------------------------------------------------
    # START DOCKER

    # WIP: This option file facilitated testing...
    #USE_DOCKER = os.path.isfile("/tmp/use_docker")
    #use_docker_string="grading begins, using DOCKER" if USE_DOCKER else "grading begins (not using docker)"
    #grade_items_logging.log_message(job_id,is_batch_job,which_untrusted,submission_path,message=use_docker_string)

    container = None
    if USE_DOCKER:
        container = subprocess.check_output([
            'docker', 'run', '-t', '-d', '-v', tmp + ':' + tmp, 'ubuntu:custom'
        ]).decode('utf8').strip()
        dockerlaunch_done = dateutils.get_current_time()
        dockerlaunch_time = (dockerlaunch_done - grading_began).total_seconds()
        grade_items_logging.log_message(job_id, is_batch_job, which_untrusted,
                                        submission_path, "dcct:",
                                        dockerlaunch_time,
                                        "docker container created")

    # --------------------------------------------------------------------
    # COMPILE THE SUBMITTED CODE

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        print("====================================\nCOMPILATION STARTS",
              file=f)

    # copy submitted files to the tmp compilation directory
    tmp_compilation = os.path.join(tmp, "TMP_COMPILATION")
    os.mkdir(tmp_compilation)
    os.chdir(tmp_compilation)

    submission_path = os.path.join(tmp_submission, "submission")
    checkout_path = os.path.join(tmp_submission, "checkout")

    provided_code_path = os.path.join(tmp_autograding, "provided_code")
    test_input_path = os.path.join(tmp_autograding, "test_input")
    test_output_path = os.path.join(tmp_autograding, "test_output")
    custom_validation_code_path = os.path.join(tmp_autograding,
                                               "custom_validation_code")
    bin_path = os.path.join(tmp_autograding, "bin")
    form_json_config = os.path.join(tmp_autograding, "form.json")
    complete_config = os.path.join(tmp_autograding, "complete_config.json")

    with open(form_json_config, 'r') as infile:
        gradeable_config_obj = json.load(infile)
    gradeable_deadline_string = gradeable_config_obj["date_due"]

    with open(complete_config, 'r') as infile:
        complete_config_obj = json.load(infile)
    patterns_submission_to_compilation = complete_config_obj["autograding"][
        "submission_to_compilation"]
    pattern_copy("submission_to_compilation",
                 patterns_submission_to_compilation, submission_path,
                 tmp_compilation, tmp_logs)

    is_vcs = gradeable_config_obj["upload_type"] == "repository"
    checkout_subdirectory = complete_config_obj["autograding"].get(
        "use_checkout_subdirectory", "")
    checkout_subdir_path = os.path.join(checkout_path, checkout_subdirectory)

    if is_vcs:
        pattern_copy("checkout_to_compilation",
                     patterns_submission_to_compilation, checkout_subdir_path,
                     tmp_compilation, tmp_logs)

    # copy any instructor provided code files to tmp compilation directory
    copy_contents_into(job_id, provided_code_path, tmp_compilation, tmp_logs)

    subprocess.call(['ls', '-lR', '.'],
                    stdout=open(tmp_logs + "/overall.txt", 'a'))

    # copy compile.out to the current directory
    shutil.copy(os.path.join(bin_path, "compile.out"),
                os.path.join(tmp_compilation, "my_compile.out"))

    # give the untrusted user read/write/execute permissions on the tmp directory & files
    add_permissions_recursive(tmp_compilation,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP,
                              stat.S_IRGRP | stat.S_IWGRP | stat.S_IXGRP)

    add_permissions(tmp, stat.S_IROTH | stat.S_IXOTH)
    add_permissions(tmp_logs, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

    # grab the submission time
    with open(os.path.join(submission_path, ".submit.timestamp"),
              'r') as submission_time_file:
        submission_string = submission_time_file.read().rstrip()

    with open(os.path.join(tmp_logs, "compilation_log.txt"), 'w') as logfile:
        if USE_DOCKER:
            compile_success = subprocess.call([
                'docker', 'exec', '-w', tmp_compilation, container,
                os.path.join(tmp_compilation, 'my_compile.out'),
                queue_obj['gradeable'], queue_obj['who'],
                str(queue_obj['version']), submission_string
            ],
                                              stdout=logfile)
        else:
            compile_success = subprocess.call([
                os.path.join(SUBMITTY_INSTALL_DIR, "sbin",
                             "untrusted_execute"), which_untrusted,
                os.path.join(tmp_compilation, "my_compile.out"),
                queue_obj["gradeable"], queue_obj["who"],
                str(queue_obj["version"]), submission_string
            ],
                                              stdout=logfile)

    if compile_success == 0:
        print(which_machine, which_untrusted, "COMPILATION OK")
    else:
        print(which_machine, which_untrusted, "COMPILATION FAILURE")
        grade_items_logging.log_message(job_id,
                                        is_batch_job,
                                        which_untrusted,
                                        item_name,
                                        message="COMPILATION FAILURE")

    untrusted_grant_rwx_access(which_untrusted, tmp_compilation)

    # remove the compilation program
    os.remove(os.path.join(tmp_compilation, "my_compile.out"))

    # return to the main tmp directory
    os.chdir(tmp)

    # --------------------------------------------------------------------
    # make the runner directory

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        print("====================================\nRUNNER STARTS", file=f)

    tmp_work = os.path.join(tmp, "TMP_WORK")
    os.makedirs(tmp_work)
    os.chdir(tmp_work)

    # move all executable files from the compilation directory to the main tmp directory
    # Note: Must preserve the directory structure of compiled files (esp for Java)

    patterns_submission_to_runner = complete_config_obj["autograding"][
        "submission_to_runner"]
    pattern_copy("submission_to_runner", patterns_submission_to_runner,
                 submission_path, tmp_work, tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_runner", patterns_submission_to_runner,
                     checkout_subdir_path, tmp_work, tmp_logs)

    patterns_compilation_to_runner = complete_config_obj["autograding"][
        "compilation_to_runner"]
    pattern_copy("compilation_to_runner", patterns_compilation_to_runner,
                 tmp_compilation, tmp_work, tmp_logs)

    # copy input files to tmp_work directory
    copy_contents_into(job_id, test_input_path, tmp_work, tmp_logs)

    subprocess.call(['ls', '-lR', '.'],
                    stdout=open(tmp_logs + "/overall.txt", 'a'))

    # copy runner.out to the current directory
    shutil.copy(os.path.join(bin_path, "run.out"),
                os.path.join(tmp_work, "my_runner.out"))

    # give the untrusted user read/write/execute permissions on the tmp directory & files
    add_permissions_recursive(tmp_work,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    # run the run.out as the untrusted user
    with open(os.path.join(tmp_logs, "runner_log.txt"), 'w') as logfile:
        print("LOGGING BEGIN my_runner.out", file=logfile)
        logfile.flush()

        try:
            if USE_DOCKER:
                runner_success = subprocess.call([
                    'docker', 'exec', '-w', tmp_work, container,
                    os.path.join(tmp_work, 'my_runner.out'),
                    queue_obj['gradeable'], queue_obj['who'],
                    str(queue_obj['version']), submission_string
                ],
                                                 stdout=logfile)
            else:
                runner_success = subprocess.call([
                    os.path.join(SUBMITTY_INSTALL_DIR, "sbin",
                                 "untrusted_execute"), which_untrusted,
                    os.path.join(tmp_work, "my_runner.out"),
                    queue_obj["gradeable"], queue_obj["who"],
                    str(queue_obj["version"]), submission_string
                ],
                                                 stdout=logfile)
            logfile.flush()
        except Exception as e:
            print("ERROR caught runner.out exception={0}".format(str(
                e.args[0])).encode("utf-8"),
                  file=logfile)
            logfile.flush()

        print("LOGGING END my_runner.out", file=logfile)
        logfile.flush()

        killall_success = subprocess.call([
            os.path.join(SUBMITTY_INSTALL_DIR, "sbin", "untrusted_execute"),
            which_untrusted,
            os.path.join(SUBMITTY_INSTALL_DIR, "sbin", "killall.py")
        ],
                                          stdout=logfile)

        print("KILLALL COMPLETE my_runner.out", file=logfile)
        logfile.flush()

        if killall_success != 0:
            msg = 'RUNNER ERROR: had to kill {} process(es)'.format(
                killall_success)
            print("pid", os.getpid(), msg)
            grade_items_logging.log_message(job_id, is_batch_job,
                                            which_untrusted, item_name, "", "",
                                            msg)

    if runner_success == 0:
        print(which_machine, which_untrusted, "RUNNER OK")
    else:
        print(which_machine, which_untrusted, "RUNNER FAILURE")
        grade_items_logging.log_message(job_id,
                                        is_batch_job,
                                        which_untrusted,
                                        item_name,
                                        message="RUNNER FAILURE")

    untrusted_grant_rwx_access(which_untrusted, tmp_work)
    untrusted_grant_rwx_access(which_untrusted, tmp_compilation)

    # --------------------------------------------------------------------
    # RUN VALIDATOR

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        print("====================================\nVALIDATION STARTS",
              file=f)

    # copy results files from compilation...
    patterns_submission_to_validation = complete_config_obj["autograding"][
        "submission_to_validation"]
    pattern_copy("submission_to_validation", patterns_submission_to_validation,
                 submission_path, tmp_work, tmp_logs)
    if is_vcs:
        pattern_copy("checkout_to_validation",
                     patterns_submission_to_validation, checkout_subdir_path,
                     tmp_work, tmp_logs)
    patterns_compilation_to_validation = complete_config_obj["autograding"][
        "compilation_to_validation"]
    pattern_copy("compilation_to_validation",
                 patterns_compilation_to_validation, tmp_compilation, tmp_work,
                 tmp_logs)

    # remove the compilation directory
    shutil.rmtree(tmp_compilation)

    # copy output files to tmp_work directory
    copy_contents_into(job_id, test_output_path, tmp_work, tmp_logs)

    # copy any instructor custom validation code into the tmp work directory
    copy_contents_into(job_id, custom_validation_code_path, tmp_work, tmp_logs)

    subprocess.call(['ls', '-lR', '.'],
                    stdout=open(tmp_logs + "/overall.txt", 'a'))

    # copy validator.out to the current directory
    shutil.copy(os.path.join(bin_path, "validate.out"),
                os.path.join(tmp_work, "my_validator.out"))

    # give the untrusted user read/write/execute permissions on the tmp directory & files
    add_permissions_recursive(tmp_work,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH,
                              stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH)

    add_permissions(os.path.join(tmp_work, "my_validator.out"),
                    stat.S_IROTH | stat.S_IXOTH)

    # run the validator.out as the untrusted user
    with open(os.path.join(tmp_logs, "validator_log.txt"), 'w') as logfile:
        if USE_DOCKER:
            validator_success = subprocess.call([
                'docker', 'exec', '-w', tmp_work, container,
                os.path.join(tmp_work, 'my_validator.out'),
                queue_obj['gradeable'], queue_obj['who'],
                str(queue_obj['version']), submission_string
            ],
                                                stdout=logfile)
        else:
            validator_success = subprocess.call([
                os.path.join(SUBMITTY_INSTALL_DIR, "sbin",
                             "untrusted_execute"), which_untrusted,
                os.path.join(tmp_work, "my_validator.out"),
                queue_obj["gradeable"], queue_obj["who"],
                str(queue_obj["version"]), submission_string
            ],
                                                stdout=logfile)

    if validator_success == 0:
        print(which_machine, which_untrusted, "VALIDATOR OK")
    else:
        print(which_machine, which_untrusted, "VALIDATOR FAILURE")
        grade_items_logging.log_message(job_id,
                                        is_batch_job,
                                        which_untrusted,
                                        item_name,
                                        message="VALIDATION FAILURE")

    untrusted_grant_rwx_access(which_untrusted, tmp_work)

    # grab the result of autograding
    grade_result = ""
    with open(os.path.join(tmp_work, "grade.txt")) as f:
        lines = f.readlines()
        for line in lines:
            line = line.rstrip('\n')
            if line.startswith("Automatic grading total:"):
                grade_result = line

    # --------------------------------------------------------------------
    # MAKE RESULTS DIRECTORY & COPY ALL THE FILES THERE
    tmp_results = os.path.join(tmp, "TMP_RESULTS")

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        print("====================================\nARCHIVING STARTS", file=f)

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as overall_log:
        subprocess.call(['ls', '-lR', '.'], stdout=overall_log)

    os.makedirs(os.path.join(tmp_results, "details"))

    patterns_work_to_details = complete_config_obj["autograding"][
        "work_to_details"]
    pattern_copy("work_to_details", patterns_work_to_details, tmp_work,
                 os.path.join(tmp_results, "details"), tmp_logs)

    history_file_tmp = os.path.join(tmp_submission, "history.json")
    history_file = os.path.join(tmp_results, "history.json")
    if os.path.isfile(history_file_tmp):
        shutil.move(history_file_tmp, history_file)
        # fix permissions
        ta_group_id = os.stat(tmp_results).st_gid
        os.chown(history_file, int(HWCRON_UID), ta_group_id)
        add_permissions(history_file, stat.S_IRGRP)
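    # record when grading finished; used below for the grade history entry and
    # for the timing information written to the logs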
    grading_finished = dateutils.get_current_time()

    shutil.copy(os.path.join(tmp_work, "grade.txt"), tmp_results)

    # -------------------------------------------------------------
    # create/append to the results history

    # grab the submission time
    with open(os.path.join(submission_path,
                           ".submit.timestamp")) as submission_time_file:
        submission_string = submission_time_file.read().rstrip()
    submission_datetime = dateutils.read_submitty_date(submission_string)

    gradeable_deadline_datetime = dateutils.read_submitty_date(
        gradeable_deadline_string)
    gradeable_deadline_longstring = dateutils.write_submitty_date(
        gradeable_deadline_datetime)
    submission_longstring = dateutils.write_submitty_date(submission_datetime)

    seconds_late = int(
        (submission_datetime - gradeable_deadline_datetime).total_seconds())
    # note: negative = not late

    with open(os.path.join(tmp_submission, ".grading_began"), 'r') as f:
        grading_began_longstring = f.read()
    grading_began = dateutils.read_submitty_date(grading_began_longstring)
    grading_finished_longstring = dateutils.write_submitty_date(
        grading_finished)

    gradingtime = (grading_finished - grading_began).total_seconds()

    with open(os.path.join(tmp_submission, "queue_file.json"), 'r') as infile:
        queue_obj = json.load(infile)
    queue_obj["gradingtime"] = gradingtime
    queue_obj["grade_result"] = grade_result
    queue_obj["which_untrusted"] = which_untrusted

    with open(os.path.join(tmp_results, "queue_file.json"), 'w') as outfile:
        json.dump(queue_obj,
                  outfile,
                  sort_keys=True,
                  indent=4,
                  separators=(',', ': '))

    with open(os.path.join(tmp_work, "results.json"), 'r') as read_file:
        results_obj = json.load(read_file)
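    # carry the VCS revision (if the queue file recorded one) into results.json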
    if 'revision' in queue_obj.keys():
        results_obj['revision'] = queue_obj['revision']
    with open(os.path.join(tmp_results, "results.json"), 'w') as outfile:
        json.dump(results_obj,
                  outfile,
                  sort_keys=True,
                  indent=4,
                  separators=(',', ': '))

    write_grade_history.just_write_grade_history(
        history_file, gradeable_deadline_longstring, submission_longstring,
        seconds_late, queue_time_longstring, is_batch_job_string,
        grading_began_longstring, int(waittime), grading_finished_longstring,
        int(gradingtime), grade_result)

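    # move back to the data directory so the temporary directories can be
    # removed below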
    os.chdir(SUBMITTY_DATA_DIR)

    if USE_DOCKER:
        with open(os.path.join(tmp_logs, "overall_log.txt"), 'w') as logfile:
            chmod_success = subprocess.call(
                ['docker', 'exec', '-w', tmp_work, container,
                 'chmod', '-R', 'o+rwx', '.'],
                stdout=logfile)

    with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
        f.write("FINISHED GRADING!\n")

    # save the logs!
    shutil.copytree(tmp_logs, os.path.join(tmp_results, "logs"))

    # zip up results folder
    filehandle, my_results_zip_file = tempfile.mkstemp()
    zip_my_directory(tmp_results, my_results_zip_file)
    os.close(filehandle)
    shutil.rmtree(tmp_autograding)
    shutil.rmtree(tmp_submission)
    shutil.rmtree(tmp_results)
    shutil.rmtree(tmp_work)
    shutil.rmtree(tmp)

    # WIP: extra logging for testing
    #grade_items_logging.log_message(job_id,is_batch_job,which_untrusted,submission_path,message="done grading")

    # --------------------------------------------------------------------
    # CLEAN UP DOCKER
    if USE_DOCKER:
        subprocess.call(['docker', 'rm', '-f', container])
        dockerdestroy_done = dateutils.get_current_time()
        dockerdestroy_time = (dockerdestroy_done -
                              grading_finished).total_seconds()
        grade_items_logging.log_message(job_id, is_batch_job, which_untrusted,
                                        submission_path, "ddt:",
                                        dockerdestroy_time,
                                        "docker container destroyed")

    grade_items_logging.log_message(job_id, is_batch_job, which_untrusted,
                                    item_name, "grade:", gradingtime,
                                    grade_result)

    return my_results_zip_file
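# copytree_if_exists() is called repeatedly below but is not defined in this
# excerpt.  The following is a minimal sketch of what such a helper might look
# like, inferred only from how it is used (copy a source tree into a fresh
# target, or create an empty target when the source is missing); the actual
# Submitty helper may differ.  It relies on the module-level os and shutil
# imports already used by the surrounding code.
def copytree_if_exists(source, target):
    if not os.path.isdir(source):
        os.makedirs(target)
    else:
        shutil.copytree(source, target)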
def prepare_autograding_and_submission_zip(next_directory, next_to_grade):
    os.chdir(SUBMITTY_DATA_DIR)
    # --------------------------------------------------------
    # figure out what we're supposed to grade & error checking
    obj = load_queue_file_obj(next_directory, next_to_grade)

    partial_path = os.path.join(obj["gradeable"], obj["who"],
                                str(obj["version"]))
    item_name = os.path.join(obj["semester"], obj["course"], "submissions",
                             partial_path)
    submission_path = os.path.join(SUBMITTY_DATA_DIR, "courses", item_name)
    if not os.path.isdir(submission_path):
        grade_items_logging.log_message(
            message="ERROR: the submission directory does not exist " +
            submission_path)
        raise RuntimeError("ERROR: the submission directory does not exist",
                           submission_path)
    print("pid", os.getpid(), "GRADE THIS", submission_path)
    is_vcs, vcs_type, vcs_base_url, vcs_subdirectory = get_vcs_info(
        SUBMITTY_DATA_DIR, obj["semester"], obj["course"], obj["gradeable"],
        obj["who"], obj["team"])

    is_batch_job = next_directory == BATCH_QUEUE
    is_batch_job_string = "BATCH" if is_batch_job else "INTERACTIVE"

    queue_time = get_queue_time(next_directory, next_to_grade)
    queue_time_longstring = dateutils.write_submitty_date(queue_time)
    grading_began = dateutils.get_current_time()
    waittime = (grading_began - queue_time).total_seconds()
    grade_items_logging.log_message(is_batch_job, "zip", item_name, "wait:",
                                    waittime, "")

    # --------------------------------------------------------------------
    # MAKE TEMPORARY DIRECTORY & COPY THE NECESSARY FILES THERE

    tmp = tempfile.mkdtemp()
    tmp_autograding = os.path.join(tmp, "TMP_AUTOGRADING")
    os.mkdir(tmp_autograding)
    tmp_submission = os.path.join(tmp, "TMP_SUBMISSION")
    os.mkdir(tmp_submission)

    # --------------------------------------------------------
    # various paths
    provided_code_path = os.path.join(SUBMITTY_DATA_DIR, "courses",
                                      obj["semester"], obj["course"],
                                      "provided_code", obj["gradeable"])
    test_input_path = os.path.join(SUBMITTY_DATA_DIR, "courses",
                                   obj["semester"], obj["course"],
                                   "test_input", obj["gradeable"])
    test_output_path = os.path.join(SUBMITTY_DATA_DIR, "courses",
                                    obj["semester"], obj["course"],
                                    "test_output", obj["gradeable"])
    custom_validation_code_path = os.path.join(SUBMITTY_DATA_DIR, "courses",
                                               obj["semester"], obj["course"],
                                               "custom_validation_code",
                                               obj["gradeable"])
    bin_path = os.path.join(SUBMITTY_DATA_DIR, "courses", obj["semester"],
                            obj["course"], "bin", obj["gradeable"])
    form_json_config = os.path.join(SUBMITTY_DATA_DIR, "courses",
                                    obj["semester"], obj["course"], "config",
                                    "form",
                                    "form_" + obj["gradeable"] + ".json")
    complete_config = os.path.join(
        SUBMITTY_DATA_DIR, "courses", obj["semester"], obj["course"], "config",
        "complete_config", "complete_config_" + obj["gradeable"] + ".json")

    copytree_if_exists(provided_code_path,
                       os.path.join(tmp_autograding, "provided_code"))
    copytree_if_exists(test_input_path,
                       os.path.join(tmp_autograding, "test_input"))
    copytree_if_exists(test_output_path,
                       os.path.join(tmp_autograding, "test_output"))
    copytree_if_exists(custom_validation_code_path,
                       os.path.join(tmp_autograding, "custom_validation_code"))
    copytree_if_exists(bin_path, os.path.join(tmp_autograding, "bin"))
    shutil.copy(form_json_config, os.path.join(tmp_autograding, "form.json"))
    shutil.copy(complete_config,
                os.path.join(tmp_autograding, "complete_config.json"))

    checkout_path = os.path.join(SUBMITTY_DATA_DIR, "courses", obj["semester"],
                                 obj["course"], "checkout", partial_path)
    results_path = os.path.join(SUBMITTY_DATA_DIR, "courses", obj["semester"],
                                obj["course"], "results", partial_path)

    # grab a copy of the current history.json file (if it exists)
    history_file = os.path.join(results_path, "history.json")
    history_file_tmp = ""
    if os.path.isfile(history_file):
        filehandle, history_file_tmp = tempfile.mkstemp()
        shutil.copy(history_file, history_file_tmp)
        os.close(filehandle)
        shutil.copy(history_file, os.path.join(tmp_submission, "history.json"))

    # get info from the gradeable config file
    with open(complete_config, 'r') as infile:
        complete_config_obj = json.load(infile)

    checkout_subdirectory = complete_config_obj["autograding"].get(
        "use_checkout_subdirectory", "")
    checkout_subdir_path = os.path.join(checkout_path, checkout_subdirectory)
    queue_file = os.path.join(next_directory, next_to_grade)

    # switch to tmp directory
    os.chdir(tmp)

    # make the logs directory
    tmp_logs = os.path.join(tmp, "TMP_SUBMISSION", "tmp_logs")
    os.makedirs(tmp_logs)
    # 'touch' a file in the logs folder
    open(os.path.join(tmp_logs, "overall.txt"), 'a').close()

    # grab the submission time
    with open(os.path.join(submission_path,
                           ".submit.timestamp")) as submission_time_file:
        submission_string = submission_time_file.read().rstrip()

    submission_datetime = dateutils.read_submitty_date(submission_string)

    # --------------------------------------------------------------------
    # CHECKOUT THE STUDENT'S REPO
    if is_vcs:
        # is vcs_subdirectory standalone or should it be combined with base_url?
        if vcs_subdirectory[0] == '/' or '://' in vcs_subdirectory:
            vcs_path = vcs_subdirectory
        else:
            if '://' in vcs_base_url:
                vcs_path = urllib.parse.urljoin(vcs_base_url, vcs_subdirectory)
            else:
                vcs_path = os.path.join(vcs_base_url, vcs_subdirectory)

        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as f:
            print("====================================\nVCS CHECKOUT", file=f)
            print('vcs_base_url', vcs_base_url, file=f)
            print('vcs_subdirectory', vcs_subdirectory, file=f)
            print('vcs_path', vcs_path, file=f)
            print(['/usr/bin/git', 'clone', vcs_path, checkout_path], file=f)

        # cleanup the previous checkout (if it exists)
        shutil.rmtree(checkout_path, ignore_errors=True)
        os.makedirs(checkout_path, exist_ok=True)
        subprocess.call(['/usr/bin/git', 'clone', vcs_path, checkout_path])
        os.chdir(checkout_path)

        # determine which version we need to checkout
        what_version = subprocess.check_output([
            'git', 'rev-list', '-n', '1',
            '--before="' + submission_string + '"', 'master'
        ])
        what_version = str(what_version.decode('utf-8')).rstrip()
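        # rev-list gives the newest commit on master at or before the
        # submission timestamp; an empty string means no such commit exists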
        if what_version == "":
            # oops, pressed the grade button before a valid commit
            shutil.rmtree(checkout_path, ignore_errors=True)
        else:
            # and check out the right version
            subprocess.call(['git', 'checkout', '-b', 'grade', what_version])
        os.chdir(tmp)
        with open(os.path.join(tmp_logs, "overall.txt"), 'a') as overall_log:
            subprocess.call(['ls', '-lR', checkout_path], stdout=overall_log)

    copytree_if_exists(submission_path,
                       os.path.join(tmp_submission, "submission"))
    copytree_if_exists(checkout_path, os.path.join(tmp_submission, "checkout"))
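    # stash queue/wait metadata in the queue object; it is written to
    # queue_file.json below and read back when the results are archived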
    obj["queue_time"] = queue_time_longstring
    obj["is_batch_job"] = is_batch_job
    obj["waittime"] = waittime

    with open(os.path.join(tmp_submission, "queue_file.json"), 'w') as outfile:
        json.dump(obj,
                  outfile,
                  sort_keys=True,
                  indent=4,
                  separators=(',', ': '))

    grading_began_longstring = dateutils.write_submitty_date(grading_began)
    with open(os.path.join(tmp_submission, ".grading_began"), 'w') as f:
        print(grading_began_longstring, file=f)

    # zip up autograding & submission folders
    # close the descriptors returned by mkstemp so they are not leaked
    autograding_handle, my_autograding_zip_file = tempfile.mkstemp()
    submission_handle, my_submission_zip_file = tempfile.mkstemp()
    zip_my_directory(tmp_autograding, my_autograding_zip_file)
    zip_my_directory(tmp_submission, my_submission_zip_file)
    os.close(autograding_handle)
    os.close(submission_handle)
    # cleanup
    shutil.rmtree(tmp_autograding)
    shutil.rmtree(tmp_submission)
    shutil.rmtree(tmp)

    return (my_autograding_zip_file, my_submission_zip_file)
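# zip_my_directory() is used above to package the autograding, submission, and
# results trees, but its definition is not part of this excerpt.  Below is a
# plausible sketch (not the actual Submitty implementation): walk the directory
# and store every file under a path relative to the directory root.  It assumes
# the module-level os import plus the standard-library zipfile module.
def zip_my_directory(path, zipfilename):
    import zipfile
    with zipfile.ZipFile(zipfilename, 'w', zipfile.ZIP_DEFLATED,
                         allowZip64=True) as zipf:
        for root, _dirs, files in os.walk(path):
            for name in files:
                full_path = os.path.join(root, name)
                zipf.write(full_path, os.path.relpath(full_path, path))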