def run_grader(part_id):
    """Run the graded test function for *part_id* and report a score.

    Looks up the test function for the given Coursera part id, runs it,
    converts the failure count into a fractional score, and sends the
    learner feedback via ``send_feedback``.

    Args:
        part_id: Coursera part identifier selecting which exercise to grade.

    Side effects:
        Writes to stderr on an unknown part id; always calls
        ``send_feedback`` otherwise.
    """
    graded_funcs = {
        "r9hUX": Test_tf_constant,
        "kWeLo": Test_tf_square,
        "E1weD": Test_tf_reshape,
        "AuDvh": Test_tf_cast,
        "NI9Co": Test_tf_multiply,
        "Yz2KA": Test_tf_add,
        "4uBzv": Test_tf_gradient_tape,
    }

    g_func = graded_funcs.get(part_id)
    if g_func is None:
        print_stderr("The partID provided did not correspond to any graded function.")
        return

    # Guard the graded call so a crash in learner code produces feedback
    # instead of an unhandled traceback (consistent with the distributed
    # grader variant elsewhere in this file).
    try:
        failed_cases, num_cases = g_func()
    except Exception:
        import traceback  # local import: keeps the fix self-contained
        traceback.print_exc()
        send_feedback(
            0.0,
            "There was a problem grading your submission. Check stderr for more details."
        )
        return

    # Fraction of cases that passed.
    score = 1.0 - len(failed_cases) / num_cases

    if failed_cases:
        failed_msg = ""
        for failed_case in failed_cases:
            failed_msg += f"Failed {failed_case.get('name')}.\nExpected:\n{failed_case.get('expected')},\nbut got:\n{failed_case.get('got')}.\n\n"
        send_feedback(score, failed_msg)
    else:
        send_feedback(score, "All tests passed! Congratulations!")
def run_grader(part_id):
    """Grade the exercise selected by *part_id* and send learner feedback.

    The part id is mapped to a test function; the score is the fraction of
    test cases that passed, and a human-readable summary of any failures
    is reported through ``send_feedback``.
    """
    dispatch = {
        "pxgPU": Test_map_fn,
        "fX2cS": Test_set_adam_optimizer,
        "M6COK": Test_set_sparse_cat_crossentropy_loss,
        "fbLog": Test_set_sparse_cat_crossentropy_accuracy,
        "FaW22": Test_prepare_dataset,
        "C8oAS": Test_train_one_step,
        "NG4Bn": Test_train,
    }

    grader = dispatch.get(part_id)
    if grader is None:
        # Unknown part id: report to stderr and bail out without feedback.
        print_stderr(
            "The partID provided did not correspond to any graded function.")
        return

    failed_cases, num_cases = grader()
    score = 1.0 - len(failed_cases) / num_cases

    if not failed_cases:
        send_feedback(score, "All tests passed! Congratulations!")
        return

    # Assemble one message describing every failing case.
    parts = [
        f"Failed {case.get('name')}.\nExpected:\n{case.get('expected')},\nbut got:\n{case.get('got')}.\n\n"
        for case in failed_cases
    ]
    send_feedback(score, "".join(parts))
def run_grader(part_id):
    """Run the graded test function for *part_id* and report a score.

    Looks up the test function for the given Coursera part id, runs it
    inside a guard so learner-code crashes still yield feedback, and sends
    the resulting score and failure summary via ``send_feedback``.

    Args:
        part_id: Coursera part identifier selecting which exercise to grade.

    Side effects:
        Writes to stderr on an unknown part id or a grading crash; calls
        ``send_feedback`` in every other path; terminates the process via
        ``exit()`` when grading itself raised.
    """
    graded_funcs = {
        "xV8vX": Test_distribute_datasets,
        "dmTKR": Test_train_test_step_fns,
        "cst37": Test_distributed_train_test_step_fns,
    }

    g_func = graded_funcs.get(part_id)
    if g_func is None:
        print_stderr(
            "The partID provided did not correspond to any graded function.")
        return

    try:
        failed_cases, num_cases = g_func()
    # Bug fix: was a bare `except:`, which also swallows SystemExit and
    # KeyboardInterrupt. Catch Exception only.
    except Exception:
        traceback.print_exc()
        send_feedback(
            0.0,
            "There was a problem grading your submission. Check stderr for more details."
        )
        exit()

    # Fraction of cases that passed.
    score = 1.0 - len(failed_cases) / num_cases

    if failed_cases:
        failed_msg = ""
        for failed_case in failed_cases:
            failed_msg += f"Failed {failed_case.get('name')}.\nExpected:\n{failed_case.get('expected')},\nbut got:\n{failed_case.get('got')}.\n\n"
        send_feedback(score, failed_msg)
    else:
        send_feedback(score, "All tests passed! Congratulations!")
def run_grader(part_id):
    """Grade the exercise selected by *part_id* and send learner feedback.

    Maps the part id to its test function, computes the passing fraction as
    the score, and reports either a failure summary or a success message
    through ``send_feedback``.
    """
    dispatch = {
        "mylyu": Test_F1ScoreResult,
        "YQutN": Test_apply_gradients,
        "8dNmL": Test_train_data_for_one_epoch,
    }

    grader = dispatch.get(part_id)
    if grader is None:
        # Unknown part id: nothing to grade.
        print_stderr("The partID provided did not correspond to any graded function.")
        return

    failed_cases, num_cases = grader()
    score = 1.0 - len(failed_cases) / num_cases

    if not failed_cases:
        send_feedback(score, "All tests passed! Congratulations!")
        return

    # One message block per failing case, concatenated in order.
    summary = "".join(
        f"Failed {case.get('name')}.\nExpected:\n{case.get('expected')},\nbut got:\n{case.get('got')}.\n\n"
        for case in failed_cases
    )
    send_feedback(score, summary)
"mylyu": Test_F1ScoreResult, "YQutN": Test_apply_gradients, "8dNmL": Test_train_data_for_one_epoch } g_func = graded_funcs.get(part_id) if g_func is None: print_stderr("The partID provided did not correspond to any graded function.") return failed_cases, num_cases = g_func() score = 1.0 - len(failed_cases) / num_cases if failed_cases: failed_msg = "" for failed_case in failed_cases: failed_msg += f"Failed {failed_case.get('name')}.\nExpected:\n{failed_case.get('expected')},\nbut got:\n{failed_case.get('got')}.\n\n" send_feedback(score, failed_msg) else: send_feedback(score, "All tests passed! Congratulations!") if __name__ == "__main__": try: part_id = sys.argv[2] except IndexError: print_stderr("Missing partId. Required to continue.") send_feedback(0.0, "Missing partId.") else: run_grader(part_id)
"""Copy the learner's notebook out of the shared submission directory and
convert it to a py:percent script for grading."""
import os
import shutil

import jupytext
from tools import send_feedback

submission_dir = "/shared/submission/"

# Find the first .ipynb file in the submission directory.
# Bug fix: the original loop had an `else: learner_notebook = None` inside the
# loop body, so any non-notebook file listed AFTER the notebook reset the
# match back to None. Initialize once, then break on the first match.
learner_notebook = None
for fname in os.listdir(submission_dir):
    if fname.endswith('.ipynb'):
        learner_notebook = fname
        break

if learner_notebook is None:
    send_feedback(0.0, "No notebook was found in the submission directory.")
    exit()

sub_source = submission_dir + learner_notebook
sub_destination = '/grader/submission/submission.ipynb'
shutil.copyfile(sub_source, sub_destination)

# Convert the notebook into an importable .py script (percent cell format).
# NOTE(review): the relative path assumes the working directory is /grader —
# confirm against the grader container setup.
nb = jupytext.read("submission/submission.ipynb")
jupytext.write(nb, 'submission/submission.py', fmt='py:percent')
"""Copy the learner's zipped SavedModel out of the shared submission
directory and extract it into the grader workspace."""
import os
import shutil
from zipfile import ZipFile

from tools import send_feedback

submission_dir = "/shared/submission/"

# Find the first .zip file in the submission directory.
# Bug fix: the original loop had an `else: learner_file = None` inside the
# loop body, so any non-zip file listed AFTER the archive reset the match
# back to None. Initialize once, then break on the first match.
learner_file = None
for fname in os.listdir(submission_dir):
    if fname.endswith('.zip'):
        learner_file = fname
        break

if learner_file is None:
    send_feedback(0.0, "No .zip was found in the submission directory.")
    exit()

sub_source = submission_dir + learner_file
sub_destination = '/grader/mymodel.zip'
shutil.copyfile(sub_source, sub_destination)

# Extract the archive into the current working directory for grading.
saved_model_path = "./mymodel.zip"
with ZipFile(saved_model_path, "r") as zipObj:
    zipObj.extractall("./")
"""Entry point that validates the part id, loads the learner's SavedModel,
and prepares the dataset splits used for grading."""
from disable_warnings import *
import sys

import tensorflow as tf
import tensorflow_datasets as tfds

from tools import send_feedback, print_stderr
import converter

if __name__ == "__main__":
    # The part id is passed as the second CLI argument by the grading harness.
    try:
        part_id = sys.argv[2]
    except IndexError:
        print_stderr("Missing partId. Required to continue.")
        send_feedback(0.0, "Missing partId.")
        exit()
    else:
        # This grader handles exactly one part.
        if part_id != "wNSsr":
            print_stderr("Invalid partId. Required to continue.")
            send_feedback(0.0, "Invalid partId.")
            exit()

# Load the extracted SavedModel.
# Bug fixes: (1) was a bare `except:`, narrowed to `except Exception:`;
# (2) the original fell through after send_feedback and then hit a
# NameError on `student_model` — terminate instead.
try:
    student_model = tf.saved_model.load("./tmp/mymodel/1")
except Exception:
    send_feedback(
        0.0,
        "Your model could not be loaded. Make sure the zip file has the correct contents."
    )
    exit()

# Default serving signature is the callable graded below.
infer = student_model.signatures["serving_default"]

# 80/10/10 train/validation/test split specs for tfds.
splits = ["train[:80%]", "train[80%:90%]", "train[90%:]"]