def home(request, lesson=1):
    """Render the grading home page for one lesson.

    GET  -> show an empty :class:`StudentFileForm`.
    POST -> save the uploaded student file, grade it against the teacher
            file for ``lesson``, and report missed/extra results.

    Fixes vs. the original: removed debug ``print`` statements, stopped
    shadowing the builtin ``id``, dropped the redundant re-fetch of the
    just-saved StudentFile, and made non-POST requests fall through to the
    empty-form rendering instead of implicitly returning ``None``.
    """
    teacher = TeacherFile.objects.get(num=lesson)

    if request.method == "POST":
        form = StudentFileForm(request.POST, request.FILES)
        if form.is_valid():
            student = form.save()  # the saved instance; no need to re-query
            student_id = student.id
            # Build absolute filesystem paths; the stored URL repeats the
            # "media/" segment already present in MEDIA_ROOT, so strip it.
            turl = settings.MEDIA_ROOT + str(teacher.file_upload.url).replace("media/", "")
            surl = settings.MEDIA_ROOT + str(student.file_upload.url).replace("media/", "")
            results = grade.grade(turl, surl, student_id)
            # draw() is run on a thread but joined immediately, preserving
            # the original synchronous flow.
            th = threading.Thread(target=draw, args=[student_id])
            th.start()
            th.join()
            missed = "".join(results['missed']) or "None"
            extra = "".join(results['extra']) or "None"
            context = {'form': form, 'flag': True, 'teacher': teacher,
                       'student': student, 'id': student_id,
                       'missed': missed, 'extra': extra}
        else:
            context = {'form': form, 'flag': False, 'teacher': teacher}
    else:
        # GET (or any other method): present an empty upload form.
        form = StudentFileForm()
        context = {'form': form, 'flag': False, 'teacher': teacher}
    return render(request, "app/home.html", context)
def do_POST(data): statsd.increment('xserver.post-requests') # This server expects jobs to be pushed to it from the queue xpackage = json.loads(data) if xpackage.get('myreq', None): body = xpackage['xqueue_body'] files = xpackage['xqueue_files'] student_response = body['student_response'] payload = body['grader_payload'] grader_config = payload else: body = xpackage['xqueue_body'] files = xpackage['xqueue_files'] # Delivery from the lms body = json.loads(body) student_response = body['student_response'] payload = body['grader_payload'] try: grader_config = json.loads(payload) except ValueError as err: # If parsing json fails, erroring is fine--something is wrong in the content. # However, for debugging, still want to see what the problem is statsd.increment('xserver.grader_payload_error') log.debug("error parsing: '{0}' -- {1}".format(payload, err)) raise log.debug("Processing submission, grader payload: {0}".format(payload)) relative_grader_path = grader_config['grader'] grader_path = os.path.join(settings.GRADER_ROOT, relative_grader_path) start = time() results = grade.grade(grader_path, grader_config, student_response, sandbox) statsd.histogram('xserver.grading-time', time() - start) # Make valid JSON message reply = { 'correct': results['correct'], 'score': results['score'], 'msg': render_results(results) } statsd.increment('xserver.post-replies (non-exception)') return json.dumps(reply)
def test_point_90_got_B(self):
    """A raw score of 90 maps to the letter grade 'B'."""
    letter = grade(90)
    self.assertEqual(letter, 'B')
def test_point_80_got_C(self):
    """A raw score of 80 maps to the letter grade 'C'."""
    letter = grade(80)
    self.assertEqual(letter, 'C')
def test_point_71_got_C(self):
    """71 is just above the C/D boundary and still earns a 'C'."""
    letter = grade(71)
    self.assertEqual(letter, 'C')
def test_point_70_got_D(self):
    """Exactly 70 falls on the boundary and earns a 'D'."""
    letter = grade(70)
    self.assertEqual(letter, 'D')
def test_point_61_got_D(self):
    """61 is just above the D/F boundary and still earns a 'D'."""
    letter = grade(61)
    self.assertEqual(letter, 'D')
def lti(request):
    """LTI launch/grading view.

    The LTI launch POST carries the OAuth outcome-service parameters, which
    are stashed in the session under ``'p'``. Later POSTs upload a student
    file, grade it against the teacher file for the launched test, post the
    score back to the LMS via ``posting()``, and render the result page.

    Fixes vs. the original:
    * ``print "hi ..." + score`` raised TypeError (str + int) — removed
      along with the other debug prints;
    * the GET branch read a local ``test_id`` that is never bound on a GET
      request (NameError) — it now comes from the session parameters;
    * no longer shadows the builtin ``id``.
    """
    score = 1
    if 'oauth_consumer_key' in request.POST:
        # Initial LTI launch: capture outcome-service parameters so the
        # grade can be reported back later in the session.
        params2 = {
            'score': 0.2,
            'oauth_consumer_key': request.POST['oauth_consumer_key'],
            'lis_result_sourcedid': request.POST['lis_result_sourcedid'],
            'lis_outcome_service_url': request.POST['lis_outcome_service_url'],
            'test_id': request.POST['custom_test_id'],
            'back_url': request.POST['back_url'],
        }
        request.session['p'] = params2

    if request.method == "POST":
        params2 = request.session['p']
        form = StudentFileForm(request.POST, request.FILES)
        teacher = TeacherFile.objects.get(num=params2['test_id'])
        if form.is_valid():
            student = form.save()  # saved instance; no need to re-query
            student_id = student.id
            # Absolute paths: the stored URL duplicates the "media/" segment
            # already present in MEDIA_ROOT, so strip it.
            turl = settings.MEDIA_ROOT + str(teacher.file_upload.url).replace("media/", "")
            surl = settings.MEDIA_ROOT + str(student.file_upload.url).replace("media/", "")
            results = grade.grade(turl, surl, student_id)
            score = results['score']
            params2['score'] = score
            th = threading.Thread(target=draw, args=[student_id])
            th.start()
            th.join()
            # Report the score back to the LMS outcome service.
            posting(params2, params2['back_url'])
            missed = "".join(results['missed']) or "None"
            extra = "".join(results['extra']) or "None"
            context = {'form': form, 'flag': True, 'teacher': teacher,
                       'student': student, 'id': student_id,
                       'missed': missed, 'extra': extra,
                       'score': score * 10}
        else:
            context = {'form': form, 'flag': False, 'teacher': teacher,
                       'score': score}
        return render(request, "app/home.html", context)
    elif request.method == "GET":
        # BUG FIX: take test_id from the session (it is only bound locally
        # during the launch POST, never on a GET).
        params2 = request.session['p']
        teacher = TeacherFile.objects.get(num=params2['test_id'])
        form = StudentFileForm()
        context = {'form': form, 'flag': False, 'teacher': teacher,
                   'score': score}
        return render(request, "app/lti.html", context)
def test_point_85_got_B(self):
    """A raw score of 85 maps to the letter grade 'B'."""
    letter = grade(85)
    self.assertEqual(letter, 'B')
async def submission(ws, spsName, uids, code, lang, problem, runtime, memlimit,
                     input, expected, points, userid, subid, customrunoriginal,
                     gid, problem_number):
    """Compile and grade one submission, streaming live status over ``ws``.

    Returns ``(ostatus, oscore, osublog)`` where ``osublog`` is a CRLF
    human-readable log. Sample cases are marked by ``points[i] == -1`` and
    never add to the score; ``customrunoriginal != -1`` appears to mark a
    custom run — TODO confirm against the caller.
    """
    #return status, score, submission log
    ostatus = -1  # 0 is backend error, 1 is compile fail, 2 is successfully graded, 3 missed sample case (still graded test cases though)
    oscore = 0
    osublog = ""
    # Announce the start of live grading to the websocket listener, if any.
    if ws != None:
        await ws.send(
            pickle.dumps({
                "func": "live",
                "command": "init",
                "uid": userid,
                "subid": subid,
                "pid": problem,
                "input_len": len(input),
                "gid": gid
            }))
    # Header block of the submission log.
    osublog += "Submission information:" + "\r\n"
    osublog += "Code Length: " + str(len(code)) + "\r\n"
    osublog += "Code Language: " + lang + "\r\n"
    osublog += "Problem ID: " + str(problem) + "\r\n"
    osublog += "Problem number: " + str(problem_number) + "\r\n"
    osublog += "User ID: " + str(userid) + "\r\n"
    osublog += "Submission ID: " + str(subid) + "\r\n"
    osublog += "Submission Processing Server Name: " + spsName + "\r\n"
    osublog += "\r\n"
    osublog += "Compiling submission..." + "\r\n"
    osublog += "\r\n"
    print("New Submission,", subid)
    # cres[0]: 0 general error, 1 unknown language, 2 compile failure,
    # 3 compile success (per the branches below); cres[1]: artifact or log.
    cres = compile.compileProgram(code, lang, problem_number)
    if cres[0] == 0:
        print("General Error")
        osublog += "General error." + "\r\n"
        ostatus = 0
    elif cres[0] == 1:
        print("Lang Error")
        osublog += "Lang error." + "\r\n"
        ostatus = 0
    elif cres[0] == 2:
        print("Compile Fail")
        osublog += "Compile fail."
        osublog += "\r\n"
        osublog += "Compile log:" + "\r\n"
        # first1023 presumably truncates long output — TODO confirm.
        osublog += first1023(cres[1])
        ostatus = 1
        if ws != None:
            await ws.send(
                pickle.dumps({
                    "func": "live",
                    "command": "comf",
                    "uid": userid,
                    "subid": subid,
                    "pid": problem,
                    "gid": gid
                }))
    elif cres[0] == 3:
        #print("Compile Success")
        totalScore = 0
        osublog += "Compile success." + "\r\n"
        osublog += "\r\n"
        osublog += "Grading submission on sample & test cases..." + "\r\n"
        osublog += "\r\n"
        missedSample = False
        # Run every case; input/expected/points are parallel lists.
        for i in range(len(input)):
            #print("Case " + str(i+1) + "/" + str(len(input)))
            if ws != None:
                await ws.send(
                    pickle.dumps({
                        "func": "live",
                        "command": "case",
                        "uid": userid,
                        "subid": subid,
                        "pid": problem,
                        "index": i,
                        "gid": gid
                    }))
            if points[i] == -1:
                osublog += "Case " + str(i + 1) + "/" + str(
                    len(input)) + " (Sample Case)" + "\r\n"
            else:
                osublog += "Case " + str(i + 1) + "/" + str(
                    len(input)) + " (Worth " + str(points[i]) + "pts)" + "\r\n"
            # Reset the scratch run directory and stage the case input.
            subprocess.call("rm run/* -R", shell=True)
            inpFile = open("run/input.txt", "w")
            inpFile.write(input[i])
            inpFile.close()
            # The runner thread fulfils ``block_`` on the event loop and
            # appends its results into ``rres``; SIGALRM is armed as a
            # backstop one second past the runtime limit.
            block_ = asyncio.get_event_loop().create_future()
            rres = []
            signal.signal(signal.SIGALRM, run.handleTimeout)
            signal.alarm(runtime + 1)
            thr = threading.Thread(target=run.runProgram,
                                   args=(cres[1], lang, runtime, memlimit,
                                         uids, block_, rres,
                                         asyncio.get_event_loop()))
            thr.start()
            #rres = run.runProgram(cres[1], lang, runtime, memlimit, uids)
            await block_
            thr.join()
            signal.alarm(0)
            #print("RRES:", rres)
            # rres layout (as used below): [0] exit status code,
            # [2] stdout/output text, [3] stderr text — TODO confirm.
            postSample = False
            if len(rres[3]):
                # Anything on stderr is treated as a runtime error.
                print("Run Time Error")
                osublog += "Run Time Error." + "\r\n"
                postSample = True
                if ws != None:
                    await ws.send(
                        pickle.dumps({
                            "func": "live",
                            "command": "rte",
                            "uid": userid,
                            "subid": subid,
                            "pid": problem,
                            "index": i,
                            "gid": gid
                        }))
            elif rres[0] == 0:
                # Clean run: compare actual output against the expected output.
                gres = grade.grade(rres[2], expected[i])
                if gres == 0:
                    #print("Correct Output, +"+str(points[i])+"pts (if is not sample)")
                    if ws != None:
                        await ws.send(
                            pickle.dumps({
                                "func": "live",
                                "command": "co",
                                "uid": userid,
                                "subid": subid,
                                "pid": problem,
                                "index": i,
                                "gid": gid
                            }))
                    if points[i] == -1:
                        osublog += "Correct Output" + "\r\n"
                    else:
                        totalScore += points[i]
                        osublog += "Correct Output, +" + str(
                            points[i]) + "pts" + "\r\n"
                else:
                    print("Incorrect Output")
                    osublog += "Incorrect Output." + "\r\n"
                    if ws != None:
                        await ws.send(
                            pickle.dumps({
                                "func": "live",
                                "command": "io",
                                "uid": userid,
                                "subid": subid,
                                "pid": problem,
                                "index": i,
                                "gid": gid
                            }))
                    postSample = True
            elif rres[0] == 1:
                print("Run Time Exceeded, Terminated")
                osublog += "Run Time Exceeded, Terminated." + "\r\n"
                if ws != None:
                    await ws.send(
                        pickle.dumps({
                            "func": "live",
                            "command": "rtlet",
                            "uid": userid,
                            "subid": subid,
                            "pid": problem,
                            "index": i,
                            "gid": gid
                        }))
                postSample = True
            elif rres[0] == 2:
                print("Memory Limit Exceeded.")
                osublog += "Memory Limit Exceeded." + "\r\n"
                if ws != None:
                    await ws.send(
                        pickle.dumps({
                            "func": "live",
                            "command": "mlet",
                            "uid": userid,
                            "subid": subid,
                            "pid": problem,
                            "index": i,
                            "gid": gid
                        }))
                postSample = True
            else:
                print("Return Error R")
                osublog += "Return Error r." + "\r\n"
            # For a failed sample case (or any failed case of a custom run),
            # dump the full inputs/outputs into the log for the user.
            if (points[i] == -1 or customrunoriginal != -1) and postSample:
                missedSample = True
                if points[i] == -1:
                    osublog += "Since 'Correct Output' was not achieved, and since this is a sample case, here are the contents of the input file, output file, expected output, and error file:" + "\r\n"
                elif customrunoriginal != -1:
                    osublog += "Since 'Correct Output' was not achieved, and since this is a custom run, here are the contents of the input file, output file, expected output, and error file:" + "\r\n"
                osublog += "Input File:" + "\r\n"
                osublog += first1023(input[i])
                osublog += "Output File:" + "\r\n"
                osublog += first1023(rres[2])
                osublog += "Expected Output:" + "\r\n"
                osublog += first1023(expected[i])
                osublog += "Error File:" + "\r\n"
                osublog += first1023(rres[3])
                osublog += "\r\n"
        print("Total Score:", totalScore)
        osublog += "Finished grading." + "\r\n"
        osublog += "Total score: " + str(totalScore) + "\r\n"
        oscore = totalScore
        ostatus = 2
        # A missed sample downgrades the status even though test cases
        # were still graded.
        if missedSample:
            ostatus = 3
    else:
        print("Return Error C")
        osublog += "Return Error C." + "\r\n"
        ostatus = 0
    # Tell the live listener that grading has finished.
    if ws != None:
        await ws.send(
            pickle.dumps({
                "func": "live",
                "command": "stop",
                "uid": userid,
                "subid": subid,
                "pid": problem,
                "gid": gid
            }))
    print("Finished grading submission")
    return ostatus, oscore, osublog
# OMR (bubble-sheet) reading script: detect filled circles on three scanned
# images (name, surname, answers), decode them, and grade the answer sheet.
# Python 2 (print statements).
import cv2
from findCircles import findCircles
from findLetters import findLetters
from showBlobs import showBlobs
from findChoices import findChoices
from grade import grade

# Decode the name field from its bubbled letters.
image = cv2.imread("images/Name.jpg")
cnts = findCircles(image)
text = findLetters(cnts)
print text
showBlobs(image, cnts)

# Decode the surname field the same way.
image1 = cv2.imread("images/Surname.jpg")
cnts1 = findCircles(image1)
text1 = findLetters(cnts1)
print text1
showBlobs(image1, cnts1)

# Read the answer bubbles and grade the selected choices.
image2 = cv2.imread("images/answers.jpg")
cnts2 = findCircles(image2)
showBlobs(image2, cnts2)
choices = findChoices(cnts2)
grade(choices)
def handle_single_essay(essay):
    """Machine-grade one essay against every scoring target of its problem.

    For each target, loads the latest trained model (falling back to the
    model's full path if the relative path fails), grades the essay text,
    and accumulates the integer scores. On success an EssayGrade row is
    saved and the essay is flagged as ML-graded.

    Returns ``(True, message)`` on success or ``(False, error_message)``.

    Fixes vs. the original: the bare ``except:`` is narrowed to
    ``except Exception``, ``results['success'] == False`` is the idiomatic
    ``not results['success']``, and dead assignments in the no-model error
    branch were removed.
    """
    # Needed to ensure that the DB is not wrapped in a transaction and pulls old data
    transaction.commit_unless_managed()

    # Strip out unicode and other characters in student response.
    # Needed, or grader may potentially fail
    # TODO: Handle unicode in student responses properly
    student_response = essay.essay_text.encode('ascii', 'ignore')

    # Gets both the max scores for each target and the number of targets
    target_max_scores = json.loads(essay.problem.max_target_scores)
    target_counts = len(target_max_scores)

    target_scores = []
    for m in xrange(0, target_counts):
        # Gets latest model for a given problem and target
        success, created_model = ml_grading_util.get_latest_created_model(essay.problem, m)

        if not success:
            error_message = "Could not identify a valid created model!"
            log.error(error_message)
            return False, error_message

        # Create grader path from location in submission
        grader_path = os.path.join(settings.ML_MODEL_PATH, created_model.model_relative_path)
        # Indicates whether the model is stored locally or in the cloud
        model_stored_in_s3 = created_model.model_stored_in_s3

        # Try to load the model file
        success, grader_data = load_model_file(created_model, use_full_path=False)
        if success:
            # Send to ML grading algorithm to be graded
            results = grade.grade(grader_data, student_response)
        else:
            results = RESULT_FAILURE_DICT

        # If the above fails, try using the full path in the created_model object
        if not results['success'] and not created_model.model_stored_in_s3:
            # Before, we used the relative path to load. Possible that the full path may work
            grader_path = created_model.model_full_path
            try:
                success, grader_data = load_model_file(created_model, use_full_path=True)
                if success:
                    results = grade.grade(grader_data, student_response)
                else:
                    results = RESULT_FAILURE_DICT
            except Exception:
                # Best-effort fallback: log and fall through to the failure check.
                error_message = "Could not find a valid model file."
                log.exception(error_message)
                results = RESULT_FAILURE_DICT

        # The first target's results carry the success/confidence reported
        # on the final grade object.
        if m == 0:
            final_results = results

        if not results['success']:
            error_message = "Unsuccessful grading: {0}".format(results)
            log.exception(error_message)
            return False, error_message
        target_scores.append(int(results['score']))

    grader_dict = {
        'essay': essay,
        'target_scores': json.dumps(target_scores),
        'grader_type': GraderTypes.machine,
        'feedback': '',
        'annotated_text': '',
        'premium_feedback_scores': json.dumps([]),
        'success': final_results['success'],
        'confidence': final_results['confidence'],
    }
    # Create grader object in controller by posting back results
    essay_grade = EssayGrade(**grader_dict)
    essay_grade.save()

    # Update the essay so that it doesn't keep trying to re-grade
    essay.has_been_ml_graded = True
    essay.save()
    transaction.commit_unless_managed()
    return True, "Successfully scored!"
def run(name, args):
    """Parse benchmark options, run the selected sweeps, and grade results.

    ``name`` is the program name (for usage messages); ``args`` are the
    raw command-line arguments. Mutates module globals ``outFile`` and
    ``doCheck``. Options: -h help, -s scale, -a MPI flag set (x/o/n),
    -f output file, -u update-mode list (s/b/r), -c enable checking,
    -p process limit.
    """
    global outFile, doCheck
    scale = 1
    updateList = [UpdateMode.batch, UpdateMode.synchronous]
    optString = "ha:s:u:p:f:c"
    processLimit = 100
    otherArgs = []
    mpiFlags = []
    optlist, args = getopt.getopt(args, optString)
    for (opt, val) in optlist:
        if opt == '-h':
            usage(name)
        elif opt == '-s':
            scale = float(val)
        elif opt == '-a':
            # Select which set of MPI flags to pass through.
            if val == 'x':
                mpiFlags = []
            elif val == 'o':
                mpiFlags = oldMpiFlags
            elif val == 'n':
                mpiFlags = newMpiFlags
            else:
                outmsg("Invalid MPI flag specifier '%s'" % val)
                usage(name)
        elif opt == '-f':
            # Tee results into a generated file; fall back to stdout-only
            # if the file cannot be opened.
            fname = generateFileName(val)
            try:
                outFile = open(fname, "w")
                outmsg("Writing to file '%s'" % fname)
            except Exception as e:
                outFile = None
                outmsg("Couldn't open file '%s'" % fname)
        elif opt == '-u':
            # Colon-separated list of update modes replaces the default.
            ulist = val.split(":")
            updateList = []
            for c in ulist:
                if c == 's':
                    updateList.append(UpdateMode.synchronous)
                elif c == 'b':
                    updateList.append(UpdateMode.batch)
                elif c == 'r':
                    updateList.append(UpdateMode.ratOrder)
                else:
                    outmsg("Invalid update mode '%s'" % c)
                    usage(name)
        elif opt == '-c':
            doCheck = True
        elif opt == '-p':
            processLimit = int(val)
        else:
            outmsg("Unknown option '%s'" % opt)
            usage(name)
    hostInfo = classifyProcessor()
    tstart = datetime.datetime.now()
    ok = True
    # NOTE(review): `ok and sweep(...)` short-circuits — once a sweep fails,
    # later update modes are skipped entirely. Confirm this is intended.
    for u in updateList:
        ok = ok and sweep(u, processLimit, scale, mpiFlags, otherArgs)
    delta = datetime.datetime.now() - tstart
    secs = delta.seconds + 24 * 3600 * delta.days + 1e-6 * delta.microseconds
    outmsg("Total test time = %.2f secs." % secs)
    # Grade to stdout, and also to the output file when one was opened.
    grade.grade(ok, gmeanDict, sys.stdout, hostInfo)
    if outFile:
        grade.grade(ok, gmeanDict, outFile, hostInfo)
        outFile.close()
for i in range(0, len(dataNO)): dataNO[i][1] = 'SN_' + dataSN[i][1] dataNO[i][2] = 'VS_' + dataVS[i][1] dataNO[i][3] = 'FT_' + dataFT[i][1] dataNO[i][4] = 'BW_' + dataBW[i][1] dataNO[i][5] = 'PG_' + dataPG[i][1] dataNO[i][6] = 'IF_' + dataIF[i][1] dataNO[i][7] = 'NS_' + dataNS[i][1] dataNO[i][8] = 'NA_' + dataMA[i][1] dataNO[i][9] = 'SSL_' + dataSSL[i][1] dataNO[i][10] = 'O_' + dataO[i][1] numpy.savetxt('dataOut\dataNO.txt', dataNO, fmt='%s', delimiter=',') dataGrade = open('dataIn\grade.txt') dataG = grade.grade(dataGrade) numpy.savetxt('dataOut\dataG.txt', dataG, fmt='%s') A = [] B = [] C = [] for i in range(0, len(dataG)): for j in range(0, len(dataNO)): if dataG[i][1] == 'C': if dataG[i][0] == dataNO[j][0]: C.append([ dataNO[j][1], dataNO[j][2], dataNO[j][3], dataNO[j][4],
def test_point_91_got_A(self):
    """91 is just above the A/B boundary and earns an 'A'."""
    letter = grade(91)
    self.assertEqual(letter, 'A')
def test_point_100_got_A(self):
    """A perfect score of 100 earns an 'A'."""
    letter = grade(100)
    self.assertEqual(letter, 'A')
from grade import grade
from engine import TemplateEngine


def demo():
    """James's demo functionality from our class session on 9/21."""
    # The template engine fills in templates using every template and slot
    # definition found in "templates/basic_templates.txt".
    engine = TemplateEngine(file_path="templates/basic_templates.txt")
    # Generate four lines from the "SENTENCE" template, each prefixed with
    # a newline and indented one extra space per line.
    pieces = [
        "\n" + " " * depth + engine.generate(template_name="SENTENCE")
        for depth in range(4)
    ]
    print(f"Demo by James{''.join(pieces)}")


#demo()

print("*Welcome to Yemi's Template World*")

# Uncomment this function to print out all your results, at any time (see grade.py)
grade()
def test_point_zero_got_F(self):
    """The minimum score of 0 earns an 'F'."""
    letter = grade(0)
    self.assertEqual(letter, 'F')
os.system('pip install matplotlib') # In[16]: #os.run_line_magic('matplotlib', 'inline') # In[17]: import grade import importlib importlib.reload(grade) ok = Notebook('lab02.ok') _ = ok.auth(inline=True) name="test1_3" points_per_test=2.5 comments="" grade.grade(name, points_per_test, comments, ok) # In[ ]: