def grade_session_with_automatic_model(request, session_code):
    """Grade every submission of a session with the stored auto-grading model.

    Runs the pre-trained Additive Groves model over the session's submissions
    and records each predicted grade as a ManualGradeRecord owned by a
    synthetic "AUTO-ADDITIVE-GROVES" worker.
    """
    session = get_object_or_404(Session, code=session_code)
    model_path = os.path.join(AUTOGRADING_MODEL_ROOT, session.task_def.name, 'ag_grading.bin')
    if not os.path.exists(model_path):
        return HttpResponse("No model." + model_path)
    work_dir = os.path.join('/var/tmp/autograding', session_code)
    if not os.path.exists(work_dir):
        os.makedirs(work_dir)
    # [0, 0, 1] split: all data goes into the "test" partition — prediction only.
    create_flat_file.create_grading_flat_file(session.code, work_dir, [0, 0, 1])
    build_te_model.make_predictions_with_model(work_dir, model_path)
    predictions, gt_url = round_predictions.round_predictions(work_dir)
    grader, _created = Worker.objects.get_or_create(worker="AUTO-ADDITIVE-GROVES")
    response = HttpResponse("Done.")
    # Python 2 idiom: map(None, a, b) zips the two lists (padding with None if
    # lengths differ — presumably they are equal here).
    for grade, ref_line in map(None, predictions, gt_url):
        sub_id = int(ref_line.split('\t')[0])
        response.write("%d\t%d\n" % (sub_id, grade))
        submission = get_object_or_404(SubmittedTask, id=sub_id)
        record, _made = ManualGradeRecord.objects.get_or_create(submission=submission, worker=grader)
        record.feedback = "Computer-generated grade"
        record.quality = grade
        record.save()
    return response
def grade_session_with_automatic_model(request, session_code):
    # Grade all submissions of a session with the pre-trained Additive Groves
    # model, recording each predicted grade as a ManualGradeRecord owned by a
    # synthetic worker, and echoing "<submission_id>\t<grade>" lines back.
    session = get_object_or_404(Session, code=session_code)
    task_name = session.task_def.name
    # Model binary produced earlier by the model-building view.
    ag_model_fn = os.path.join(AUTOGRADING_MODEL_ROOT, task_name, "ag_grading.bin")
    if not os.path.exists(ag_model_fn):
        return HttpResponse("No model." + ag_model_fn)
    temp_root = os.path.join("/var/tmp/autograding", session_code)
    if not os.path.exists(temp_root):
        os.makedirs(temp_root)
    # [0, 0, 1] split: everything lands in the test partition (no training here).
    create_flat_file.create_grading_flat_file(session.code, temp_root, [0, 0, 1])
    build_te_model.make_predictions_with_model(temp_root, ag_model_fn)
    (predictions, gt_url) = round_predictions.round_predictions(temp_root)
    (wrk, is_created) = Worker.objects.get_or_create(worker="AUTO-ADDITIVE-GROVES")
    resp = HttpResponse("Done.")
    # Python 2 idiom: map(None, a, b) zips, padding the shorter list with None.
    for pred, ref in map(None, predictions, gt_url):
        # Each gt_url entry appears to start with "<submission_id>\t..." — take the id.
        submission_id = int(ref.split("\t")[0])
        resp.write("%d\t%d\n" % (submission_id, pred))
        subm = get_object_or_404(SubmittedTask, id=submission_id)
        (rcd, created) = ManualGradeRecord.objects.get_or_create(submission=subm, worker=wrk)
        rcd.feedback = "Computer-generated grade"
        rcd.quality = pred
        rcd.save()
    return resp
def main(argv):
    """CLI entry point: ``-o 2008`` / ``-o 2012`` writes the matching submission file."""
    try:
        options, _positional = getopt.getopt(argv, "ho:", ["output="])
    except getopt.GetoptError:
        print('xgb.py [-o [2008] [2012]]')
        sys.exit(2)
    for flag, value in options:
        if flag == '-h':
            print('xgb.py [-o [2008] [2012]]')
            sys.exit()
        if flag in ("-o", "--output"):
            if value == "2008":
                preds = round_predictions(xgb_model.predict(X_test_2008))
                print("preds.shape" + str(preds.shape))  # DEBUG
                make_submission_2008("submissions/xgb_2008.csv", preds)
            elif value == "2012":
                preds = round_predictions(xgb_model.predict(X_test_2012))
                make_submission_2012("submissions/xgb_2012.csv", preds)
def build_model_from_session(request, session_code):
    """Train the auto-grading model for a session's task.

    Builds a flat data file from the session, trains an Additive Groves model
    on the train+validation split, predicts on the held-out test split,
    streams a plain-text progress log plus the test-set report back to the
    client, and installs the trained model as the task's ``ag_grading.bin``.
    """
    session = get_object_or_404(Session, code=session_code)
    task_name = session.task_def.name
    ag_model_fn = os.path.join(AUTOGRADING_MODEL_ROOT, task_name, 'ag_grading.bin')
    ag_training_root = os.path.join(AUTOGRADING_MODEL_ROOT, task_name, 'training')
    if not os.path.exists(ag_training_root):
        os.makedirs(ag_training_root)
    temp_root = os.path.join('/var/tmp/autograding', session_code)
    if not os.path.exists(temp_root):
        os.makedirs(temp_root)
    resp = HttpResponse("Building grading model:\n", mimetype="text/plain")
    resp.write("Creating data file\n")
    resp.flush()
    # [0.5, 0.25, 0.25]: train / validation / test split fractions.
    create_flat_file.create_grading_flat_file(session.code, ag_training_root, [0.5, 0.25, 0.25])
    resp.write(" done\n")
    resp.write("Learning the model on train+val\n")
    resp.flush()
    build_te_model.build_model(ag_training_root)
    resp.write(" done\n")
    ag_trained_model_fn = os.path.join(ag_training_root, 'model.bin')
    build_te_model.make_predictions_with_model(ag_training_root, ag_trained_model_fn)
    resp.write("Predicting on the testset\n")
    resp.flush()
    round_predictions.round_predictions(ag_training_root)
    resp.write(" done\n")
    resp.write("----------------\n")
    resp.write("Testset report\n")
    rpt_fn = os.path.join(ag_training_root, 'preds.txt.report.txt')
    # FIX: close the report file deterministically instead of leaking the
    # handle returned by a bare open(...).readlines().
    with open(rpt_fn, 'r') as rpt:
        for s in rpt:
            resp.write(s)
    # Promote the freshly trained model to the live model location.
    shutil.copyfile(ag_trained_model_fn, ag_model_fn)
    return resp
def build_model_from_session(request, session_code):
    # Train the auto-grading model for a session's task: build a flat data file,
    # learn an Additive Groves model on train+val, predict on the held-out test
    # split, stream progress plus the test-set report as plain text, and install
    # the trained model as the task's ag_grading.bin.
    session = get_object_or_404(Session, code=session_code)
    task_name = session.task_def.name
    # Final installed location of the task's grading model.
    ag_model_fn = os.path.join(AUTOGRADING_MODEL_ROOT, task_name, "ag_grading.bin")
    ag_training_root = os.path.join(AUTOGRADING_MODEL_ROOT, task_name, "training")
    if not os.path.exists(ag_training_root):
        # os.rmdir(ag_training_root);
        os.makedirs(ag_training_root)
    temp_root = os.path.join("/var/tmp/autograding", session_code)
    if not os.path.exists(temp_root):
        os.makedirs(temp_root)
    # NOTE(review): temp_root is created but not used below in this view — confirm.
    resp = HttpResponse("Building grading model:\n", mimetype="text/plain")
    resp.write("Creating data file\n")
    resp.flush()
    # [0.5, 0.25, 0.25]: train / validation / test split fractions.
    create_flat_file.create_grading_flat_file(session.code, ag_training_root, [0.5, 0.25, 0.25])
    resp.write(" done\n")
    resp.write("Learning the model on train+val\n")
    resp.flush()
    build_te_model.build_model(ag_training_root)
    resp.write(" done\n")
    ag_trained_model_fn = os.path.join(ag_training_root, "model.bin")
    build_te_model.make_predictions_with_model(ag_training_root, ag_trained_model_fn)
    resp.write("Predicting on the testset\n")
    resp.flush()
    round_predictions.round_predictions(ag_training_root)
    resp.write(" done\n")
    resp.write("----------------\n")
    resp.write("Testset report\n")
    rpt_fn = os.path.join(ag_training_root, "preds.txt.report.txt")
    # Echo the generated report into the response (file handle is not closed here).
    for s in open(rpt_fn, "r").readlines():
        resp.write(s)
    # Promote the newly trained model to the live location.
    shutil.copyfile(ag_trained_model_fn, ag_model_fn)
    return resp
def pred_2008(): lasso_unrounded = modified_predict(lasso, X_test_2008) ridge_unrounded = modified_predict(ridge, X_test_2008) mlp_unrounded = modified_predict(mlp, X_test_2008) xgb_unrouned = xgb_2008.reshape(-1, 1) rand_forest_unrounded = modified_predict(rand_forest, X_test_2008) adaboost_ran_forest_unrounded = modified_predict(adaboost_ran_forest, X_test_2008) adaboost_unrounded = modified_predict(adaboost, X_test_2008) total = xgb_score + adaboost_ran_forest_score + adaboost_score + mlp_score + \ lasso_score + rand_forest_score + ridge_score lasso_weight = lasso_score / total ridge_weight = ridge_score / total xgb_weight = xgb_score / total adaboost_ran_forest_weight = adaboost_ran_forest_score / total adaboost_weight = adaboost_score / total mlp_weight = mlp_score / total rand_forest_weight = rand_forest_score / total lasso_2008 = lasso_weight * lasso_unrounded ridge_2008 = ridge_weight * ridge_unrounded xgb_2008_weighted = xgb_weight * xgb_unrouned adaboost_ran_forest_2008 = adaboost_ran_forest_weight * adaboost_ran_forest_unrounded adaboost_2008 = adaboost_weight * adaboost_unrounded mlp_2008 = mlp_weight * mlp_unrounded rand_forest_2008 = rand_forest_weight * rand_forest_unrounded lasso_unrounded = 0 ridge_unrounded = 0 mlp_unrounded = 0 xgb_unrouned = 0 rand_forest_unrounded = 0 adaboost_ran_forest_unrounded = 0 adaboost_unrounded = 0 print "Starting Adding" temp1 = np.add(np.add(lasso_2008, ridge_2008), xgb_2008_weighted) print "Halway Through Adding" temp2 = np.add( np.add(np.add(adaboost_ran_forest_2008, adaboost_2008), mlp_2008), rand_forest_2008) ensemble = np.add(temp1, temp2) print "Done Adding!" temp1 = 0 temp2 = 0 print ensemble ensemble = round_predictions(ensemble) print "Min of ensemble: ", np.min(ensemble), ". Max: ", np.max(ensemble) return ensemble
def main(argv):
    """CLI entry point: ``-o 2008`` / ``-o 2012`` writes the matching submission file."""
    try:
        options, _positional = getopt.getopt(argv, "ho:", ["output="])
    except getopt.GetoptError:
        print('xgb.py [-o [2008] [2012]]')
        sys.exit(2)
    for flag, value in options:
        if flag == '-h':
            print('xgb.py [-o [2008] [2012]]')
            sys.exit()
        if flag in ("-o", "--output"):
            if value == "2008":
                preds = round_predictions(xgb_model.predict(X_test_2008))
                print("preds.shape" + str(preds.shape))  # DEBUG
                make_submission_2008("submissions/xgb_2008.csv", preds)
            elif value == "2012":
                preds = round_predictions(xgb_model.predict(X_test_2012))
                make_submission_2012("submissions/xgb_2012.csv", preds)


if __name__ == "__main__":
    # NOTE(review): the train/validation accuracy report below is assumed to
    # sit under this guard rather than at module level — confirm.
    main(sys.argv[1:])
    preds = round_predictions(xgb_model.predict(X_train_2008))
    train_error = np.mean(preds == Y_train_2008)
    print("XGB train error = " + str(train_error))
    ver_preds = round_predictions(xgb_model.predict(X_ver_2008))
    ver_error = np.mean(ver_preds == Y_ver_2008)
    print("XGB ver error = " + str(ver_error))
# Load the lasso model from its pickle cache (or fit and cache it).
lasso = read_make_pkl("saved_objs/lasso.pkl", gen_lasso)


def main(argv):
    """CLI entry point: ``-o 2008`` / ``-o 2012`` writes the matching submission file."""
    try:
        options, _positional = getopt.getopt(argv, "ho:", ["output="])
    except getopt.GetoptError:
        print('lasso.py [-o [2008] [2012]]')
        sys.exit(2)
    for flag, value in options:
        if flag == '-h':
            print('lasso.py [-o [2008] [2012]]')
            sys.exit()
        if flag in ("-o", "--output"):
            if value == "2008":
                preds = round_predictions(lasso.predict(X_test_2008))
                # DEBUG
                print("lasso.predict(X_test_2008).shape" + str(lasso.predict(X_test_2008).shape))
                make_submission_2008("submissions/lasso_2008.csv", preds)
            elif value == "2012":
                preds = round_predictions(lasso.predict(X_test_2012))
                make_submission_2012("submissions/lasso_2012.csv", preds)


if __name__ == "__main__":
    main(sys.argv[1:])
    # NOTE(review): np.mean(preds == Y) is the match rate (accuracy), not an
    # error, despite the variable name — confirm intent.
    preds = round_predictions(lasso.predict(X_train_2008))
    train_error = np.mean(preds == Y_train_2008)
    print("Lasso train error = " + str(train_error))
def lin_reg_modified_predict(model, X):
    """Predict with *model* on *X*, reshape to a column vector, and round.

    Returns the array produced by ``round_predictions`` on the (n, 1)-shaped
    raw predictions.
    """
    preds = model.predict(X).reshape(-1, 1)
    # FIX: removed the leftover DEBUG print of mpreds.shape — a prediction
    # helper should not write to stdout on every call.
    return round_predictions(preds)
# Load the ridge model from its pickle cache (or fit and cache it).
ridge = read_make_pkl("saved_objs/ridge.pkl", gen_ridge)


def main(argv):
    """CLI entry point: ``-o 2008`` / ``-o 2012`` writes the matching submission file."""
    try:
        options, _positional = getopt.getopt(argv, "ho:", ["output="])
    except getopt.GetoptError:
        print('ridge.py [-o [2008] [2012]]')
        sys.exit(2)
    for flag, value in options:
        if flag == '-h':
            print('ridge.py [-o [2008] [2012]]')
            sys.exit()
        if flag in ("-o", "--output"):
            if value == "2008":
                preds = round_predictions(ridge.predict(X_test_2008))
                # DEBUG
                print("ridge.predict(X_test_2008).shape" + str(ridge.predict(X_test_2008).shape))
                make_submission_2008("submissions/ridge_2008.csv", preds)
            elif value == "2012":
                preds = round_predictions(ridge.predict(X_test_2012))
                make_submission_2012("submissions/ridge_2012.csv", preds)


if __name__ == "__main__":
    main(sys.argv[1:])
    # NOTE(review): np.mean(preds == Y) is the match rate (accuracy), not an
    # error, despite the variable name — confirm intent.
    preds = round_predictions(ridge.predict(X_train_2008))
    train_error = np.mean(preds == Y_train_2008)
    print("Ridge train error = " + str(train_error))