def add_student():
    """Interactively add students to the teacher's JSON roster until the user quits."""
    while True:
        # Load the teacher's current roster (empty dict when the file is missing).
        record = file_manager.read_json(teacher_name + '.json', {})
        students = record['all_student'] if record else []
        num = int(record['num']) if record else 0

        # Collect the new student's details from the console.
        s_name = input('请输入学生姓名:')
        s_age = input('请输入年龄:')
        s_sex = input('请输入性别:')
        s_tel = input('请输入联系电话:')

        # zfill left-pads with zeros, producing ids like 'stu_0001'.
        num += 1
        s_id = 'stu_' + str(num).zfill(4)

        new_student = model.Student(s_id, s_name, s_age, s_sex, s_tel)
        students.append(new_student.__dict__)

        # Persist the updated roster together with its counter.
        file_manager.write_json(teacher_name + '.json',
                                {'all_student': students, 'num': len(students)})

        choice = input('添加成功!\n1.继续\n2.返回\n请选择(1-2):')
        if choice == '2':
            break
def add_student():
    """Interactively add students to <name>.json until the user chooses to return.

    The file stores {'all_student': [<student dict>, ...], 'num': <count>};
    the counter produces sequential ids such as 'stu_0001'.
    """
    students_json = file_manager.read_json(name + '.json', {})
    if not students_json:
        students = []
        num = 0
    else:
        students = students_json['all_student']
        num = int(students_json['num'])
    while True:
        s_name = input('请输入学生姓名:')
        s_age = input('请输入年龄:')
        s_gender = input('请输入性别:')
        s_tel = input('请输入电话:')
        num += 1
        # zfill pads with leading zeros: 1 -> '0001'.
        s_id = 'stu_' + str(num).zfill(4)
        # Build the Student object and store its attribute dict.
        s = model.Student(s_id, s_name, s_age, s_gender, s_tel)
        students.append(s.__dict__)
        data = {'all_student': students, 'num': len(students)}
        # Rewrite the whole file so the data persists between runs.
        file_manager.write_json(name + '.json', data)
        # BUG FIX: the original printed '输入有误!' on an invalid choice but then
        # fell straight back into adding another student; re-prompt instead.
        while True:
            choice = input('添加成功!\n1.继续\n2.返回\n请选择(1-2)')
            if choice in ('1', '2'):
                break
            print('输入有误!')
        if choice == '2':
            break
def add_student():
    """Prompt for student details in a loop and persist them to the roster file."""
    stored = file_manager.read_json(name, {})
    if stored:
        students = stored['all_student']
        num = int(stored['num'])
    else:
        students = []
        num = 0
    while True:
        # Gather the new student's details.
        s_name = input('请输入学生姓名:')
        s_age = input('请输入学生年龄:')
        s_gender = input('请输入学生性别:')
        s_tel = input('请输入学生电话号码:')
        # zfill left-pads with zeros, giving ids like 'stu_0001'.
        # NOTE(review): as in the original, num can repeat across runs.
        num += 1
        s_id = 'stu_' + str(num).zfill(4)
        students.append(model.Student(s_id, s_name, s_age, s_gender, s_tel).__dict__)
        # Rewrite the whole file so the data survives between runs.
        file_manager.write_json(name, {'all_student': students, 'num': len(students)})
        if input('添加成功!\n继续1\n返回2\n请选择:') == '2':
            break
def add_student():
    """Add students to USER_NAME's JSON store until the user picks anything but '1'."""
    system_data = file_manager.read_json(USER_NAME + '.json', {})
    all_student = system_data.get(ALL_STUDENT, [])
    while True:
        # 1. Read the new student's details.
        name = input('请输入姓名:')
        age = input('请输入年龄:')
        gender = input('请输入性别:')
        tel = input('请输入电话:')
        # Build a sequential id like 'stu0001' from the stored counter.
        count = system_data.get(COUNT, 0) + 1
        s_id = 'stu' + str(count).zfill(4)
        # 2. Create the student and fold it into the system data.
        s = model.Student(s_id, name, age, gender, tel)
        all_student.append(s.__dict__)
        system_data[ALL_STUDENT] = all_student
        system_data[COUNT] = count
        # 3. Persist and report.
        file_manager.write_json(USER_NAME + '.json', system_data)
        print('添加成功!')
        # 4. Anything other than '1' returns to the caller.
        value = input('1.继续\n2.返回\n请选择(1-2):')
        if value != '1':
            break
def post(self):
    """Bulk-create student accounts for one class.

    Expects a JSON body with parallel lists ``name``, ``seat_num`` and
    ``school_id`` plus scalars ``year`` and ``selected_class``; requires an
    admin token in the ``Authorization`` header. Blank rows are skipped.
    """
    data = request.json
    res = entities.check_token(request.headers["Authorization"])
    # PEP 8: compare to None with identity, not `== None`.
    if res is None:
        raise Exception("invalid token")
    selected_class = data["selected_class"]
    year = data["year"]
    name = data["name"]
    seat_num = data["seat_num"]
    school_id = data["school_id"]
    _, group = res
    if group != entities.group_admin:
        return err.not_allow_error
    for __name, __seat_num, __school_id in zip(name, seat_num, school_id):
        # Skip rows where any field is blank (clearer than the original
        # `len(a) * len(b) * len(c) == 0` trick).
        if not (__name and __seat_num and __school_id):
            continue
        # Zero-pad the seat number to two digits, e.g. '3' -> '03'.
        __seat_num = "{:02d}".format(int(__seat_num))
        new_stu = model.Student(
            username=f"{year}{selected_class}{__seat_num}",
            password=__school_id,
            school_id=__school_id,
            name=__name,
            project_id=-1,
        )
        model.db.session.add(new_stu)
    # Commit all new rows in a single transaction.
    model.db.session.commit()
    return {"status": "success"}
def get_student(id):
    """Fetch a single student row by primary key.

    Returns a model.Student with its birthday attribute set, or None when no
    row matches (the original raised TypeError subscripting None in that
    case). The cursor and connection are always closed, even on error.
    """
    conn = get_connection()
    cursor = conn.cursor()
    try:
        cursor.execute("select * from student where id = %s", (id, ))
        student = cursor.fetchone()
        if student is None:
            # BUG FIX: report "not found" instead of crashing on student[0].
            return None
        return_student = model.Student(student[0], student[1])
        return_student.birthday = student[2]
        return return_student
    finally:
        if cursor:
            cursor.close()
        if conn:
            conn.close()
def add_student():
    """Interactively add students to <name>.json until the user chooses to return.

    Reads the teacher's existing JSON file first (so data persists across
    runs), appends new students, and rewrites the whole file after each add.
    """
    stu_tea_json = file_manager.read_json(name + '.json', {})
    global count
    if not stu_tea_json:
        students_list = []
        # BUG FIX: reset the global counter for a fresh file; previously this
        # branch left `count` at whatever stale value the global held,
        # producing wrong or duplicate student ids.
        count = 0
    else:
        students_list = stu_tea_json['all_students']
        count = int(stu_tea_json['num'])
    while True:
        stu_name = input('请输入学生的姓名:')
        stu_age = input('请输入学生的年龄:')
        stu_sex = input('请输入学生的性别:')
        stu_tel = input('请输入学生的电话:')
        count += 1
        # zfill pads the number with leading zeros: 1 -> '0001'.
        s_id = 'stu' + str(count).zfill(4)
        # Build a Student instance from the model module.
        student = model.Student(s_id, stu_name, stu_age, stu_sex, stu_tel)
        students_list.append(student.__dict__)
        data = {'all_students': students_list, 'num': len(students_list)}
        # Persist via file_manager so the roster survives between runs.
        file_manager.write_json(name + '.json', data)
        print('添加成功!')
        # Keep prompting until a valid menu choice is entered.
        while True:
            choice = input('1.继续\n2.返回\n请选择(1~2):')
            if choice == '1':
                break
            elif choice == '2':
                return
            else:
                print('输入错误,请重新选择!')
def register(cls):
    """Registration flow: prompt for credentials, validate them, save the
    new student, then return to the login menu."""
    username = input("请输入注册账号:").strip()
    password = input("请输入注册密码:").strip()
    confirm = input("请确认注册密码:").strip()

    # Reject duplicate accounts.
    if model.Student.query_username(username):
        print("账号已经存在,请重新注册")
        return cls.show_login()

    # Both password entries must match.
    if password != confirm:
        print("两次密码输入不一致,请重新注册。")
        return cls.show_login()

    # Persist the new student, then go back to the login menu.
    student = model.Student(username, password)
    student.save()
    return cls.show_login()
def save_button(self):
    """Commit the dialog's fields to the model, then close the dialog.

    In "add" mode a new Student is created and appended to the model's list;
    otherwise the student at self.editee_index is updated in place.
    """
    if self.action == "add":
        new_student = m.Student()
        new_student.name = self.name_entry.get()
        new_student.booking = int(self.booking_cbox.get())
        new_student.dates = []
        # TODO: Rework this using row and column notation, where column can
        # be an int instead of a char
        try:
            new_student.column = string.ascii_lowercase[len(self.model.students_list) + 1]
        except IndexError:
            # Ran past 'z': fall back to a two-character column name.
            new_student.column = "A" + string.ascii_lowercase[1]
        new_student.classes_per_day = [i.get() for i in self.day_cboxes]
        self.model.students_list.append(new_student)
    else:
        student = self.model.students_list[self.editee_index]
        student.name = self.name_entry.get()
        # BUG FIX: the edit path stored the raw combobox string; convert to
        # int so booking has the same type as in the "add" path above.
        student.booking = int(self.booking_cbox.get())
        student.classes_per_day = [i.get() for i in self.day_cboxes]
    self.my_quit()
def create_model(flags, embed_tb, input_ids, logit, labels, n_label, is_training):
    """Build the student distillation graph.

    Combines a cross-entropy term against `labels` with an MSE term that
    matches the teacher's `logit`; returns (loss, log_prob).
    """
    student = model.Student(flags=flags, embed_tb=embed_tb, input_ids=input_ids)
    features = student.get_output()
    with tf.variable_scope("student/loss", reuse=tf.AUTO_REUSE):
        if is_training:
            # Dropout is applied only at training time.
            features = tf.nn.dropout(features, rate=flags.dropout)
        student_logits = tf.keras.layers.Dense(n_label)(features)
        log_prob = tf.nn.log_softmax(student_logits, axis=-1)
        targets = tf.one_hot(labels, depth=n_label, dtype=tf.float32)
        # Negative log-likelihood of the gold labels.
        nll = -tf.reduce_sum(targets * log_prob, axis=-1)
        # Distillation term: squared distance to the teacher logits.
        distill = tf.reduce_sum(tf.squared_difference(logit, student_logits), axis=-1)
        loss = tf.reduce_mean(nll + distill)
    return loss, log_prob
def add_student():
    """Append console-entered students to <name>.json, looping until the user enters '2'."""
    existing = file_manager.read_json(name + '.json', {})
    if not existing:
        students, num = [], 0
    else:
        students, num = existing['all_student'], int(existing['num'])
    while True:
        s_name = input('请输入学生姓名:')
        s_age = input('请输入年龄:')
        s_gender = input('请输入性别:')
        s_tel = input('请输入电话号码:')
        # Ids look like 'stu_0001'; zfill pads the counter with zeros.
        num += 1
        s_id = 'stu_' + str(num).zfill(4)
        record = model.Student(s_id, s_name, s_age, s_gender, s_tel).__dict__
        students.append(record)
        # File schema: {'all_student': [<student dict>, ...], 'num': <count>}
        file_manager.write_json(name + '.json',
                                {'all_student': students, 'num': len(students)})
        if input('添加成功!\n1.继续\n2.返回\n请选择(1-2):') == '2':
            break
def get_info_from_csv(file) -> List[model.Student]:
    """Parse the given CSV file and build one Student per data row."""
    reader = csv.DictReader(file)
    students: List[model.Student] = []
    # The first SKIPPED_ROWS rows are not student records.
    for row in itertools.islice(reader, SKIPPED_ROWS, None):
        row.pop(STUDENT_ID_COLUMN)
        full_name = row.pop(FIRST_NAME_COLUMN) + " " + row.pop(LAST_NAME_COLUMN)
        email = row.pop(EMAIL_COLUMN)
        note = row.pop(NOTE_COLUMN, "")
        # Every remaining column maps a problem name to its numeric grade.
        grades: Dict[str, float] = {
            problem: float(grade) for problem, grade in row.items()
        }
        students.append(model.Student(full_name, email, grades, note))
    return students
# Smoke-test script for the model module (imported under the alias `p`):
# create a Student, then call the module-level greeting helper.
import model as p

stu = p.Student("sss", 18)
p.sayHello()
def add(name, course, age, cgpa):
    """Create a Student, expose it via the module-global ``s``, and persist it.

    Returns whatever Student.addStudent() returns.
    """
    global s
    # The global is kept so other code in the module can reach the
    # most recently added student, as in the original.
    s = model.Student(name, course, age, cgpa)
    return s.addStudent()
def main(args):
    """Run the reliable-teacher training pipeline, then sweep precision/coverage.

    If a cached precision/coverage pickle exists, only the plot is redrawn.
    Otherwise: initialize datasets, pretrain the student (source-only or
    adversarial), optionally learn + calibrate an importance-weight (IW)
    model, train a confidence predictor, sweep the confidence threshold T,
    and pickle the resulting curves.
    """
    data_fn = 'plots/prec_cov_list.pk'
    fig_fn = 'plots/prec_cov'
    # Fast path: reuse cached results and just regenerate the figure.
    if os.path.exists(data_fn):
        pc_data = pickle.load(open(data_fn, 'rb'))
        plot_prec_cov(pc_data['T_list'], pc_data['prec_list'],
                      pc_data['cov_list'], fig_fn)
        return

    ## init a snapshot path
    os.makedirs(args.train.save_root, exist_ok=True)

    ## init logger (tee stdout into <save_root>/out)
    sys.stdout = Logger(os.path.join(args.train.save_root, 'out'))

    ## print args
    print_args(args)

    ## init gpus
    if not args.cpu:
        print("##GPUs Available: ",
              len(tf.config.experimental.list_physical_devices('GPU')))
    print()

    ## init datasets: source, target, and a domain-discrimination mix
    print("## init datasets")
    ds_src = data.MultiSourceDataset(
        args.data.src,
        args.aug_params,
        batch_size=args.data.batch_size,
        val_shuffle=True,
        val_aug=True,
        domain_id=1,
        color=False if args.data.img_size[2] == 1 else True,
        size=args.data.img_size[0],
        sample_ratio=args.data.sample_ratio[0])
    assert (len(args.aug_params) == 1)  ##TODO
    ds_tar = getattr(data, args.data.tar)(
        root=os.path.join('data', args.data.tar.lower()),
        batch_size=args.data.batch_size,
        aug_list=args.aug_params[0],
        val_shuffle=True,
        val_aug=True,
        domain_id=0,
        color=False if args.data.img_size[2] == 1 else True,
        size=args.data.img_size[0],
        sample_ratio=args.data.sample_ratio[1])
    ds_dom = data.DomainDataset(
        data.MultiSourceDataset(
            args.data.src,
            args.aug_params,
            batch_size=args.data.batch_size,
            val_shuffle=True,
            val_aug=True,
            test_aug=True,  #diff
            domain_id=1,
            color=False if args.data.img_size[2] == 1 else True,
            size=args.data.img_size[0],
            sample_ratio=args.data.sample_ratio[0]),
        getattr(data, args.data.tar)(
            root=os.path.join('data', args.data.tar.lower()),
            batch_size=args.data.batch_size,
            aug_list=args.aug_params[0],
            val_shuffle=True,
            val_aug=True,
            test_aug=True,  #diff
            domain_id=0,
            color=False if args.data.img_size[2] == 1 else True,
            size=args.data.img_size[0],
            sample_ratio=args.data.sample_ratio[1]))
    print()

    ####
    ## reliable teacher learning
    ####
    # Student and teacher share the same base architecture.
    mdl_st_base = getattr(model, args.model.base)(num_class=args.model.n_labels,
                                                  input_shape=args.model.img_size)
    #mdl_st_base = model.TempCls(mdl_st_base)
    mdl_st = model.Student(args.model, mdl_st_base, ds_src, ds_tar, ideal=args.ideal)
    mdl_tc_base = getattr(model, args.model.base)(num_class=args.model.n_labels,
                                                  input_shape=args.model.img_size)
    #mdl_tc_base = model.TempCls(mdl_tc_base)
    mdl_tc = model.Teacher(args.model, mdl_tc_base, ds_src, ds_tar, ideal=args.ideal)

    ## rename: shorter aliases used below
    model_t = mdl_tc
    model_s = mdl_st
    model_c = model_s.model_base
    params = args.train
    params_base = args.train_base
    params_advtr = args.train_advtr
    params_iw = args.train_iw
    params_iw_cal = args.cal_iw
    params_conf = args.est_conf
    i_epoch = 1

    ## init a model: pretrain the shared base classifier
    if params.init == 'sourceonly':
        ##TODO: assume classification
        print("## init the student model with sourceonly training")
        model.set_trainable(model_c, True)
        ## init a learner
        learner = LearnerCls(params_base, model_c,
                             model_name_postfix='_sourceonlyinit')
        ## train the model
        learner.train(ds_src.train, ds_src.val, ds_src.test)
        ## test the model
        learner.test(ds_src.test, ld_name='src', verbose=True)
        print()
    elif params.init == 'advtr':
        ##TODO: assume classification
        print("## init a base model with adversarial training")
        model.set_trainable(model_c, True)
        ## init a adv model (domain discriminator on the base features)
        mdl_adv = getattr(model, params_advtr.model_advtr)(n_in=model_c.dim_feat)
        ## init a learner
        learner = LearnerDACls(params_advtr, model.DAN(model_c, mdl_adv),
                               model_name_postfix='_advtrinit')
        ## train the model
        learner.train([ds_src.train, ds_dom.train], None, ds_tar.test)
        ## test the model
        learner.test(ds_tar.test, ld_name='tar', verbose=True)
        print()
    else:
        raise NotImplementedError

    ## init iw: learn and calibrate an importance-weight model when configured
    if model_t.train.model_conf.model_iw is not None:
        print("## learn IW")
        model_sd = model_t.train.model_conf.model_iw.model_sd.model
        model_sd.train()
        ## init a learner (source/target domain discriminator)
        learner_sd = LearnerCls(params_iw, model_sd,
                                model_name_postfix='_iw_epoch_%d' % (i_epoch))
        ## train the model
        learner_sd.train(ds_dom.train, ds_dom.val, ds_dom.test)
        ## test the model
        learner_sd.test(ds_dom.test, ld_name='domain', verbose=True)
        print()

        ## init a calibration model (temperature scaling on the IW net)
        model_sd_cal = model_t.train.model_conf.model_iw.model_sd
        model_sd_cal.train()
        ## init a calibrator
        calibrator_iw = CalibratorCls(params_iw_cal, model_sd_cal,
                                      model_name_postfix='_iw_cal_epoch_%d' % (i_epoch))
        ## calibrate the model
        calibrator_iw.train(ds_dom.val, ds_dom.val, ds_dom.test)
        ## test the model
        calibrator_iw.test(ds_dom.test, ld_name='domain', verbose=True)
        print()

        ## 2. learn confidence predictor on top of the calibrated IW
        model_base = model_t.train.model_base
        model_conf = model_t.train.model_conf
        model_iw = model_t.train.model_conf.model_iw
        model_iw_cond = model.CondIW(model_iw, model_conf, ds_src.train, ds_tar.train)
        ## init a learner
        learner = LearnerConfPred(params_conf, model_conf, model_base, model_iw_cond,
                                  model_name_postfix='_confpred_epoch_%d' % (i_epoch))
        ## train the model
        learner.train(ds_src.val, ds_src.val, ds_tar.test)
        ## test the model
        learner.test(ds_tar.test, ld_name='tar', verbose=True)
        learner.test(ds_tar.train, ld_name='tar (train)', verbose=True)
        print()
    else:
        # No IW model: use a fixed confidence threshold instead of training.
        model_base = model_t.train.model_base
        model_conf = model_t.train.model_conf
        ## init a learner
        learner = LearnerConfPred(params_conf, model_conf, model_base, None,
                                  model_name_postfix='_confpred_epoch_%d' % (i_epoch))
        ## train the model
        model_conf.T = tf.Variable(1.0 - params_conf.eps)  ##TODO
        print("T = %f" % (model_conf.T.numpy()))
        ## test the model
        learner.test(ds_tar.test, ld_name='tar', verbose=True)
        learner.test(ds_tar.train, ld_name='tar (train)', verbose=True)

    ## compute precision and coverage over a sweep of thresholds T
    T_list, prec_list, cov_list = [], [], []
    for T in np.arange(0.0, 1.0, 0.01):
        model_conf.T = T
        prec, n_conf, n = learner.test(ds_tar.train, ld_name='tar (train)',
                                       verbose=True)
        T_list.append(T)
        prec_list.append(prec.numpy())
        # Coverage = fraction of examples the predictor is confident on.
        cov_list.append(float(n_conf) / float(n))
    print(T_list)
    print(prec_list)
    print(cov_list)
    print()
    T_list = np.array(T_list)
    prec_list = np.array(prec_list)
    cov_list = np.array(cov_list)
    # Cache the sweep so the fast path at the top can re-plot without retraining.
    pickle.dump(
        {
            'T_list': T_list,
            'prec_list': prec_list,
            'cov_list': cov_list
        }, open(data_fn, 'wb'))
def start():
    """Top-level console menu for the grade-management system.

    Loops over: 1) admin login (add students/professors/courses),
    2) student login (enrol in courses, view profile), 3) professor login
    (list enrolled students), 4) exit.
    """
    while True:
        content = "----------------------\n歡迎來到成績管理系統\n1.管理員登入\n2.學生登入\n3.教授登入\n4.退出\n----------------------\n請選擇1-4\n"
        operator = input(content)
        if operator == "1":
            # --- Administrator login ---
            username = input("請輸入帳號:\n")
            password = input("請輸入密碼:\n")
            data = file_manager.read_json("manager.json", {})
            if username in data:
                if password == data[username]:
                    # Admin submenu.
                    while True:
                        print("---------------")
                        op = input("1.新增學生\n2.新增教授\n3.新增課程\n4.退出\n")
                        if op == "1":
                            # Add a student; the next id is 'std<count+1>'.
                            student_data = file_manager.read_json(
                                "student.json", {})
                            student_dataC = student_data.get("all_students")
                            student_size = student_data.get("nums")
                            std_num = "std" + str(int(student_size) + 1)
                            username = input("請輸入帳號:")
                            password = input("請輸入密碼:")
                            enter_year = input("請輸入入學年份(2020):")
                            gender = input("請輸入性別(male/female):")
                            birth = input("請輸入生日(1970/12/25):")
                            s = model.Student(std_num, username, password,
                                              enter_year, gender, birth)
                            student_dataC.append(s.__dict__)
                            student_data['all_students'] = student_dataC
                            student_data['nums'] = student_size + 1
                            file_manager.write_json('student.json',
                                                    student_data)
                            print("添加成功")
                            op = input("繼續請輸入1,退出請輸入2\n")
                            if op == "2":
                                break
                        elif op == "2":
                            # Add a professor; the next id is 'pid<count+1>'.
                            professor_data = file_manager.read_json(
                                "professor.json", {})
                            professor_dataC = professor_data.get(
                                "all_professor")
                            professor_size = professor_data.get("nums")
                            pid = "pid" + str(int(professor_size) + 1)
                            username = input("請輸入帳號:")
                            password = input("請輸入密碼:")
                            gender = input("請輸入性別(male/female):")
                            birth = input("請輸入生日(1970/12/25):")
                            p = model.Professor(pid, username, password,
                                                gender, birth)
                            professor_dataC.append(p.__dict__)
                            professor_data = {
                                "all_professor": professor_dataC,
                                "nums": professor_size + 1
                            }
                            file_manager.write_json("professor.json",
                                                    professor_data)
                            print("新增成功\n")
                        elif op == "3":
                            # Add a course record.
                            classData = file_manager.read_json(
                                "classname.json", {})
                            num = input("請輸入課程代號:")
                            classname = input("請輸入課程名稱:")
                            points = input("請輸入學分數:")
                            professor = input("請輸入授課教授:")
                            tp = input("請輸入必選修:")
                            c = model.Class(num, classname, points, professor,
                                            tp)
                            classData["all_classname"].append(c.__dict__)
                            file_manager.write_json("classname.json",
                                                    classData)
                            print("新增成功\n")
                        elif op == "4":
                            break
                        else:
                            print("輸入錯誤")
                    break
                else:
                    # Wrong password: restart the whole menu after a pause.
                    print('管理員帳號密碼錯誤,一秒後跳轉\n')
                    time.sleep(1)
                    start()
                    break
        elif operator == "2":
            # --- Student login ---
            username = input("請輸入帳號:\n")
            password = input("請輸入密碼:\n")
            data = file_manager.read_json("student.json", {})
            # NOTE(review): this loop prints "not found" and recurses into
            # start() for EVERY non-matching record, and `user` stays unbound
            # when nothing matches — confirm intended behavior.
            for ii in data['all_students']:
                if ii['username'] == username and password == ii['password']:
                    user = ii
                else:
                    print('學生不存在,一秒後跳轉\n')
                    time.sleep(1)
                    start()
            while True:
                # NOTE(review): `ii` is the last record iterated, not
                # necessarily the logged-in student (`user`).
                print(f"歡迎回來,學生{ii['username']}")
                op = input("-----------\n1.選課\n2.學生資訊\n3.退出\n")
                if op == "1":
                    # Course enrolment: list all courses, pick by number.
                    classData = file_manager.read_json('classname.json', {})
                    classData = classData['all_classname']
                    for i in classData:
                        print(
                            f"課程號碼: {i['num']}, 課程名稱: {i['classname']}, 學分數: {i['points']}, 教授: {i['professor']},類型:{i['tp']}"
                        )
                    op = input("請輸入課程號碼:\n")
                    for i in classData:
                        if i['num'] == op:
                            chooseClass = i
                    print(chooseClass['classname'])
                    c = model.Class(chooseClass['num'],
                                    chooseClass['classname'],
                                    chooseClass['points'],
                                    chooseClass['professor'],
                                    chooseClass['tp'])
                    # Append the chosen course to the logged-in student's record.
                    for i, j in enumerate(data['all_students']):
                        if j['username'] == user['username']:
                            data['all_students'][i]['classname'].append(
                                c.__dict__)
                    file_manager.write_json('student.json', data)
                    print("新增成功")
                elif op == "2":
                    # Show the student's profile and enrolled course names.
                    testclass = []
                    for k in user['classname']:
                        testclass.append(k['classname'])
                    print(
                        f"姓名:{user['username']},入學年:{user['enter_year']},性別:{user['gender']},生日:{user['birth']},修課:{testclass}"
                    )
                else:
                    break
        elif operator == "3":
            # --- Professor login ---
            username = input("請輸入帳號:\n")
            password = input("請輸入密碼:\n")
            data = file_manager.read_json("professor.json", {})
            # NOTE(review): same lookup caveats as the student branch above.
            for ii in data['all_professor']:
                if ii['username'] == username and password == ii['password']:
                    user = ii
                else:
                    print('教授不存在,一秒後跳轉\n')
                    time.sleep(1)
                    start()
            while True:
                print(f"歡迎回來,教授{user['username']}")
                op = input("-----------\n1.查看修課學生\n2.退出\n")
                if op == "1":
                    # Collect every student enrolled in one of this
                    # professor's courses.
                    std = []
                    checkdata = file_manager.read_json("student.json", {})
                    for i in checkdata['all_students']:
                        for j in i['classname']:
                            if j['professor'] == user['username']:
                                std.append(i['username'])
                    for i in range(len(std)):
                        print(f'學生:{std[i]}')
                elif op == "2":
                    break
        elif operator == "4":
            sys.exit(0)
        else:
            print("輸入錯誤!")
# Smoke-test script for the model module: construct a Student, call its
# instance method, then the module-level greeting helper.
import model

stu = model.Student("xiaobin", 24)
stu.say()
model.sayHello()
def post(self):
    """Create a new student from the parsed request body; returns (student, 201)."""
    payload = parser.parse_args()
    # A freshly generated UUID serves as the student's identifier.
    new_student = model.Student(str(uuid.uuid4()), payload['name'])
    # The birthday arrives as a 'YYYY-MM-DD' string.
    new_student.birthday = datetime.strptime(payload['birthday'], '%Y-%m-%d')
    return student_service.add_student(new_student), 201
def put(self):
    """Update an existing student from the parsed request body; returns 200."""
    payload = parser.parse_args()
    updated = model.Student(payload['id'], payload['name'])
    # The birthday arrives as a 'YYYY-MM-DD' string.
    updated.birthday = datetime.strptime(payload['birthday'], '%Y-%m-%d')
    student_service.update_student(updated)
    return 200
# NOTE(review): `data`, course2/course3 and the *_db path constants are
# defined earlier in this script (outside this excerpt).
data[course2.courseID] = course2
data[course3.courseID] = course3
common.pickleDump(data, course_db)

# Teachers. Fields appear to be: id, username, display name, md5-hashed
# password, classroom id — verify against model.Teacher's signature.
teacher1 = model.Teacher('1', 'oldboy', 'oldboy', common.md5Encode('123456'), '8001')
teacher2 = model.Teacher('2', 'alex', 'alex', common.md5Encode('123456'), '8001')
teacher3 = model.Teacher('3', 'jack', 'jack', common.md5Encode('123456'), '8002')
data = {}
data[teacher1.id] = teacher1
data[teacher2.id] = teacher2
data[teacher3.id] = teacher3
common.pickleDump(data, teacher_db)

# Class information.
classes1 = model.Classes('1', '1', '1', '20周', '2017-09-01', '8001')
classes2 = model.Classes('2', '2', '2', '20周', '2017-09-27', '8001')
classes3 = model.Classes('3', '3', '3', '18周', '2017-10-18', '8002')
data = {}
data[classes1.classesID] = classes1
data[classes2.classesID] = classes2
data[classes3.classesID] = classes3
common.pickleDump(data, classes_db)

# Students. The password field is already an md5 digest (of '123456').
stu = model.Student('1', 'test1', 'test1', 'e10adc3949ba59abbe56e057f20f883e', '8001')
data = {}
data[stu.id] = stu
common.pickleDump(data, student_db)
def main(args):
    """Aggregate evaluation over a set of snapshot directories.

    For every directory matching `<snapshot_prefix>_*`: evaluate the final
    student, the initialization checkpoint (source-only or adversarial),
    and — unless args.no_mid_results — the per-epoch checkpoints
    (classification/calibration error plus precision/coverage). Results are
    pickled to `<snapshot_prefix>.pk`.
    """
    # ## init a snapshot path
    # os.makedirs(args.train.save_root, exist_ok=True)

    # ## init logger
    # sys.stdout = Logger(os.path.join(args.train.save_root, 'out'))

    # ## print args
    # print_args(args)

    # Every directory matching the prefix is one experiment repeat.
    snap_list = glob.glob(args.snapshot_prefix + '_*')
    print(snap_list)
    print("# experiments = ", len(snap_list))

    ## init gpus
    if not args.cpu:
        print("##GPUs Available: ",
              len(tf.config.experimental.list_physical_devices('GPU')))
    print()

    ## init datasets: evaluation source/target plus a domain-mix loader
    print("## init datasets")
    ds_src = data.MultiSourceDataset(
        args.data.src,
        args.aug_params,
        batch_size=args.data.batch_size,
        train_shuffle=True,
        train_aug=True,
        val_shuffle=True,
        val_aug=True,
        color=False if args.data.img_size[2] == 1 else True,
        size=args.data.img_size[0],
        sample_ratio=args.data.sample_ratio[0],
        resize_pad=True if len(args.data.src) == 1 and args.data.src[0] == 'MNIST'
        and args.data.tar == 'SVHN' else False,  ##TODO: check if it's necessary
    )
    assert (len(args.aug_params) == 1)  ##TODO
    ds_tar = getattr(data, args.data.tar)(
        root=os.path.join('data', args.data.tar.lower()),
        batch_size=args.data.batch_size,
        aug_list=args.aug_params[0],
        train_shuffle=True,
        train_aug=True,
        val_shuffle=True,
        val_aug=True,
        color=False if args.data.img_size[2] == 1 else True,
        size=args.data.img_size[0],
        sample_ratio=args.data.sample_ratio[1])
    ds_dom = data.DomainDataset(
        data.MultiSourceDataset(
            args.data.src,
            args.aug_params,
            batch_size=args.data.batch_size,
            train_shuffle=True,
            train_aug=True,
            val_shuffle=True,
            val_aug=True,
            test_aug=True,  # augment all splits
            domain_id=1,
            color=False if args.data.img_size[2] == 1 else True,
            size=args.data.img_size[0],
            sample_ratio=args.data.sample_ratio[0],
            resize_pad=True if len(args.data.src) == 1 and args.data.src[0] == 'MNIST'
            and args.data.tar == 'SVHN' else False,  ##TODO: check if it's necessary
        ),
        getattr(data, args.data.tar)(
            root=os.path.join('data', args.data.tar.lower()),
            batch_size=args.data.batch_size,
            aug_list=args.aug_params[0],
            train_shuffle=True,
            train_aug=True,
            val_shuffle=True,
            val_aug=True,
            test_aug=True,  # augment all splits
            domain_id=0,
            color=False if args.data.img_size[2] == 1 else True,
            size=args.data.img_size[0],
            sample_ratio=args.data.sample_ratio[1]))  ##TODO: redundant

    # Datasets built with the *initialization* augmentation parameters.
    ds_src_init = data.MultiSourceDataset(
        args.data.src,
        args.aug_params_init,
        batch_size=args.data.batch_size,
        train_shuffle=True,
        train_aug=True,
        val_shuffle=True,
        val_aug=True,
        color=False if args.data.img_size[2] == 1 else True,
        size=args.data.img_size[0],  ##TODO
        sample_ratio=args.data.sample_ratio[0],
        resize_pad=True if len(args.data.src) == 1 and args.data.src[0] == 'MNIST'
        and args.data.tar == 'SVHN' else False,  ##TODO: check if it's necessary
    )
    assert (len(args.aug_params) == 1)  ##TODO
    ds_tar_init = getattr(data, args.data.tar)(
        root=os.path.join('data', args.data.tar.lower()),
        batch_size=args.data.batch_size,
        aug_list=args.aug_params_init[0],  ##TODO
        train_shuffle=True,
        train_aug=True,
        val_shuffle=True,
        val_aug=True,
        color=False if args.data.img_size[2] == 1 else True,
        size=args.data.img_size[0],
        sample_ratio=args.data.sample_ratio[1])
    ds_dom_init = data.DomainDataset(
        data.MultiSourceDataset(
            args.data.src,
            args.aug_params_init,
            batch_size=args.data.batch_size,
            train_shuffle=True,
            train_aug=True,
            val_shuffle=True,
            val_aug=True,
            test_aug=True,  # augment all splits
            domain_id=1,
            color=False if args.data.img_size[2] == 1 else True,
            size=args.data.img_size[0],
            sample_ratio=args.data.sample_ratio[0],
            resize_pad=True if len(args.data.src) == 1 and args.data.src[0] == 'MNIST'
            and args.data.tar == 'SVHN' else False,  ##TODO: check if it's necessary
        ),
        getattr(data, args.data.tar)(
            root=os.path.join('data', args.data.tar.lower()),
            batch_size=args.data.batch_size,
            aug_list=args.aug_params_init[0],  ##TODO
            train_shuffle=True,
            train_aug=True,
            val_shuffle=True,
            val_aug=True,
            test_aug=True,  # augment all splits
            domain_id=0,
            color=False if args.data.img_size[2] == 1 else True,
            size=args.data.img_size[0],
            sample_ratio=args.data.sample_ratio[1]))  ##TODO: redundant

    # Datasets used by the self-training student (tagged with domain ids).
    ds_src_self = data.MultiSourceDataset(
        args.data.src,
        args.aug_params,
        batch_size=args.data.batch_size,
        train_shuffle=True,
        train_aug=True,
        val_shuffle=True,
        val_aug=True,
        domain_id=1,
        color=False if args.data.img_size[2] == 1 else True,
        size=args.data.img_size[0],
        sample_ratio=args.data.sample_ratio[0],
        resize_pad=True if len(args.data.src) == 1 and args.data.src[0] == 'MNIST'
        and args.data.tar == 'SVHN' else False,  ##TODO: check if it's necessary
    )
    assert (len(args.aug_params) == 1)  ##TODO
    ds_tar_self = getattr(data, args.data.tar)(
        root=os.path.join('data', args.data.tar.lower()),
        batch_size=args.data.batch_size,
        aug_list=args.aug_params[0],
        train_shuffle=True,
        train_aug=True,
        val_shuffle=True,
        val_aug=True,
        domain_id=0,
        color=False if args.data.img_size[2] == 1 else True,
        size=args.data.img_size[0],
        sample_ratio=args.data.sample_ratio[1],
        double_aug=True if args.training_type == 'selfcon' else False,
    )
    print()

    # Optionally fold validation data into training loaders.
    if args.merge_train_val:
        ds_src.train = data.ChainLoader(ds_src.train, ds_src.val)
        ds_dom.train = data.ChainLoader(ds_dom.train, ds_dom.val)

    ## collect stats
    cls_error_init_list, cal_error_init_list = [], []
    cls_error_list, cal_error_list = [], []
    perf_epoch_list = []
    for snap_root in snap_list:
        ##
        ## final student
        ##
        ## a student model
        mdl_st_base = getattr(model, args.model.base)(num_class=args.model.n_labels,
                                                      input_shape=args.model.img_size)
        mdl_st = model.Student(args.model, mdl_st_base, ds_src_self, ds_tar_self,
                               ideal=args.ideal)
        ## load the final student
        mdl_st.model_base.load_weights(
            os.path.join(snap_root, 'model_params_final'))
        ## evaluate: classification error and expected calibration error
        learner = LearnerClsSelf(None, None, mdl_st, None)
        error, ece, *_ = learner.test(ds_tar.test, ld_name=args.data.tar,
                                      verbose=True)
        cls_error_list = np.append(cls_error_list, error.numpy())
        cal_error_list = np.append(cal_error_list, ece)
        print(
            f"[final, {args.snapshot_prefix}, cls error, n = {len(cls_error_list)}] mean = {np.mean(cls_error_list*100.0):.2f}%, std = {np.std(cls_error_list*100.0):.2f}%"
        )
        print(
            f"[final, {args.snapshot_prefix}, cal error, n = {len(cal_error_list)}] mean = {np.mean(cal_error_list*100.0):.2f}%, std = {np.std(cal_error_list*100.0):.2f}%"
        )

        ##
        ## init student
        ##
        ## load the init student (checkpoint name encodes the init method)
        mdl_fn_init = os.path.basename(
            glob.glob(os.path.join(snap_root, 'model_params_*init*.index'))[0])
        mdl_fn_init = mdl_fn_init[:mdl_fn_init.rfind('_')]
        if 'sourceonly' in mdl_fn_init:
            mdl_st.model_base.load_weights(
                os.path.join(snap_root, mdl_fn_init + '_best'))
            learner = LearnerClsSelf(None, None, mdl_st, None)
        else:
            assert ('advtr' in mdl_fn_init)
            ## init a adv model (domain discriminator used during adv. training)
            mdl_adv = getattr(
                model,
                args.train_advtr.model_advtr)(n_in=mdl_st.model_base.dim_feat)
            mdl_st_adv = model.DAN(mdl_st.model_base, mdl_adv)
            mdl_st_adv.load_weights(
                os.path.join(snap_root, mdl_fn_init + '_final'))
            ## init a learner
            learner = LearnerDACls(None, mdl_st_adv)
        ## evaluate
        error, ece, *_ = learner.test(ds_tar.test, ld_name=args.data.tar,
                                      verbose=True)
        cls_error_init_list = np.append(cls_error_init_list, error.numpy())
        cal_error_init_list = np.append(cal_error_init_list, ece)
        print(
            f"[init, {args.snapshot_prefix}, cls error, n = {len(cls_error_init_list)}] mean = {np.mean(cls_error_init_list*100.0):.2f}%, std = {np.std(cls_error_init_list*100.0):.2f}%"
        )
        print(
            f"[init, {args.snapshot_prefix}, cal error, n = {len(cal_error_init_list)}] mean = {np.mean(cal_error_init_list*100.0):.2f}%, std = {np.std(cal_error_init_list*100.0):.2f}%"
        )

        ##
        ## teacher performance at each step
        ##
        if args.no_mid_results:
            continue
        cls_error_epoch_list, cal_error_epoch_list, prec_epoch_list, cov_epoch_list = [], [], [], []
        for i_epoch in range(1, args.train.n_epochs):  # ignore the last
            ## load
            print("!!!! currently load best, but may load final later")
            mdl_st.model_base.load_weights(
                os.path.join(snap_root,
                             f'model_params_base_epoch_{i_epoch}_best'))
            ## cls/cal error
            learner = LearnerClsSelf(None, None, mdl_st, None)
            error, ece, *_ = learner.test(ds_tar.test, ld_name=args.data.tar,
                                          verbose=True)
            print(error.numpy(), ece)
            ## precision/coverage of the confidence predictor
            learner = LearnerConfPred(None, mdl_st.model_conf, mdl_st.model_base)
            ## set a constant threshold
            mdl_st.model_conf.T = tf.Variable(1.0 - args.train_conf.eps)
            ## test the model
            prec, n_conf, n = learner.test(ds_tar.test, ld_name='tar',
                                           verbose=True)
            cls_error_epoch_list.append(error.numpy())
            cal_error_epoch_list.append(ece)
            prec_epoch_list.append(prec.numpy())
            # Coverage = confident examples / all examples.
            cov_epoch_list.append(float(n_conf.numpy()) / float(n))
        perf_epoch_list.append({
            'cls_error': np.array(cls_error_epoch_list),
            'cal_error': np.array(cal_error_epoch_list),
            'prec': np.array(prec_epoch_list),
            'cov': np.array(cov_epoch_list)
        })
        print()

    ## save all aggregated statistics for later plotting
    fn = args.snapshot_prefix + '.pk'
    pickle.dump(
        {
            'cls_error_init': cls_error_init_list,
            'cal_error_init': cal_error_init_list,
            'cls_error': cls_error_list,
            'cal_error': cal_error_list,
            'perf_epoch': perf_epoch_list
        }, open(fn, 'wb'))
def register():
    """Instructor-only endpoint that registers a new student/mentor/instructor.

    GET renders the registration form; POST validates the form fields,
    resolves any listed mentors/students to existing users, creates the
    account, and commits it. All responses are JSON-encoded dicts with an
    "error" key (None on success).
    """
    current_user = flask_login.current_user
    # Only instructors may create accounts.
    if current_user.discriminator != "instructor":
        return json.dumps({"error": "No permission for user"})
    if request.method == "GET":
        return render_template("register.html", user=current_user)

    username = request.form.get("username")
    if not username:
        return json.dumps({"error": "Username required"})
    existing_user = model.User.query.filter_by(username=username).first()
    if existing_user:
        return json.dumps({"error": "Username already exists"})
    password = request.form.get("password")
    if not password:
        return json.dumps({"error": "Password required"})

    info = {
        "username": username,
        "password_hash": generate_password_hash(password),
        "first_name": request.form.get("first_name"),
        "last_name": request.form.get("last_name"),
        "email": request.form.get("email"),
        "location": request.form.get("location"),
        "skype_id": request.form.get("skype_id"),
        "vm_name": request.form.get("vm_name"),
        "worm_password": request.form.get("worm_password")
    }
    # Normalize empty strings / missing fields to None for the ORM.
    for key in info:
        if not info[key]:
            info[key] = None

    if info["email"]:
        existing_user = model.User.query.filter_by(email=info["email"]).first()
        if existing_user:
            return json.dumps({"error": "Email already used"})

    user_type = request.form.get("user_type")
    user = None
    if user_type == "student":
        # BUG FIX: form.get() returns None when the field is absent, so the
        # original `request.form.get("mentors").strip()` raised
        # AttributeError; default to "" before stripping.
        mentor_list = (request.form.get("mentors") or "").strip()
        if mentor_list:
            info["mentors"] = []
            mentor_usernames = [
                mentor_name.strip() for mentor_name in mentor_list.split(",")
            ]
            for mentor_username in mentor_usernames:
                mentor = model.Mentor.query.filter_by(
                    username=mentor_username).first()
                if not mentor:
                    return json.dumps(
                        {"error": "Mentor not found: " + mentor_username})
                info["mentors"].append(mentor)
        user = model.Student(**info)
    elif user_type == "mentor":
        # Same None-safety fix as for the mentors field above.
        student_list = (request.form.get("students") or "").strip()
        if student_list:
            info["students"] = []
            student_usernames = [
                student_name.strip() for student_name in student_list.split(",")
            ]
            for student_username in student_usernames:
                student = model.Student.query.filter_by(
                    username=student_username).first()
                if not student:
                    return json.dumps(
                        {"error": "Student not found: " + student_username})
                info["students"].append(student)
        user = model.Mentor(**info)
    else:
        user = model.Instructor(**info)

    db.session.add(user)
    db.session.commit()
    return json.dumps({"error": None, "info": {"username": username}})
def get_results_self(args, ds_src, ds_tar, ideal=False):
    """Evaluate the self-training student over a fixed list of snapshot dirs.

    For each experiment directory, rebuilds the student/teacher pair (sharing
    one temperature-calibrated base model), lets LearnerClsSelf resume from
    the snapshot, and tests on the target test set. Returns
    (cls_errors, cal_errors) as numpy arrays, one entry per experiment.
    """
    from learning import LearnerClsSelf
    from learning import TempScalingCls as CalibratorCls
    #exp_name_list = glob.glob('snapshots/m2m_self_small_?')
    # Hand-picked experiment directories; the '_ideal' infix selects the
    # ideal-label variant. Entry 4 is intentionally excluded.
    exp_name_list = [
        'snapshots/m2m_self_small%s_0' % ('_ideal' if ideal else ''),
        'snapshots/m2m_self_small%s_1' % ('_ideal' if ideal else ''),
        'snapshots/m2m_self_small%s_2' % ('_ideal' if ideal else ''),
        'snapshots/m2m_self_small%s_3' % ('_ideal' if ideal else ''),
        #'snapshots/m2m_self_small_4',
        'snapshots/m2m_self_small%s_5' % ('_ideal' if ideal else ''),
        'snapshots/m2m_self_small%s_6' % ('_ideal' if ideal else ''),
    ]
    error_cls_list, error_cal_list = [], []
    for exp_name in exp_name_list:
        # Minimal parameter object the learner expects.
        params = types.SimpleNamespace()
        params.save_root = exp_name
        params.find_best = False
        params.load_final = False
        print(params.save_root)
        ## init a base model (wrapped in temperature calibration)
        mdl_st_base = getattr(model, args.model.base)(num_class=args.data.n_labels,
                                                      input_shape=args.data.img_size)
        mdl_st_base = model.TempCls(mdl_st_base)
        mdl_st = model.Student(args.model, mdl_st_base, ds_src, ds_tar, ideal=ideal)
        #mdl_tc_base = getattr(model, args.model.base)(num_class=args.model.n_labels, input_shape=args.model.img_size)
        #mdl_tc_base = model.TempCls(mdl_tc_base)
        mdl_tc_base = mdl_st_base  #shared model
        mdl_tc = model.Teacher(args.model, mdl_tc_base, ds_src, ds_tar, ideal=ideal)
        ## init a learner
        learner = LearnerClsSelf(params, None, None, mdl_st, mdl_tc, ideal=ideal)
        ## train the model
        learner.train(ds_src, None, None)  ##TODO: teacher model contains the loaders
        ## test the model
        error_cls, error_cal, *_ = learner.test(ds_tar.test, ld_name=args.data.tar,
                                                verbose=True)
        error_cls_list.append(error_cls.numpy())
        error_cal_list.append(error_cal)
    return np.array(error_cls_list), np.array(error_cal_list)