def __delete_data(self, timestamp, dt):
    """Delete stale real-time data.

    Trims the raw count table to ``REAL_TIME_DATA_RESERVED`` days relative
    to *timestamp*, removes real-time rows for rooms no longer present in
    the camera/room mapping table, and trims the real-time people table to
    ``DATA_RESERVED_RAW_WINDOW`` days relative to *dt*.

    :param timestamp: current unix timestamp used as the retention anchor
    :param dt: current date string used as the retention anchor
    """
    self.__logger.info(
        'Only keep recent 7 days origin data, so delete those history data.'
    )
    # Trim the raw count table by unix time.
    sql = '''
        DELETE a FROM {0} a WHERE pose_stat_time <= {1}
    '''.format(
        Config.RAW_INPUT_TABLE_COUNT,
        CommonUtil.get_specific_unixtime(
            timestamp, days=Config.REAL_TIME_DATA_RESERVED))
    self.__db.delete(sql)
    self.__logger.info('Clean unnecessary teaching room.')
    # Drop real-time rows whose room no longer appears in the
    # school camera/room mapping table.
    sql = '''
        DELETE FROM {0} WHERE room_addr NOT IN
        (SELECT room_addr FROM {1} GROUP BY room_addr)
    '''.format(Config.REAL_TIME_PEOPLE_TABLE_RTL,
               Config.SCHOOL_CAMERA_ROOM_TABLE)
    self.__db.delete(sql)
    # Trim the real-time people table by date partition.
    sql = '''
        DELETE a FROM {0} a WHERE dt <= '{1}'
    '''.format(
        Config.REAL_TIME_PEOPLE_TABLE,
        CommonUtil.get_specific_date(dt, days=Config.DATA_RESERVED_RAW_WINDOW))
    self.__db.delete(sql)
    self.__logger.info('Finished to delete data.')
class sampleLoginPage(object):
    """Page object for the stealmylogin.com demo login page."""

    def __init__(self, driver):
        """Open the demo login page.

        :param driver: selenium webdriver instance
        """
        self.driver = driver
        self.logger = global_var.logger
        self.commonUtilHdl = CommonUtil(self.driver)
        self.page_url = 'http://www.stealmylogin.com/demo.html'
        self.driver.get(self.page_url)

    def verify_dummy_login(self):
        """Fill in the login form, submit it and verify the landing URL.

        All steps are attempted even after an earlier one fails.

        :return: True when every step succeeded, False otherwise.
        """
        result = True
        # Step 1 - enter the login user name.
        # NOTE(review): the username field is filled with
        # sampleValue.sample_password_value; presumably a dedicated
        # username value was intended -- confirm against sampleValue.
        if not self.commonUtilHdl.send_value(
                sampleLoginLocator.sample_page_login_username_field_xpath,
                sampleValue.sample_password_value):
            self.logger.error(
                "Not able to send User Name Value. Please check!!")
            result = False
        else:
            self.logger.info("Entering Username Successful")
            print('Done')
        # Step 2 - enter the login password.
        if not self.commonUtilHdl.send_value(
                sampleLoginLocator.sample_page_login_password_field_xpath,
                sampleValue.sample_password_value):
            self.logger.error(
                "Not able to send Password Value. Please check!!")
            result = False
        else:
            self.logger.info("Entering Password Successful")
        # Step 3 - click on the login button.
        if not self.commonUtilHdl.click_element(
                sampleLoginLocator.sample_page_login_button_xpath):
            self.logger.error(
                "Not able to Click on Login Button. Please check!!")
            result = False
        else:
            self.logger.info("Clicking on Login Button Successful!!")
        # Step 4 - wait for the redirect, then verify the landing URL.
        # NOTE(review): fixed 10s sleep; an explicit wait would be sturdier.
        time.sleep(10)
        # BUG FIX: idiomatic "not in" and corrected typo "does'nt".
        if sampleValue.expected_url not in self.commonUtilHdl.get_current_url():
            self.logger.error(
                "Current URL and Expected URL doesn't match. Please check!!")
            result = False
        else:
            self.logger.info("Current URL and Expected URL Matching.!!")
        return result
def preprocessor(self, day):
    """Dispatch the preprocessing pipeline for *day*.

    Three mutually exclusive modes: teaching-effect evaluation
    (``self.__teaching``), S-T based teacher evaluation (``self.__teacher``),
    and the default student-based teaching-status evaluation.  Within each
    branch the step order matters: truncate first, then update, then
    aggregate.

    :param day: date string to preprocess
    """
    CommonUtil.verify()
    if self.__teaching:
        # Teaching-effect evaluation: preprocess teaching data.
        self.__logger.info('教学效果评估--预处理数据')
        self.truncate_teaching_data(day)
        self.update_teaching_mental()
        self.update_teaching_study()
        self.update_teaching_data(day)
        self.preprocess_aggregate_teaching(day)
    elif self.__teacher:
        # Teacher evaluation based on S-T analysis.
        self.__logger.info('基于S-T评估教师教学情况')
        self.truncate_teacher_data(day)
        self.filter_student_for_teacher(day)
        self.process_student_actions(day)
        self.update_teacher_course(day)
        self.process_teacher_ontime(day)
        self.process_teacher_emotion(day)
        self.process_teacher_behavior(day)
    else:
        # Default: evaluate teaching status from student data.
        self.__logger.info('基于学生评估教学状态--预处理数据')
        self.truncate_data(day)
        self.update_face_id()
        self.update_course(day)
        self.preprocess_aggregate(day)
        self.update_student_info(day)
def Analysis(self, i_dt):
    """Collect grade level and study status per class, date and course.

    :param i_dt: anchor date passed to get_calc_dates
    :return: ``{class_id: {date: {course: [grade_level, study_score]}}}``
             restricted to courses that are not filtered out and that
             have a grade level; ``{}`` when there is nothing to compute.
    """
    CommonUtil.verify()
    self.__logger.info("Begin to analyze grade and study_status for class")
    dates = self.get_calc_dates(i_dt)
    if not dates:
        self.__logger.info("No dates that needs to be computed")
        return {}
    metrics = {}
    for row in dates:
        cid = self.__delimiter.join(row[:-1])
        day = row[3]
        self.__logger.debug("class_id: {0}, Date: {1}".format(cid, day))
        per_class = metrics.setdefault(cid, {})
        # Reset this class/date slot unconditionally, as the original did.
        per_class[day] = {}
        per_day = per_class[day]
        grade_levels = self.get_course_grade_level(row)
        lookback = CommonUtil.get_specific_date(
            day, Config.ANALYSIS_LOOKBACKWINDOW)
        study_levels = self.get_course_study_status(lookback, row)
        for course, score in study_levels.items():
            # Skip filtered courses and those without a grade level.
            if course in Config.FILTER_COURSES:
                continue
            if course not in grade_levels:
                continue
            per_day[course] = [grade_levels[course], score]
    self.__logger.debug(str(metrics))
    self.__logger.info("Finished to analyze grade and study_status")
    return metrics
def process(self, day):
    """Run attendance statistics for *day*.

    A configured date other than '-1' overrides the caller-supplied day.
    """
    CommonUtil.verify()
    target = day if self.__date == '-1' else self.__date
    self.truncate_data(target)
    self.stat_attendance(target)
    self.stat_exist_attendance(target)
def process(self):
    """Compute the real-time number of students per teaching room.

    Purges stale rows first, then produces the current people count and
    history rows, writing results to ``Config.REAL_TIME_PEOPLE_TABLE``.
    """
    CommonUtil.verify()
    self.__logger.info(
        "Try to real time the number of students for each teaching room, then output results to the table {0}"
        .format(Config.REAL_TIME_PEOPLE_TABLE))
    unixtimestamp = int(time.time())
    dt = CommonUtil.get_date_day()
    # Clean expired data before producing fresh counts.
    self.__delete_data(unixtimestamp, dt)
    self.__process_people_count(unixtimestamp)
    self.__process_history(unixtimestamp)
    self.__logger.info('Done')
def calculate_teacher_metrics(self, dt):
    """Compute per-teacher emotion, behaviour and overall score metrics.

    emotions return {
        'teacher_id@teacher_name' => {
            'college@grade@class' => {
                'course_name1' => {'happy' => value, 'normal' => value, 'angry' => value},
                'course_name2' => {'happy' => value, 'normal' => value, 'angry' => value}
            }
        }
    }
    behaviors return {
        'teacher_id@teacher_name' => {
            'college@grade@class' => {
                'course_name1' => {
                    'scores': (Rt_value, Ch_value),
                    'behaviors': [(unix_time1, action1), (unix_time2, action2), (unix_time3, action3)]
                }
            }
        }
    }
    scores return {
        'teacher_id@teacher_name' => {
            'college@grade@class' => {
                'course_name1' => {'score' => value, 'emotion' => value, 'behavior' => value, 'ontime' => value},
                'course_name2' => {'score' => value, 'emotion' => value, 'behavior' => value, 'ontime' => value}
            }
        }
    }
    """
    CommonUtil.verify()
    self.__logger.info("==== Try to compute teacher metrics ====")
    # Emotion distribution per teacher/class/course.
    self.__logger.info("==== Begin to compute emotion ====")
    emotions = self.count_emotions(dt)
    self.__logger.info("==== End to compute emotion ====")
    # S-T (student-teacher) behaviour analysis.
    self.__logger.info("==== Begin to compute S-T analysis ====")
    actions = self.count_behaviors(dt)
    behaviors = self.compute_teaching_behaviors(actions)
    self.__logger.info("==== End to compute S-T analysis ====")
    # Final score blends emotion, behaviour and punctuality.
    self.__logger.info("==== Begin to estimate teaching score ====")
    ontimes = self.count_ontimes(dt)
    scores = self.compute_teaching_score(emotions, behaviors, ontimes)
    self.__logger.info("==== End to estimate teaching score ====")
    return emotions, behaviors, scores
def process(self):
    """Back up the raw input table and then empty it.

    Copies every row of ``Config.RAW_INPUT_TABLE`` into
    ``Config.RAW_INPUT_TABLE_BAK``, then truncates the source table.
    """
    CommonUtil.verify()
    self.__logger.info('开始备份数据,将数据从{0}备份到{1}.'.format(
        Config.RAW_INPUT_TABLE, Config.RAW_INPUT_TABLE_BAK))
    # Copy all rows into the backup table.
    sql = '''
        INSERT INTO {0} SELECT * FROM {1}
    '''.format(Config.RAW_INPUT_TABLE_BAK, Config.RAW_INPUT_TABLE)
    self.__db.insert(sql)
    self.__logger.info('开始将表{0}中的数据删除.'.format(Config.RAW_INPUT_TABLE))
    # Empty the source table once it has been backed up.
    sql = '''
        TRUNCATE TABLE {0}
    '''.format(Config.RAW_INPUT_TABLE)
    self.__db.truncate(sql)
    self.__logger.info('备份数据结束')
def truncate_data(self, day):
    '''Truncate all data of intermediate tables.

    Drops the temporary track/course/aggregate tables outright, then
    trims the training table, which keeps roughly half a year of history
    for relationship and class-interest calculations.

    :param day: date string being (re)processed
    '''
    self.__logger.info(
        "Try to truncate all data of intermediate tables in {0}".format(
            Config.INPUT_DB_DATABASE))
    tmp_tables = [
        Config.INTERMEDIATE_TRACK_TABLE, Config.INTERMEDIATE_COURSE_TABLE,
        Config.INTERMEDIATE_AGG_TABLE
    ]
    for table_name in tmp_tables:
        self.__logger.info("Begin to drop {0}".format(table_name))
        sql = '''
            DROP TABLE {0}
        '''.format(table_name)
        self.__db.truncate(sql)
        self.__logger.info("End to drop {0}".format(table_name))
    # Config.INTERMEDIATE_TABLE_TRAIN must keep historical data (about
    # half a year) for relationship and class-interest analysis, so only
    # the current day and rows past the retention cut-off are deleted.
    self.__logger.info(
        "Begin to delete unnecessary data for the table {0}.".format(
            Config.INTERMEDIATE_TABLE_TRAIN))
    sql = '''
        DELETE a FROM {0} a WHERE dt = '{1}' OR dt < '{2}'
    '''.format(
        Config.INTERMEDIATE_TABLE_TRAIN, day,
        CommonUtil.get_specific_date(day, Config.DATA_RESERVED_WINDOW))
    self.__db.delete(sql)
    self.__logger.info(
        "End to delete unnecessary data for the table {0}.".format(
            Config.INTERMEDIATE_TABLE_TRAIN))
    self.__logger.info(
        "End to truncate or delete all data of intermediate tables in {0}".
        format(Config.INPUT_DB_DATABASE))
def truncate_teaching_data(self, day): '''Truncate all data of intermediate teaching tables''' # 对于Config.INTERMEDIATE_TEACHING_TABLE(半年) 便于计算人际关系和课堂兴趣 self.__logger.info("Try to delete teaching data in {0}".format( Config.INPUT_DB_DATABASE)) del_tables = [ Config.INTERMEDIATE_TEACHING_MENTAL_TABLE, Config.INTERMEDIATE_TEACHING_STUDY_TABLE, Config.INTERMEDIATE_TEACHING_TABLE ] for table in del_tables: self.__logger.info('Begin to drop table {0}'.format(table)) sql = ''' DROP TABLE {0} '''.format(table) self.__db.delete(sql) sql = ''' DELETE a FROM {0} a WHERE dt = '{1}' OR dt < '{2}' '''.format( Config.INTERMEDIATE_TEACHING_AGG_TABLE, day, CommonUtil.get_specific_date(day, Config.DATA_RESERVED_WINDOW)) self.__db.delete(sql) self.__logger.info("End to delete teaching data in {0}.".format( Config.INPUT_DB_DATABASE))
def count_interest(self, end_date):
    '''Count "good study status" days per class, student and course.

    Scans the UI course table over ``Config.LOOKBACKWINDOW`` days ending
    before *end_date* and counts the days whose ``student_study_stat`` is
    '0' or '1' (very good / good).

    :param end_date: exclusive end date of the window
    :return: ``{class_id: {student_number: {course_name: day_count}}}``
    '''
    sql = '''
        SELECT CONCAT(college_name, grade_name, class_name) AS class_id,
               student_number, course_name, student_study_stat
        FROM {2} WHERE dt >= '{0}' AND dt < '{1}'
    '''.format(
        CommonUtil.get_specific_date(end_date, Config.LOOKBACKWINDOW),
        end_date, Config.OUTPUT_UI_COURSE_TABLE)
    res = {}
    for row in self.__db.select(sql):
        key = row[0].encode('utf-8')  # py2: normalise unicode to utf-8
        if key not in res:
            res[key] = {}
        subKey = row[1].encode('utf-8')
        if subKey not in res[key]:
            res[key][subKey] = {}
        ssKey = row[2].encode('utf-8')
        if ssKey not in res[key][subKey]:
            res[key][subKey][ssKey] = 0
        if row[3] == '0' or row[3] == '1':
            # Total number of days rated "very good" or "good".
            res[key][subKey][ssKey] += 1
        else:
            continue
    self.__logger.debug(str(res))
    return res
def count_course_interest_threshold(self, end_date):
    """Per class and course, the study-status-days threshold.

    Courses meet on different numbers of days, so the threshold is the
    larger of a fixed lower bound and a configured ratio of the course's
    distinct teaching days within the lookback window.

    :param end_date: exclusive end date of the window
    :return: ``{class_id: {course_name: threshold}}``
    """
    sql = '''
        SELECT class_id, course_name, COUNT(DISTINCT dt) as total
        FROM (
            SELECT CONCAT(college_name, grade_name, class_name) AS class_id,
                   course_name, dt
            FROM {2} WHERE dt >= '{0}' AND dt < '{1}'
        ) t
        GROUP BY class_id, course_name;
    '''.format(
        CommonUtil.get_specific_date(end_date, Config.LOOKBACKWINDOW),
        end_date, Config.OUTPUT_UI_COURSE_TABLE)
    res = {}
    for row in self.__db.select(sql):
        key = row[0].encode('utf-8')  # py2: normalise unicode to utf-8
        if key not in res:
            res[key] = {}
        course = row[1].encode('utf-8')
        # Threshold = max(lower bound, ceil(days * ratio)).
        res[key][course] = max(
            Config.INTEREST_THRESHOLD['STUDY_STATUS_DAYS_LOWER'],
            math.ceil(
                int(row[2]) *
                Config.INTEREST_THRESHOLD['STUDY_STATUS_DAYS_RATIO']))
    self.__logger.debug(str(res))
    return res
def __compute_final_score(self, value):
    """Squash *value* into the final composite score with a sigmoid.

    alpha is fixed at 3.0 because a distribution of [0.6, 0.6, 0.6]
    should map back to a score of 0.6, which solves to alpha = 3.
    """
    return CommonUtil.sigmoid(
        x=value, alpha=3.0)
def __init__(self):
    """Initialise the table-creation helper.

    Parses CLI arguments and opens a database connection; a database
    name supplied on the command line overrides the configured default.
    """
    super(CreateTable, self).__init__()
    self.__configs = CommonUtil.parse_arguments()
    # BUG FIX: the original referenced the bare name ``configs`` (a
    # NameError at runtime); the parsed arguments live in self.__configs.
    db_name = (Config.INPUT_DB_DATABASE
               if self.__configs['dbname'] is None
               else self.__configs['dbname'])
    self.__db = DbUtil.DbUtil(
        self.__configs['dbhost'], Config.INPUT_DB_USERNAME,
        Config.INPUT_DB_PASSWORD, db_name, Config.INPUT_DB_CHARSET)
    self.__logger = Logger.Logger(__name__)
def truncate_data(self, start_time, end_time, day):
    '''Truncate all data of intermediate tables.

    Empties the per-run intermediate tables, trims the training table to
    its retention window, removes the day's attendance rows, and purges
    guest records from the student/class table.

    :param start_time: start unixtime of the window being reprocessed
    :param end_time: end unixtime of the window being reprocessed
    :param day: date string being reprocessed
    '''
    self.__logger.info(
        "Try to truncate all data of intermediate tables in {0}".format(
            Config.INPUT_DB_DATABASE))
    m_tables = [
        Config.INTERMEDIATE_TRACK_TABLE, Config.INTERMEDIATE_TABLE,
        Config.INTERMEDIATE_RES_TABLE
    ]
    for table_name in m_tables:
        self.__logger.info("Begin to truncate {0}".format(table_name))
        sql = '''
            TRUNCATE TABLE {0}
        '''.format(table_name)
        self.__db.truncate(sql)
        self.__logger.info("End to truncate {0}".format(table_name))
    # Config.INTERMEDIATE_TABLE_TRAIN keeps about half a year of history
    # for relationship and class-interest analysis: delete only the
    # current window plus rows older than the retention cut-off.
    self.__logger.info(
        "Begin to delete unnecessary data for the table {0}.".format(
            Config.INTERMEDIATE_TABLE_TRAIN))
    sql = '''
        DELETE FROM {0}
        WHERE (pose_stat_time >= {1} AND pose_stat_time < {2})
           OR (pose_stat_time < {3})
    '''.format(
        Config.INTERMEDIATE_TABLE_TRAIN, start_time, end_time,
        CommonUtil.get_specific_unixtime(start_time,
                                         Config.DATA_RESERVED_WINDOW))
    self.__db.delete(sql)
    self.__logger.info(
        "End to delete unnecessary data for the table {0}.".format(
            Config.INTERMEDIATE_TABLE_TRAIN))
    # Remove the day's student attendance rows.
    self.__logger.info(
        "Begin to delete unnecessary data from the table {0}.".format(
            Config.STUDENT_ATTENDANCE))
    sql = '''
        DELETE FROM {0} WHERE dt = '{1}'
    '''.format(Config.STUDENT_ATTENDANCE, day)
    self.__db.delete(sql)
    self.__logger.info(
        "End to delete unnecessary data from the table {0}.".format(
            Config.STUDENT_ATTENDANCE))
    # Purge every row whose student_name carries the guest marker from
    # Config.SCHOOL_STUDENT_CLASS_TABLE.
    sql = '''
        DELETE FROM {0} WHERE student_name LIKE '%{1}%'
    '''.format(Config.SCHOOL_STUDENT_CLASS_TABLE, Config.PREFIX_GUEST)
    self.__db.delete(sql)
    self.__logger.info("End to delete all 嘉宾 records")
    self.__logger.info(
        "End to truncate or delete all data of intermediate tables in {0}".
        format(Config.INPUT_DB_DATABASE))
def getAvgClose(self, stockEneInfo, index, days):
    """Average closing price over up to *days* records ending at *index*.

    Walks backwards from ``self._INFOLIST[index]``, collecting closes of
    records dated no earlier than *days* calendar days before the anchor
    record's date.

    :param stockEneInfo: record providing the anchor date (``getDate()``)
    :param index: index of the anchor record in ``self._INFOLIST``
    :param days: number of records / calendar days to look back
    :return: arithmetic mean of the collected closing prices
    :raises ZeroDivisionError: if no record falls inside the window
    """
    date = stockEneInfo.getDate()
    commonUtil = CommonUtil()
    dateBefore = commonUtil.getDaysAfter(date, -days)
    closeList = []
    for i in range(0, days):
        if index < i:
            # Not enough history before ``index``; stop scanning.
            break
        infoBefore = self._INFOLIST[index - i]
        if infoBefore.getDate() >= dateBefore:
            closeList.append(infoBefore.getClose())
    # IMPROVED: built-in sum() replaces the manual accumulation loop.
    # The 0.0 start value preserves the original float division under
    # Python 2 even when closes are ints.
    # NOTE(review): an empty closeList still raises ZeroDivisionError,
    # matching the original behaviour -- confirm whether a default is
    # wanted instead.
    return sum(closeList, 0.0) / len(closeList)
def get_Optimum_Param(self, ene_range):
    """Grid-search ENE parameters and return the best-scoring sets.

    Iterates every (upper, lower, days) combination described by
    *ene_range*, scores each candidate via get_Standard_Frequency, and
    returns the parameter sets that achieve the maximum frequency.

    NOTE(review): Python 2 print statements; debug output left as-is.
    """
    frequencyList = []
    paramList = []
    for i in range(ene_range.get_upper_min(), ene_range.get_upper_max() + 1):
        for j in range(ene_range.get_lower_min(),
                       ene_range.get_lower_max() + 1):
            for k in ene_range.getDays():
                # Build one candidate parameter set.
                param = EneParameter()
                param.set_code(ene_range.get_code())
                param.set_days(k)
                param.set_lower(j)
                param.set_upper(i)
                param.set_start_date(ene_range.get_start_date())
                param.set_end_date(ene_range.get_end_date())
                frequency = self.get_Standard_Frequency(param)
                print frequency
                param.setFrequency(frequency)
                frequencyList.append(frequency)
                paramList.append(param)
    commonutil = CommonUtil()
    # Indices of all candidates that share the maximum frequency.
    indexList = commonutil.getArrayListMax(frequencyList)
    optimumParamList = []
    for index in indexList:
        param = paramList[index]
        optimumParamList.append(param)
        print(param.get_code())
        print(param.get_upper())
        print(param.get_lower())
        print(param.get_days())
        print(param.getFrequency())
        print '---------------------------'
    return optimumParamList
def compute_teaching_score(self, emotions, behaviors, ontimes):
    """Score each teacher's teaching state per class and course.

    Combines emotion, punctuality and S-T behaviour sub-scores into a
    final sigmoid-squashed score; missing ontime/behaviour data falls
    back to ``Config.TEACHER_ESTIMATE_DEFAULT``.

    :return: ``{teacher: {class_id: {course:
              {'score', 'emotion', 'behavior', 'ontime'}}}}``
    """
    result = {}
    for teacher in emotions.keys():
        if teacher not in result:
            result[teacher] = {}
        for class_id in emotions[teacher].keys():
            if class_id not in result[teacher]:
                result[teacher][class_id] = {}
            for course, rows in emotions[teacher][class_id].items():
                if course not in result[teacher][class_id]:
                    result[teacher][class_id][course] = {}
                # Emotion sub-score (always available from emotions).
                result[teacher][class_id][course][
                    'emotion'] = self.__compute_emotion_score(rows)
                # Punctuality sub-score (default when no data).
                if teacher in ontimes and class_id in ontimes[
                        teacher] and course in ontimes[teacher][class_id]:
                    result[teacher][class_id][course][
                        'ontime'] = self.__compute_ontime_score(
                            ontimes[teacher][class_id][course])
                else:
                    result[teacher][class_id][course][
                        'ontime'] = Config.TEACHER_ESTIMATE_DEFAULT
                # S-T behaviour sub-score (default when no data).
                if teacher in behaviors and class_id in behaviors[
                        teacher] and course in behaviors[teacher][class_id]:
                    result[teacher][class_id][course][
                        'behavior'] = self.__compute_behavior_score(
                            behaviors[teacher][class_id][course]
                            ['behaviors'])
                else:
                    result[teacher][class_id][course][
                        'behavior'] = Config.TEACHER_ESTIMATE_DEFAULT
                # Blend the three sub-scores into the final score.
                result[teacher][class_id][course][
                    'score'] = self.__compute_final_score(
                        value=CommonUtil.get_score_by_triangle([
                            result[teacher][class_id][course]['emotion'],
                            result[teacher][class_id][course]['ontime'],
                            result[teacher][class_id][course]['behavior']
                        ]))
    return result
def truncate_data(self, day): '''Truncate all data of intermediate tables''' # 删除学生出勤相关数据 attendances = [ Config.STUDENT_ATTENDANCE, Config.STUDENT_ATTENDANCE_EXIST ] for table_name in attendances: self.__logger.info( "Begin to delete unnecessary data from the table {0}.".format( table_name)) sql = ''' DELETE a FROM {0} a WHERE dt = '{1}' OR dt < '{2}' '''.format( table_name, day, CommonUtil.get_specific_date( day, Config.DATA_RESERVED_ATTENDANCE_WINDOW)) self.__db.delete(sql) self.__logger.info( "End to delete unnecessary data from the table {0}.".format( table_name)) self.__logger.info( "End to truncate or delete all data of intermediate tables in {0}". format(Config.INPUT_DB_DATABASE))
def __init__(self):
    """Wire up the real-time count worker with parsed CLI arguments."""
    super(Main, self).__init__()
    self.doer = RTCount(CommonUtil.parse_arguments())
def __init__(self, driver):
    """Open the demo login page with the supplied selenium driver.

    :param driver: selenium webdriver instance
    """
    self.driver = driver
    self.logger = global_var.logger
    self.commonUtilHdl = CommonUtil(self.driver)
    self.page_url = 'http://www.stealmylogin.com/demo.html'
    self.driver.get(self.page_url)
def __init__(self):
    """Wire up the attendance worker with parsed CLI arguments."""
    super(Main, self).__init__()
    self.doer = Attendance(CommonUtil.parse_arguments())
# -*- coding: utf-8 -*- from win32com.client import Dispatch import pythoncom from CommonUtil import CommonUtil #from Constants import * ComUtil = CommonUtil() #import win32com.client #a=win32com.client.Dispatch("access.application.8") #self.xl = Dispatch("Excel.Application") try: aces=Dispatch("Access.Application") except pythoncom.com_error, (hr, msg, exc, arg): ComUtil.printPythonComError(hr, msg, exc, arg) aces.Visible=1 aces.OpenCurrentDatabase("C://_projectautomation/source/test/Access/MyDB.mdb") aDoCmd = aces.DoCmd QueryName='010_1' View = 0 """ View 선택 요소로서 AcView 형식입니다. AcView는 다음 AcView 상수 중 하나를 사용할 수 있습니다. acViewDesign 1
def analyze_teaching_scores(self, dt):
    """Compute and store combined teaching-effect scores per class/course.

    Averages daily student emotion / mental / study scores over the
    lookback window and joins them with class positivity, interactivity
    and concentration ratios, inserting one row per class and course into
    ``Config.OUTPUT_TEACHING_CLASS_SCORES`` for date *dt*.  Teacher score
    columns are left NULL here.
    """
    # Re-running the same date must not duplicate rows.
    sql = '''
        DELETE FROM {0} WHERE dt = '{1}'
    '''.format(Config.OUTPUT_TEACHING_CLASS_SCORES, dt)
    self.__db.delete(sql)
    # Placeholders: {0} target table, {1} dt (window end), {2} window
    # start, {3} per-student daily table, {4} per-class daily table,
    # {5} weight credited for "normal" days.
    sql = '''
        INSERT INTO {0}
        SELECT
            t2.college_name, t2.grade_name, t2.class_name,
            NULL AS course_id, t2.course_name, t6.student_study_score,
            t2.student_emotion_score, t4.student_mental_score,
            t11.class_concentration_score, t11.class_interactivity_score,
            t11.class_positivity_score, NULL AS teacher_attitude_score,
            NULL AS teacher_emotion_score, NULL AS teacher_ethics_score,
            '{1}' AS dt
        FROM (
            SELECT college_name, grade_name, class_name, course_name,
                   ROUND(AVG(score), 2)AS student_emotion_score
            FROM (
                SELECT dt, college_name, grade_name, class_name, course_name,
                       SUM(rate) AS score
                FROM {3}
                WHERE dt >= '{2}' AND dt <= '{1}' AND action_type = 3 AND
                      action_status != '2'
                GROUP BY dt, college_name, grade_name, class_name, course_name
            ) t1
            GROUP BY college_name, grade_name, class_name, course_name
        ) t2
        JOIN (
            SELECT college_name, grade_name, class_name, course_name,
                   ROUND(AVG(score), 2) AS student_mental_score
            FROM (
                SELECT dt, college_name, grade_name, class_name, course_name,
                       SUM(rate) AS score
                FROM {3}
                WHERE dt >= '{2}' AND dt <= '{1}' AND action_type = 4 AND
                      action_status != '2'
                GROUP BY dt, college_name, grade_name, class_name, course_name
            ) t3
            GROUP BY college_name, grade_name, class_name, course_name
        ) t4 ON t2.college_name = t4.college_name AND
                t2.grade_name = t4.grade_name AND
                t2.class_name = t4.class_name AND
                t2.course_name = t4.course_name
        JOIN (
            SELECT college_name, grade_name, class_name, course_name,
                   ROUND(AVG(score), 2) AS student_study_score
            FROM (
                SELECT dt, college_name, grade_name, class_name, course_name,
                       SUM(rate) AS score
                FROM {3}
                WHERE dt >= '{2}' AND dt <= '{1}' AND action_type = 5 AND
                      action_status != '3'
                GROUP BY dt, college_name, grade_name, class_name, course_name
            ) t5
            GROUP BY college_name, grade_name, class_name, course_name
        ) t6 ON t2.college_name = t6.college_name AND
                t2.grade_name = t6.grade_name AND
                t2.class_name = t6.class_name AND
                t2.course_name = t6.course_name
        JOIN (
            SELECT t7.college_name, t7.grade_name, t7.class_name,
                   t7.course_name,
                   ROUND(t8.total / t7.total, 2) AS class_positivity_score,
                   ROUND(t9.total / t7.total, 2) AS class_interactivity_score,
                   ROUND(t10.total / t7.total, 2) AS class_concentration_score
            FROM (
                SELECT college_name, grade_name, class_name, course_name,
                       COUNT(DISTINCT dt) AS total
                FROM {4}
                WHERE dt >= '{2}' AND dt <= '{1}'
                GROUP BY college_name, grade_name, class_name, course_name
            ) t7
            JOIN (
                SELECT college_name, grade_name, class_name, course_name,
                       SUM(IF(class_positivity != '1', 1, {5})) AS total
                FROM {4}
                WHERE class_positivity != '2' AND dt >= '{2}' AND dt <= '{1}'
                GROUP BY college_name, grade_name, class_name, course_name
            ) t8 ON t7.college_name = t8.college_name AND
                    t7.grade_name = t8.grade_name AND
                    t7.class_name = t8.class_name AND
                    t7.course_name = t8.course_name
            JOIN (
                SELECT college_name, grade_name, class_name, course_name,
                       SUM(IF(class_interactivity != '2', 1, {5})) AS total
                FROM {4}
                WHERE class_interactivity != '3' AND dt >= '{2}' AND
                      dt <= '{1}'
                GROUP BY college_name, grade_name, class_name, course_name
            ) t9 ON t7.college_name = t9.college_name AND
                    t7.grade_name = t9.grade_name AND
                    t7.class_name = t9.class_name AND
                    t7.course_name = t9.course_name
            JOIN (
                SELECT college_name, grade_name, class_name, course_name,
                       SUM(IF(class_concentration != '1', 1, {5})) AS total
                FROM {4}
                WHERE class_concentration != '2' AND dt >= '{2}' AND
                      dt <= '{1}'
                GROUP BY college_name, grade_name, class_name, course_name
            ) t10 ON t7.college_name = t10.college_name AND
                     t7.grade_name = t10.grade_name AND
                     t7.class_name = t10.class_name AND
                     t7.course_name = t10.course_name
        ) t11 ON t2.college_name = t11.college_name AND
                 t2.grade_name = t11.grade_name AND
                 t2.class_name = t11.class_name AND
                 t2.course_name = t11.course_name
    '''.format(
        Config.OUTPUT_TEACHING_CLASS_SCORES, dt,
        CommonUtil.get_specific_date(dt, Config.ANALYSIS_LOOKBACKWINDOW),
        Config.OUTPUT_TEACHING_CLASS_STUDENT,
        Config.OUTPUT_TEACHING_CLASS_DAILY, Config.TEACHING_NORMAL_WEIGHT)
    self.__db.insert(sql)
        # NOTE(review): fragment -- the enclosing ``def run(self)`` and its
        # outer ``for item in self.__students`` loop start outside this
        # chunk; indentation restored to match that nesting.
        for index in range(0, len(self.__courses)):
            # The student's interest course gets fixed, high levels.
            grade_level = str(
                round(random.uniform(-1, 0.7), 2
                      ) if self.__courses[index] != item[2] else 0.75)
            study_level = str(
                round(random.uniform(-1, 0.7), 2
                      ) if self.__courses[index] != item[2] else 0.85)
            sql3 = '''
                insert into `student_mental_status_grade_study_daily`(`student_number`,`course_name`,`grade_level`,`study_level`,`dt`) values ('{0}', '{1}', '{2}', '{3}', '{4}')
            '''.format(item[0], self.__courses[index], grade_level,
                       study_level, cur_dt)
            self.__db.insert(sql3)
            sql5 = '''
                insert into `school_student_attendance_info`(`course_name`,`class_name`,`grade_name`,`start_time`,`end_time`,`student_number`,`student_name`,`dt`) values ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}')
            '''.format(self.__courses[index], self.__class, self.__grade,
                       self.__times[index][0], self.__times[index][1],
                       item[0], item[1], cur_dt)
            # sql6 = '''
            # insert into `school_student_attendance_exist_info`(`course_name`,`class_name`,`grade_name`,`start_time`,`end_time`,`student_number`,`student_name`,`dt`) values ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}')
            # '''.format(self.__courses[index], self.__class, self.__grade, self.__times[index][0], self.__times[index][1], item[0], item[1], cur_dt)
            # Only ~30% of course slots get an attendance row, to mock
            # absences.
            seed = random.randint(1, 10)
            if seed >= 8:
                self.__db.insert(sql5)
    self.__logger.info('Done')


if __name__ == '__main__':
    # Script entry point: generate mock data with parsed CLI arguments.
    obj = MockData(CommonUtil.parse_arguments())
    obj.run()
def run(self):
    """Generate one day of mock student data.

    Clears previously generated rows for the target date, then inserts
    per-student mental-status rows, per-course grade/study rows and
    (randomly, ~30% of course slots) attendance rows.
    """
    cur_dt = CommonUtil.get_date_day() if self.__date == '-1' else self.__date
    # Remove rows left over from a previous run for the same date.
    self.__db.delete(
        "delete from student_mental_status_ld where dt = '{0}'".format(
            cur_dt))
    # BUG FIX: the two statements below were missing the closing quote
    # around the date literal ("... dt = '{0}"), producing invalid SQL.
    self.__db.delete(
        "delete from student_mental_status_interest_daily where dt = '{0}'".
        format(cur_dt))
    self.__db.delete(
        "delete from student_mental_status_grade_study_daily where student_number in ('9001', '9002', '9003', '9004', '9005')"
    )
    self.__db.delete(
        "delete from school_student_attendance_info where dt = '{0}'".format(
            cur_dt))
    # self.__db.delete("delete from school_student_attendance_exist_info where dt = '{0}'".format(cur_dt))
    for item in self.__students:
        self.__logger.info('学生: {0}'.format(item[1]))
        # Random categorical levels for the day.
        student_relationship = str(random.randint(0, 3))
        student_emotion = str(random.randint(0, 2))
        student_mental_stat = str(random.randint(0, 2))
        student_study_stat = str(random.randint(0, 3))
        sql1 = '''
            insert into `student_mental_status_ld`(`student_number`,`student_name`,`class_id`,`grade_name`,`class_name`,`student_relationship`,`student_emotion`,`student_mental_stat`,`student_study_stat`,`student_interest`,`dt`) values ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}', '{8}', '{9}', '{10}')
        '''.format(item[0], item[1], self.__classid, self.__grade,
                   self.__class, student_relationship, student_emotion,
                   student_mental_stat, student_study_stat, item[2], cur_dt)
        self.__db.insert(sql1)
        sql2 = '''
            insert into `student_mental_status_interest_daily`(`student_number`,`student_name`,`class_id`,`grade_name`,`class_name`,`student_interest`,`dt`) values ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}')
        '''.format(item[0], item[1], self.__classid, self.__grade,
                   self.__class, item[2], cur_dt)
        self.__db.insert(sql2)
        for index in range(0, len(self.__courses)):
            # The student's interest course gets fixed, high levels.
            grade_level = str(
                round(random.uniform(-1, 0.7), 2)
                if self.__courses[index] != item[2] else 0.75)
            study_level = str(
                round(random.uniform(-1, 0.7), 2)
                if self.__courses[index] != item[2] else 0.85)
            sql3 = '''
                insert into `student_mental_status_grade_study_daily`(`student_number`,`course_name`,`grade_level`,`study_level`,`dt`) values ('{0}', '{1}', '{2}', '{3}', '{4}')
            '''.format(item[0], self.__courses[index], grade_level,
                       study_level, cur_dt)
            self.__db.insert(sql3)
            sql5 = '''
                insert into `school_student_attendance_info`(`course_name`,`class_name`,`grade_name`,`start_time`,`end_time`,`student_number`,`student_name`,`dt`) values ('{0}', '{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}')
            '''.format(self.__courses[index], self.__class, self.__grade,
                       self.__times[index][0], self.__times[index][1],
                       item[0], item[1], cur_dt)
            # Only ~30% of course slots get an attendance row.
            seed = random.randint(1, 10)
            if seed >= 8:
                self.__db.insert(sql5)
    self.__logger.info('Done')
def __init__(self):
    """Wire up the mental-estimate worker with parsed CLI arguments."""
    super(Main, self).__init__()
    self.doer = EstimateMental(CommonUtil.parse_arguments())
def init(target):
    """Parse the excel protocol file and collect DTO descriptions.

    Walks every sheet of the configured protocol workbook, validates each
    row against the configured layout rules, and builds request/response
    DTO content dicts, generating separate files for nested DTOs (up to
    two levels, triggered by dotted field ids under list/array fields).

    :param target: generation target (e.g. openapi / pmbank / generic)
    :return: list of content dicts; [] when the protocol file is missing;
             None on configuration errors (logged).
    """
    protocol_file = CodeTemplate.java_template.get("protocol_file")
    if os.path.isfile(protocol_file):
        excel_data = xlrd.open_workbook(protocol_file)
        sheets = excel_data.sheets()
        sheet_names = excel_data.sheet_names()
        sheet_num = len(sheets)
        if 0 >= sheet_num:
            logger.error("data not found in excel")
            return
        index = 0
        protocol_data = []
        while index < sheet_num:
            dto_num = 0
            need_import_module = []
            sheet_origin_name = sheet_names[index].strip()
            # Sheet names come back as unicode; convert to utf-8 (py2).
            sheet_origin_name = sheet_origin_name.encode('utf-8')
            sheet_name = excel_conf.get("sheets_name_dict").get(
                sheet_origin_name)
            if sheet_name is None:
                index += 1
                logger.warn("sheet_name=%s not in dict, no need to parse" %
                            sheet_origin_name)
                continue
            content = {
                "sheet_name": sheet_name,
                "dto_elems": [],
                "need_import_module": [],
            }
            table = sheets[index]
            nrows = table.nrows
            # Load the excel layout description rules.
            row_format = excel_conf.get("sheets_row_format")
            if row_format is None:
                logger.error("miss option sheets_row_format")
                return
            min_field_num = row_format.get("field_min_num")
            if min_field_num is None:
                logger.error("miss option field_min_num")
                return
            field_pos = excel_conf.get("sheets_field_position")
            if field_pos is None:
                logger.error("miss option sheets_field_position")
                return
            pos_id = field_pos.get("field_id")
            pos_name = field_pos.get("field_name")
            pos_comment = field_pos.get("field_comment")
            pos_type = field_pos.get("field_type")
            if pos_id is None or pos_name is None or pos_type is None:
                logger.error(
                    "miss field_id or field_name or field_type in sheets_field_position"
                )
                return
            # State for nested DTO files (only two levels supported).
            # NOTE(review): the file handles opened below are never
            # closed explicitly -- confirm this is acceptable.
            nest_dto_filename = ""
            nest_dto_fp = ""
            nest_dto_elems = []
            nest_dto_import_module = []
            lv2_nest_dto_filename = ""
            lv2_nest_dto_fp = ""
            lv2_nest_dto_elems = []
            lv2_nest_dto_import_module = []
            # Common parsing logic, driven by the excel layout rules.
            for i in xrange(nrows):
                # By default no nested dto is generated for this row.
                need_create_nest_dto = False
                lv2_need_create_nest_dto = False
                data = table.row_values(i)
                if len(data) < min_field_num:
                    logger.warn(
                        "sheet_name=%s row=%s miss some cols, you need at least %s cols"
                        % (sheet_name, i, min_field_num))
                    continue
                # The field id cell distinguishes plain vs nested rows.
                print type(data[pos_id])
                dto_field_id = data[pos_id]
                print dto_field_id
                if data[pos_id] is None:
                    logger.error("sheet_name=%s row=%s miss field_id" %
                                 (sheet_origin_name, i))
                    ErrorUtil.addInvalidFieldId(
                        target, sheet_name, data[pos_name]
                        if data[pos_name] is not None else "unknown")
                elif type(data[pos_id]) == int or str(
                        data[pos_id]).strip().isdigit():
                    dto_field_id = int(data[pos_id])
                elif type(data[pos_id]) == float:
                    # A dotted id (1.1 / 1.1.1) marks a nested field.
                    if CommonUtil.isFloat(dto_field_id):
                        need_create_nest_dto = True
                    else:
                        tmp = str(data[pos_id]).strip()
                        dot_count = tmp.count('.')
                        if dot_count == 1:
                            need_create_nest_dto = True
                        elif dot_count == 2:
                            need_create_nest_dto = True
                            lv2_need_create_nest_dto = True
                else:
                    print type(data[pos_id])
                    continue
                # Leaving a level-1 nested section: flush its dto file.
                if not need_create_nest_dto and not CommonUtil.is_empty(
                        nest_dto_fp):
                    nest_dto_content = {
                        "sheet_name": nest_dto_filename,
                        "dto_elems": nest_dto_elems,
                        "need_import_module": nest_dto_import_module,
                    }
                    CodeGenerator.gen_code(nest_dto_content, target)
                # Leaving a level-2 nested section: flush its dto file.
                if not lv2_need_create_nest_dto and not CommonUtil.is_empty(
                        lv2_nest_dto_fp):
                    lv2_nest_dto_content = {
                        "sheet_name": lv2_nest_dto_filename,
                        "dto_elems": lv2_nest_dto_elems,
                        "need_import_module": lv2_nest_dto_import_module,
                    }
                    CodeGenerator.gen_code(lv2_nest_dto_content, target)
                # Field name validation.
                if CommonUtil.is_empty(data[pos_name]):
                    logger.error(
                        "sheet_name=%s have empty field_name, please check"
                        % sheet_origin_name)
                    ErrorUtil.addEmptyFieldName(target)
                    continue
                dto_field_name = str(data[pos_name]).strip()
                dto_field_type = str(
                    CodeTemplate.java_template.get("default_field_type")
                    if CommonUtil.is_empty(data[pos_type]) else
                    data[pos_type]).strip()
                # Type still empty after defaulting: skip this row.
                if CommonUtil.is_empty(dto_field_type):
                    ErrorUtil.addInvalidFieldType(target, sheet_name,
                                                  dto_field_name)
                    continue
                # Strip a trailing "(...)" qualifier from the type.
                pos = dto_field_type.find('(')
                if pos > 0:
                    dto_field_type = dto_field_type[0:pos]
                # A type starting with '(' is invalid.
                if 0 == pos:
                    ErrorUtil.addInvalidFieldType(target, sheet_name,
                                                  dto_field_name)
                # Field comment (may legitimately be empty).
                dto_field_comment = str("" if data[pos_comment] is None else
                                        data[pos_comment]).strip()
                # field id 1 starts a new dto:
                # - first occurrence begins the request dto;
                # - the next flushes it and begins the response dto.
                if 1 == dto_field_id:
                    if 0 == dto_num:
                        content[
                            "sheet_name"] = sheet_name + CodeTemplate.java_template.get(
                                "default_request_filename_postfix")
                        dto_num += 1
                        CodeGenerator.setRequestPropertyStyle(target)
                    else:
                        content["need_import_module"] = need_import_module
                        need_import_module = []
                        CodeGenerator.gen_code(content, target)
                        content[
                            "sheet_name"] = sheet_name + CodeTemplate.java_template.get(
                                "default_response_filename_postfix")
                        content["dto_elems"] = []
                        content["need_import_module"] = []
                        CodeGenerator.setResponsePropertyStyle(target)
                # list/array fields: record the list import and open a new
                # nested dto file for the dotted rows that follow.
                tmp_type = dto_field_type.lower()
                if tmp_type in ["list", "array"]:
                    if lv2_need_create_nest_dto and "list" not in lv2_nest_dto_import_module:
                        lv2_nest_dto_import_module.append("list")
                    elif need_create_nest_dto and "list" not in nest_dto_import_module:
                        nest_dto_import_module.append("list")
                    else:
                        if "list" not in need_import_module:
                            need_import_module.append("list")
                    dir_path = CodeTemplate.java_template.get(
                        "default_file_path").get(
                            "output_" + target) % CodeGenerator.service_name
                    if not os.path.exists(dir_path):
                        os.makedirs(dir_path)
                    # Open a dedicated dto file for the Array/List field so
                    # the following dotted fields land inside it.  Nested
                    # arrays are unsupported, hence the not-nested guards
                    # (otherwise earlier data would be overwritten).
                    if not need_create_nest_dto:
                        nest_dto_filename = CamelTransformTool.trans_underline_field_to_camel_classname(
                            dto_field_name) + "DTO"
                        nest_dto_fp = open(
                            dir_path + nest_dto_filename + ".java", "w+")
                        nest_dto_elems = []
                        nest_dto_import_module = []
                    elif not lv2_need_create_nest_dto:
                        lv2_nest_dto_filename = CamelTransformTool.trans_underline_field_to_camel_classname(
                            dto_field_name) + "DTO"
                        lv2_nest_dto_fp = open(
                            dir_path + lv2_nest_dto_filename + ".java", "w+")
                        lv2_nest_dto_elems = []
                        lv2_nest_dto_import_module = []
                # date-typed fields need the date import.
                if tmp_type in ["t", "d", "date"]:
                    if lv2_need_create_nest_dto and "date" not in lv2_nest_dto_import_module:
                        lv2_nest_dto_import_module.append("date")
                    elif need_create_nest_dto and "date" not in nest_dto_import_module:
                        nest_dto_import_module.append("date")
                    else:
                        if "date" not in need_import_module:
                            need_import_module.append("date")
                # decimal-typed fields need the big_decimal import.
                if tmp_type in ["b", "decimal", "bigdecimal"]:
                    if lv2_need_create_nest_dto and "big_decimal" not in lv2_nest_dto_import_module:
                        lv2_nest_dto_import_module.append("big_decimal")
                    elif need_create_nest_dto and "big_decimal" not in nest_dto_import_module:
                        nest_dto_import_module.append("big_decimal")
                    else:
                        if "big_decimal" not in need_import_module:
                            need_import_module.append("big_decimal")
                if dto_field_name is None or dto_field_type is None:
                    logger.error(
                        "dto_field_name or dto_field_type miss, please check you protocol file"
                    )
                    continue
                dto_elem = {
                    "name": dto_field_name,
                    "type": dto_field_type,
                    "comment": dto_field_comment,
                }
                # Route the element into the dto it belongs to.
                if lv2_need_create_nest_dto:
                    lv2_nest_dto_elems.append(dto_elem)
                elif need_create_nest_dto:
                    nest_dto_elems.append(dto_elem)
                else:
                    content["dto_elems"].append(dto_elem)
            # Sheet finished: flush any nested dto files still pending.
            # NOTE(review): need_create_nest_dto here holds the value from
            # the last row; an empty sheet would raise NameError -- confirm.
            if need_create_nest_dto and not CommonUtil.is_empty(
                    nest_dto_fp):
                nest_dto_content = {
                    "sheet_name": nest_dto_filename,
                    "dto_elems": nest_dto_elems,
                    "need_import_module": nest_dto_import_module,
                }
                CodeGenerator.gen_code(nest_dto_content, target)
            if lv2_need_create_nest_dto and not CommonUtil.is_empty(
                    lv2_nest_dto_fp):
                lv2_nest_dto_content = {
                    "sheet_name": lv2_nest_dto_filename,
                    "dto_elems": lv2_nest_dto_elems,
                    "need_import_module": lv2_nest_dto_import_module,
                }
                CodeGenerator.gen_code(lv2_nest_dto_content, target)
            if dto_num > 0:
                content["need_import_module"] = need_import_module
                protocol_data.append(content)
            index += 1
        return protocol_data
    else:
        logger.error("protocol_file=%s not exist" % protocol_file)
        print "protocol_file=%s not exist" % protocol_file
        return []
def __init__(self):
    """Initialize the backup entry point.

    Parses the command-line arguments once and hands them to the
    BackupData worker that performs the actual backup.
    """
    super(MainBK, self).__init__()
    # Parse CLI options up front; the worker owns them from here on.
    parsed_args = CommonUtil.parse_arguments()
    self.doer = BackupData(parsed_args)
def estimate(self):
    """Entry point: run the daily estimation pipeline.

    Depending on the instance flags, evaluates either teaching
    effectiveness (``self.__teaching``), teacher performance
    (``self.__teacher``), or — by default — per-student metrics.
    All results are persisted through ``self.__poster``.
    """
    CommonUtil.verify()
    # Determine the date/time window of the most recent data.
    times = CommonUtil.get_range_times()
    self.__logger.info("Current hour: {}".format(datetime.datetime.now().hour))
    # Before the threshold hour, yesterday's data is the latest complete day.
    diff_days = -1 if datetime.datetime.now().hour <= Config.DATETIME_THRESHOLD else 0
    estimate_date = CommonUtil.get_date_day(diff_days)
    # Optionally run for an explicitly requested date ('-1' means "use the derived date").
    if self.__date != '-1':
        self.__logger.info("执行指定日期({})的数据".format(self.__date))
        estimate_date = self.__date
        # Rebuild the whole window to cover exactly that one day.
        start_date = CommonUtil.get_specific_date(self.__date)
        end_date = CommonUtil.get_specific_date(self.__date, 1)
        times['start_datetime'] = start_date.strftime("%Y-%m-%d")
        times['end_datetime'] = end_date.strftime("%Y-%m-%d")
        times['start_unixtime'] = int(time.mktime(start_date.timetuple()))
        times['end_unixtime'] = int(time.mktime(end_date.timetuple()))
    self.__logger.info("Begin to analyze the student data of {0}".format(estimate_date))
    self.__logger.info("Begin to preprocess data between {0} and {1}.".format(times['start_datetime'], times['end_datetime']))
    self.__preprocessor.preprocessor(estimate_date)
    if self.__teaching:
        # Evaluate teaching effectiveness at the class+subject level.
        self.__logger.info('在班级+科目的维度对教学效果进行评估')
        teaching_metrics = self.__teaching_metric.calculate_teaching_metrics(estimate_date)
        # Persist the analysis results.
        self.__logger.info('将分析结果存储到数据库中')
        self.__poster.post_teaching(teaching_metrics, estimate_date)
        teaching_analysis_metrics = self.__teaching_analyzer.Analysis(estimate_date)
        self.__poster.post_teaching_study_grade(teaching_analysis_metrics)
        self.__teaching_analyzer.analyze_teaching_scores(estimate_date)
    elif self.__teacher:
        # Evaluate the teacher's performance (S-T analysis method).
        self.__logger.info('基于S-T分析法评估教师的教学情况')
        emotions, behaviors, scores = self.__teacher_metric.calculate_teacher_metrics(estimate_date)
        self.__poster.post_teacher_emotions(emotions, estimate_date)
        self.__poster.post_teacher_behaviors(behaviors, estimate_date)
        self.__poster.post_teacher_scores(scores, estimate_date)
    else:
        # Student evaluation path.
        # Fetch the students' basic information first.
        students = self.get_students()
        # Compute per-course metrics first: the interest metric is derived from them.
        self.__logger.info("Begin to compute and post daily course metrics")
        course_metrics = self.__course.calculate_course_metrics(times['start_unixtime'], times['end_unixtime'], estimate_date)
        students = self.__poster.post_course_metric(course_metrics, estimate_date, students)
        self.__logger.info("Finished to compute and post daily course metrics")
        self.__logger.info("Begin to compute and post daily metrics")
        metrics = self.__metric.calculate_daily_metrics(times['start_unixtime'], times['end_unixtime'], estimate_date)
        metrics = self.estimate_interest(times['end_datetime'], metrics)
        students = self.__poster.post(metrics, estimate_date, students)
        self.__logger.info("Finished to compute and post daily metrics")
        self.__logger.info("Begin to post Interest")
        students = self.__poster.post_interest_metric(self.__interests, estimate_date, students)
        self.__logger.info("Finished to post Interest")
        # Four-quadrant analysis of grades vs. study status, per student.
        self.__logger.info("Begin to analyze and post Grade and Study_Status")
        analysis_metrics = self.__analyzer.Analysis(estimate_date)
        self.__poster.post_grade_study_metric(analysis_metrics, students)
        # Same four-quadrant analysis aggregated at the class level.
        self.__logger.info("Begin to analyze and post Grade and Study status for class")
        class_metrics = self.__analyzer.class_analysis(estimate_date)
        self.__poster.post_teaching_study_grade(class_metrics)
        self.__logger.info("Finished to analyze and post")
def init(target):
    """Parse the excel protocol file and collect the data to process,
    generating DTO descriptions for the given target.

    :param target: target flavor, one of: openapi / pmbank / generic
    :return: list of per-sheet protocol dicts, or [] when the protocol
             file does not exist (None on configuration errors)
    """
    protocol_file = CodeTemplate.java_template.get("protocol_file")
    if os.path.isfile(protocol_file):
        excel_data = xlrd.open_workbook(protocol_file)
        sheets = excel_data.sheets()
        sheet_names = excel_data.sheet_names()
        sheet_num = len(sheets)
        if 0 >= sheet_num:
            logger.error("data not found in excel")
            return
        index = 0
        protocol_data = []
        while index < sheet_num:
            dto_num = 0
            need_import_module = []
            sheet_origin_name = sheet_names[index].strip()
            # Sheet names read from excel are unicode (Python 2); convert to utf-8.
            sheet_origin_name = sheet_origin_name.encode('utf-8')
            sheet_name = excel_conf.get("sheets_name_dict").get(sheet_origin_name)
            if sheet_name is None:
                # Unknown sheets are skipped, not treated as errors.
                index += 1
                logger.warn("sheet_name=%s not in dict, no need to parse" % sheet_origin_name)
                continue
            content = {
                "sheet_name": sheet_name,
                "dto_elems": [],
                "need_import_module": [],
            }
            table = sheets[index]
            nrows = table.nrows
            # Load the excel layout rules (row format and column positions).
            row_format = excel_conf.get("sheets_row_format")
            if row_format is None:
                logger.error("miss option sheets_row_format")
                return
            min_field_num = row_format.get("field_min_num")
            if min_field_num is None:
                logger.error("miss option field_min_num")
                return
            field_pos = excel_conf.get("sheets_field_position")
            if field_pos is None:
                logger.error("miss option sheets_field_position")
                return
            pos_id = field_pos.get("field_id")
            pos_name = field_pos.get("field_name")
            pos_comment = field_pos.get("field_comment")
            pos_type = field_pos.get("field_type")
            if pos_id is None or pos_name is None or pos_type is None:
                logger.error("miss field_id or field_name or field_type in sheets_field_position")
                return
            # State for the nested (embedded) DTO files; at most 2 nesting levels.
            nest_dto_filename = ""
            nest_dto_fp = ""
            nest_dto_elems = []
            nest_dto_import_module = []
            lv2_nest_dto_filename = ""
            lv2_nest_dto_fp = ""
            lv2_nest_dto_elems = []
            lv2_nest_dto_import_module = []
            # Common parsing logic driven by the excel layout rules.
            for i in xrange(nrows):
                # By default the row does not open a nested dto; only 2 levels supported.
                need_create_nest_dto = False
                lv2_need_create_nest_dto = False
                data = table.row_values(i)
                if len(data) < min_field_num:
                    logger.warn("sheet_name=%s row=%s miss some cols, you need at least %s cols" % (
                        sheet_name, i, min_field_num))
                    continue
                # Field id column.
                # NOTE(review): the two bare prints below look like leftover debug output.
                print type(data[pos_id])
                dto_field_id = data[pos_id]
                print dto_field_id
                if data[pos_id] is None:
                    logger.error("sheet_name=%s row=%s miss field_id" % (sheet_origin_name, i))
                    ErrorUtil.addInvalidFieldId(target, sheet_name,
                                                data[pos_name] if data[pos_name] is not None else "unknown")
                elif type(data[pos_id]) == int or str(data[pos_id]).strip().isdigit():
                    dto_field_id = int(data[pos_id])
                elif type(data[pos_id]) == float:
                    if CommonUtil.isFloat(dto_field_id):
                        # A true float id (e.g. 1.1) means a nested dto is required.
                        need_create_nest_dto = True
                    else:
                        # Count the dots to distinguish one- vs two-level nesting
                        # (e.g. "1.1" vs "1.1.1").
                        tmp = str(data[pos_id]).strip()
                        dot_count = tmp.count('.')
                        if dot_count == 1:
                            need_create_nest_dto = True
                        elif dot_count == 2:
                            need_create_nest_dto = True
                            lv2_need_create_nest_dto = True
                else:
                    # NOTE(review): placement of this fallback is ambiguous in the
                    # original formatting; read here as "unparseable id type -> skip row".
                    print type(data[pos_id])
                    continue
                # Close out the level-1 nested dto when this row leaves nesting.
                if not need_create_nest_dto and not CommonUtil.is_empty(nest_dto_fp):
                    nest_dto_content = {
                        "sheet_name": nest_dto_filename,
                        "dto_elems": nest_dto_elems,
                        "need_import_module": nest_dto_import_module,
                    }
                    CodeGenerator.gen_code(nest_dto_content, target)
                # Close out the level-2 nested dto when this row leaves level-2 nesting.
                if not lv2_need_create_nest_dto and not CommonUtil.is_empty(lv2_nest_dto_fp):
                    lv2_nest_dto_content = {
                        "sheet_name": lv2_nest_dto_filename,
                        "dto_elems": lv2_nest_dto_elems,
                        "need_import_module": lv2_nest_dto_import_module,
                    }
                    CodeGenerator.gen_code(lv2_nest_dto_content, target)
                # Field name validation.
                if CommonUtil.is_empty(data[pos_name]):
                    logger.error("sheet_name=%s have empty field_name, please check" % sheet_origin_name)
                    ErrorUtil.addEmptyFieldName(target);
                    continue
                dto_field_name = str(data[pos_name]).strip()
                # Fall back to the template's default field type when the cell is empty.
                dto_field_type = str(CodeTemplate.java_template.get("default_field_type") if CommonUtil.is_empty(
                    data[pos_type]) else data[pos_type]).strip()
                # Field type validation -- drop the row if still empty after preprocessing.
                if CommonUtil.is_empty(dto_field_type):
                    ErrorUtil.addInvalidFieldType(target, sheet_name, dto_field_name)
                    continue
                # Special handling of '(' -- strip a trailing "(...)" qualifier from the type.
                pos = dto_field_type.find('(')
                if pos > 0:
                    dto_field_type = dto_field_type[0:pos]
                # Second type validation -- a type starting with '(' is invalid.
                if 0 == pos:
                    ErrorUtil.addInvalidFieldType(target, sheet_name, dto_field_name)
                # Field comment column (may be absent).
                dto_field_comment = str("" if data[pos_comment] is None else data[pos_comment]).strip()
                # When field id is 1 and no dto has been generated yet:
                #   1. set the sheet name (request dto)  2. bump the dto counter
                # When field id is 1 and a dto was already generated:
                #   1. flush the accumulated content  2. re-init content for the response dto
                if 1 == dto_field_id:
                    if 0 == dto_num:
                        content["sheet_name"] = sheet_name + CodeTemplate.java_template.get(
                            "default_request_filename_postfix")
                        dto_num += 1
                        CodeGenerator.setRequestPropertyStyle(target)
                    else:
                        content["need_import_module"] = need_import_module
                        need_import_module = []
                        CodeGenerator.gen_code(content, target)
                        content["sheet_name"] = sheet_name + CodeTemplate.java_template.get(
                            "default_response_filename_postfix")
                        content["dto_elems"] = []
                        content["need_import_module"] = []
                        CodeGenerator.setResponsePropertyStyle(target)
                # Track whether a list-typed field is present (drives imports).
                tmp_type = dto_field_type.lower()
                if tmp_type in ["list", "array"]:
                    if lv2_need_create_nest_dto and "list" not in lv2_nest_dto_import_module:
                        lv2_nest_dto_import_module.append("list")
                    elif need_create_nest_dto and "list" not in nest_dto_import_module:
                        nest_dto_import_module.append("list")
                    else:
                        if "list" not in need_import_module:
                            need_import_module.append("list")
                    dir_path = CodeTemplate.java_template.get("default_file_path").get(
                        "output_" + target) % CodeGenerator.service_name
                    if not os.path.exists(dir_path):
                        os.makedirs(dir_path)
                    # Open a standalone dto file for Array/List fields so the following
                    # dotted-id rows land inside it. Nested arrays are not supported, so a
                    # file is only opened when the flag is False -- otherwise the data
                    # generated earlier would be overwritten.
                    if not need_create_nest_dto:
                        nest_dto_filename = CamelTransformTool.trans_underline_field_to_camel_classname(
                            dto_field_name) + "DTO"
                        nest_dto_fp = open(dir_path + nest_dto_filename + ".java", "w+")
                        nest_dto_elems = []
                        nest_dto_import_module = []
                    elif not lv2_need_create_nest_dto:
                        lv2_nest_dto_filename = CamelTransformTool.trans_underline_field_to_camel_classname(
                            dto_field_name) + "DTO"
                        lv2_nest_dto_fp = open(dir_path + lv2_nest_dto_filename + ".java", "w+")
                        lv2_nest_dto_elems = []
                        lv2_nest_dto_import_module = []
                # Track whether a date-typed field is present.
                if tmp_type in ["t", "d", "date"]:
                    if lv2_need_create_nest_dto and "date" not in lv2_nest_dto_import_module:
                        lv2_nest_dto_import_module.append("date")
                    elif need_create_nest_dto and "date" not in nest_dto_import_module:
                        nest_dto_import_module.append("date")
                    else:
                        if "date" not in need_import_module:
                            need_import_module.append("date")
                # Track whether a decimal-typed field is present.
                if tmp_type in ["b", "decimal", "bigdecimal"]:
                    if lv2_need_create_nest_dto and "big_decimal" not in lv2_nest_dto_import_module:
                        lv2_nest_dto_import_module.append("big_decimal")
                    elif need_create_nest_dto and "big_decimal" not in nest_dto_import_module:
                        nest_dto_import_module.append("big_decimal")
                    else:
                        if "big_decimal" not in need_import_module:
                            need_import_module.append("big_decimal")
                if dto_field_name is None or dto_field_type is None:
                    logger.error("dto_field_name or dto_field_type miss, please check you protocol file")
                    continue
                dto_elem = {
                    "name": dto_field_name,
                    "type": dto_field_type,
                    "comment": dto_field_comment,
                }
                # Nested fields go into the nested dto file; others into the main content.
                if lv2_need_create_nest_dto:
                    lv2_nest_dto_elems.append(dto_elem)
                elif need_create_nest_dto:
                    nest_dto_elems.append(dto_elem)
                else:
                    content["dto_elems"].append(dto_elem)
            # NOTE(review): unlike a sibling copy of this parser elsewhere in the file,
            # nested dtos still pending at end-of-sheet are NOT flushed here, so fields
            # nested at the end of a sheet may be lost -- confirm intended.
            if dto_num > 0:
                content["need_import_module"] = need_import_module
                protocol_data.append(content)
            index += 1
        return protocol_data
    else:
        logger.error("protocol_file=%s not exist" % protocol_file)
        print "protocol_file=%s not exist" % protocol_file
        return []
def count_face_pose(self, day, is_lookback=False):
    """Count face-pose occurrences per (class, face) at the face_id level.

    :param day: the date (dt) to count for
    :param is_lookback: when True, count over the lookback window on
        'rest' course rows (faces seen at least DETECTED_LOWEST_LIMIT
        times); otherwise count a single day's non-'rest' rows
    :return: {class_id: {face_id: {pose_field: count, ...}}}
    """
    # TODO: how to factor face_pose_stat into the metric calculation
    self.__logger.info("Begin to count by face_pose")
    if is_lookback:
        sql = '''
            SELECT class_id, face_id, action, COUNT(*) AS total
            FROM (
                SELECT CONCAT(t1.college_name, t1.grade_name, t1.class_name) AS class_id,
                    t1.face_id, t1.action
                FROM {2} t1
                WHERE t1.dt >= '{0}' AND t1.dt <= '{1}'
                    AND t1.course_name = 'rest'
                    AND t1.action_type = {4}
                    AND t1.face_id in (
                        SELECT t2.face_id
                        FROM (
                            SELECT face_id, COUNT(*) AS num
                            FROM {2}
                            WHERE dt >= '{0}' AND dt <= '{1}' AND course_name = 'rest'
                            GROUP BY face_id
                            HAVING num >= {3}
                        ) t2
                    )
            ) tt
            GROUP BY class_id, face_id, action
        '''.format(day, CommonUtil.get_date_day(Config.LOOKBACKWINDOW),
                   Config.INTERMEDIATE_TABLE_TRAIN, Config.DETECTED_LOWEST_LIMIT,
                   Config.ACTION_TYPE['face_pose'])
    else:
        sql = '''
            SELECT CONCAT(college_name, grade_name, class_name) AS class_id,
                face_id, action, COUNT(*) AS total
            FROM {1}
            WHERE dt = '{0}' AND action_type = {2} AND course_name != 'rest'
            GROUP BY class_id, face_id, action
        '''.format(day, Config.INTERMEDIATE_TABLE_TRAIN,
                   Config.ACTION_TYPE['face_pose'])
    # Map the action code coming back from the DB to its metric field name.
    action_fields = {
        '0': 'face_pose_normal',   # looking straight ahead
        '1': 'face_pose_around',   # looking around
        '2': 'face_pose_low',      # head down
    }
    res = {}
    count = 0
    for row in self.__db.select(sql):
        count += 1
        class_key = row[0].encode('utf-8')
        face_key = row[1].encode('utf-8')
        # Ensure the nested dicts exist even for unrecognized action codes,
        # matching the original accumulation behavior.
        per_face = res.setdefault(class_key, {}).setdefault(face_key, {})
        cnt = float(row[3])
        field = action_fields.get(row[2])
        if field is not None:
            per_face[field] = cnt
    self.__logger.debug("count_face_pose: " + str(res))
    self.__logger.info(
        "Finished to count face_pose, and get total {0} records.".format(
            count))
    return res