def comics_imgresource_handler(cls, feed_i_data):
    table = "api_imgresource"
    if feed_i_data:
        com_id = "com_id=%s" % feed_i_data[0]["com_id"]
        # split the row dicts into column names and value rows for a bulk insert
        k, v_l = txt_handler.data_change(feed_i_data)
        msg = mysql_handler.MysqlHandler().create_sql(table, k, v_l)
        if msg:
            log_handler.write_log("%s %s %s" % (table, msg, com_id))
    else:
        print("%s requires no update" % table)
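# Hedged sketch, not part of the pipeline: txt_handler.data_change is defined
# elsewhere in this repo; judging from the call site above it plausibly splits a
# list of row dicts into (column_names, value_rows) for a bulk INSERT.
# data_change_sketch below is a hypothetical stand-in for illustration only.
def data_change_sketch(rows):
    keys = list(rows[0].keys())  # column order taken from the first row
    value_rows = [[row[k] for k in keys] for row in rows]
    return keys, value_rows

# data_change_sketch([{"com_id": 1001, "chap_id": 1}, {"com_id": 1001, "chap_id": 2}])
# -> (['com_id', 'chap_id'], [[1001, 1], [1001, 2]])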
def __init__(self):
    self.mq_handler = mysql_handler.MysqlHandler()
def resource_handler(cls, target_dir):
    # Pull the chap_ids already stored in imgresource for each com_id and build a dict
    check_com_id_list = list(set([os.path.basename(os.path.dirname(i)) for i in cls.suspense_list]))
    check_table = "api_imgresource"
    exist_chap_dict = {}
    for i in check_com_id_list:
        re = mysql_handler.MysqlHandler().read_table(check_table, "com_id=%s" % i)
        if not re:
            tmp_chap_list = []
        else:
            tmp_chap_list = [str(j["chap_id"]) for j in re]
        exist_chap_dict[i] = tmp_chap_list
    # Process each language of each comic
    while True:
        if len(cls.suspense_list):
            directory = cls.suspense_list.pop()
        else:
            print("All data has been loaded")
            break
        lang = os.path.basename(directory)
        com_id = os.path.basename(os.path.dirname(directory))
        if com_id in cls.exist_comics_list:
            is_exist = 1
        else:
            is_exist = 0
        # Collect the files under the language directory
        sub_dir_name_list = os.listdir(directory)
        print("%s language sub-entries" % com_id, sub_dir_name_list)
        chap_cover_img_dict = {}
        necessary_file_list = ["comics_detail.txt", "com_cover_img.jpg", "chap_title_list.txt"]
        for file_name in sub_dir_name_list:
            if file_name == "comics_detail.txt":
                necessary_file_list.remove(file_name)
                data = txt_handler.read_comics_txt(os.path.join(directory, file_name))
            elif file_name == "com_cover_img.jpg":
                necessary_file_list.remove(file_name)
                com_cover_img = os.path.splitext(file_name)[0]
            elif file_name == "chap_title_list.txt":
                necessary_file_list.remove(file_name)
                chap_dict = txt_handler.read_chapter_txt(os.path.join(directory, file_name))
            elif os.path.isdir(os.path.join(directory, file_name)):
                # cls.chap_dir_list.append(os.path.join(directory, file_name))
                name_list = os.listdir(os.path.join(directory, file_name))
                for fn in name_list:
                    if fn == "chap_cover_img.jpg":
                        chap_cover_img = fn
                        chap_cover_img_dict[file_name] = chap_cover_img
        # Skip this comic with a logged error if any required file is missing
        if necessary_file_list:
            # print(necessary_file_list)
            log_handler.write_log(
                "%s is missing required files, e.g. 'comics_detail.txt', 'com_cover_img.jpg', 'chap_title_list.txt'" % com_id)
            continue
        # Count the chapter sub-directories under this language folder
        c = txt_handler.count_dir(directory)
        chap_list = list(chap_dict.keys())
        if c != len(chap_list):
            log_handler.write_log("%s chapter count does not match folder count" % com_id)
            continue
        # api_imgresource: only chapters not already in the table are created
        exist_chap_list = exist_chap_dict[com_id]
        create_chap_list = list(set(chap_list).difference(set(exist_chap_list)))
        print(create_chap_list, "valid chapter list", len(chap_list))
        # Build the price list for the valid chapters
        data_price = data["chapter_list"][0].get("price", "")
        if data_price:
            # price is stored as a literal list of dicts, whose first element maps price -> chapter ids
            suspense_price_dict = eval(data["chapter_list"][0]["price"])[0]
        else:
            suspense_price_dict = {}
        free_chap = []
        for i in range(data["free_chapter"]):
            free_chap.append(str(i + 1))
        suspense_price_dict["0.00"] = free_chap
        default_price = data["default_price"]
        create_price_dict = txt_handler.create_price_list(suspense_price_dict, create_chap_list, default_price)
        feed_i_data = []
        for i in create_chap_list:
            tmp = {"com_id": data["com_id"],
                   "%s_title" % lang: chap_dict[i],
                   "chap_cover_img": chap_cover_img_dict.get(i, ""),
                   "chap_id": int(i),
                   "%s_img_list_path" % lang: "%s/%s/%s" % (data["com_id"], lang, i),
                   "price": create_price_dict[i]}
            feed_i_data.append(tmp)
        cls.comics_imgresource_handler(feed_i_data)
        # api_category
        feed_ca_data = [{"com_id": data["com_id"], "category": data["category"]}]
        cls.comics_category_handler(feed_ca_data, is_exist)
        # api_search
        feed_s_data = [{"com_id": data["com_id"],
                        "%s_title" % lang: data["title"],
                        "%s_author" % lang: data["author"],
                        "%s_subtitle" % lang: data["subtitle"],
                        "%s_introduction" % lang: data["introduction"]}]
        cls.comics_search_handler(feed_s_data, is_exist)
        # api_cominfo
        feed_co_data = [{"com_id": data["com_id"],
                         "%s_com_cover_img" % lang: com_cover_img,
                         "free_chapter": data["free_chapter"],
                         "total_chapter": txt_handler.count_dir(target_dir + "/%s/%s" % (com_id, lang)),
                         "download": 0,
                         "created": time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(time.time())),
                         "modified": time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(time.time())),
                         "status": data["status"],
                         "category": data["category"]}]
        cls.comics_info_handler(feed_co_data, is_exist)
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sqlalchemy import create_engine

import mysql_handler


def show_plot(_session_identifier, _classification, _remote=False, _hist=False, _legend=False):
    """ if the script runs on a remote server, allow X11 forwarding """
    if _remote:
        matplotlib.use('tkagg')
    """ init mysql instance """
    db = mysql_handler.MysqlHandler()
    mycursor = db.get_cursor(True)  # cursor that returns rows as dicts
    """ query the session by _session_identifier (a full SQL predicate) """
    sessions_query = """SELECT * FROM griiip_dev_staging.tracksessions WHERE {}""".format(
        _session_identifier)
    mycursor.execute(sessions_query)
    session = mycursor.fetchone()
    session_start_time = session['timeStart']  # session start date
    session_end_date = session['timeEnd']  # session end date
    """ query the lap names from the driverlaps table that fall between the session's start and end dates """
    # classification is passed as a bound parameter instead of being quoted by hand
    lapNames_query = ("SELECT lapName, lapTime FROM driverlaps "
                      "WHERE classification = %s and lapStartDate between %s and %s")
    mycursor.execute(lapNames_query, (_classification, session_start_time, session_end_date))
    laps_from_driverLaps = mycursor.fetchall()
    lapsDict = {lap['lapName']: str(lap['lapTime']) for lap in laps_from_driverLaps}
    lapNames = [x['lapName'] for x in laps_from_driverLaps]
    if _remote is False:
        lapNames = lapNames[1:10]  # limit the number of laps when running locally
    _lapNames_tuple = tuple(lapNames)  # convert the list to a tuple for the SQL IN clause
    if len(lapNames) == 0:
        raise Exception('there are no laps in that session and _classification')
    """ query lap name and the required fields (throttle) from the driverlapsrundata table """
    runData_sql_for_dataFrame = ('SELECT throttle, lapName FROM griiip_dev_staging.driverlapsrundata '
                                 'where lapName IN {} order by lapName').format(_lapNames_tuple)
    """ build a pandas DataFrame from the runData_sql_for_dataFrame results so they can be plotted """
    sqlEngine = create_engine(db.get_connection_string())  # using sqlalchemy to query
    dbConnection = sqlEngine.connect()
    frame = pd.read_sql(runData_sql_for_dataFrame, dbConnection)
    """ log all laps with their lap times """
    print('laps logs:')
    print(*lapsDict.items(), sep='\n')
    for lap in lapNames:
        subset = frame[frame['lapName'] == lap]
        # Draw the density plot
        if _legend:
            sns.distplot(subset['throttle'], hist=_hist, kde=True,
                         kde_kws={'linewidth': 1}, label=lap)
            plt.legend(prop={'size': 16}, title='laps')
        else:
            sns.distplot(subset['throttle'], hist=_hist, kde=True,
                         kde_kws={'linewidth': 1})  # , label=lap)
    plt.title('Density Plot with Multiple laps')
    plt.xlabel('throttle')
    plt.ylabel('num of laps')
    # fig = plt.figure()
    # fig.savefig('temp.png', transparent=True)
    plt.show()
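# Hedged usage sketch: _session_identifier is injected verbatim as the WHERE
# predicate, so it must be a complete condition such as "id = 1234", and
# _classification is matched against driverlaps.classification. Both values
# below are hypothetical, and a reachable database is required.
if __name__ == '__main__':
    show_plot("id = 1234", "1", _remote=True, _hist=False, _legend=True)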
import operator
import os
import sys
from datetime import datetime
from functools import reduce

import mysql_handler
from session_speed.object_bin import *
from session_speed.session_helper import *

if __name__ == '__main__':
    db = mysql_handler.MysqlHandler()
    myCursor = db.get_cursor()
    db_sessions = db.get(
        """SELECT ts.timeStart, ts.timeEnd, ts.id
           FROM tracksessions as ts
           left join trackevents as t_events on TrackEventId = t_events.id
           where sessionType = "Race" and SeriesId = 1""",
        use_dict_cursor=True)
    try:
        if len(db_sessions) == 0:
            # if there are no sessions, raise an exception
            raise ZeroSessionException('there are no sessions')
        betweenStr = get_sessions_date_between_str(db_sessions)
        # get all relevant session participants
        participants = get_sessions_participants(betweenStr, db)
        lapsByStartDateDict = get_sessions_by_satrte_date(participants, betweenStr, myCursor)
        sessions = create_sessions(db_sessions, lapsByStartDateDict)
        # for each session, set the best time of the worst driver (session.worstLap)
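        # Hedged sketch of the step described above, left commented out because the
        # session attribute names (session.laps, lap.driverId, lap.lapTime,
        # session.worstLap) come from session_speed helpers not shown here and are
        # assumptions: the worst lap is the slowest of the drivers' personal bests.
        # for session in sessions:
        #     best_by_driver = {}
        #     for lap in session.laps:
        #         prev = best_by_driver.get(lap.driverId)
        #         if prev is None or lap.lapTime < prev:
        #             best_by_driver[lap.driverId] = lap.lapTime
        #     session.worstLap = max(best_by_driver.values())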