def plandoRateWebService(self):
    """Rate a plando (1-5) and return the updated average rating as JSON.

    Expects request vars: plando (name, 1-32 chars), rate (int in 1..5).
    One rating per client IP (handled by the DB layer). Raises HTTP 400
    on any validation failure.
    """
    if self.vars.plando is None:
        raiseHttp(400, "Missing parameter plando")
    plando = self.vars.plando
    if self.vars.rate is None:
        raiseHttp(400, "Missing parameter rate")
    rate = self.vars.rate
    if IS_LENGTH(maxsize=32, minsize=1)(plando)[1] is not None:
        raiseHttp(400, "Plando name must be between 1 and 32 characters")
    # BUGFIX: '[a-zA-Z0-9 -_]' contained the range space..underscore, which
    # also accepted punctuation like !"#$%&. '-' placed last is literal.
    if IS_MATCH('^[a-zA-Z0-9 _-]*$')(plando)[1] is not None:
        raiseHttp(400, "Plando name can only contain [a-zA-Z0-9 -_]")
    if IS_INT_IN_RANGE(1, 6)(rate)[1] is not None:
        raiseHttp(400, "Rate name must be between 1 and 5")
    rate = int(rate)
    # the client IP identifies the rater
    ip = self.request.client
    with DB() as db:
        db.addRating(plando, rate, ip)
        newRate = db.getPlandoRate(plando)
        if newRate is None:
            raiseHttp(400, "Can't get new rate")
        newCount = newRate[0][0]
        newRate = float(newRate[0][1])
        data = {
            "msg": "",
            "purePlandoName": re.sub('[\W_]+', '', plando),
            "rate": newRate,
            "count": newCount
        }
    return json.dumps(data)
def downloadPlandoWebService(self):
    """Serve a plando's IPS patch (base64-encoded) and its max size as JSON.

    Also increments the plando's download counter. Raises HTTP 400 when the
    name is invalid or the IPS file is missing on the server.
    """
    if self.vars.plando is None:
        raiseHttp(400, "Missing parameter plando")
    plandoName = self.vars.plando
    if IS_LENGTH(maxsize=32, minsize=1)(plandoName)[1] is not None:
        raiseHttp(400, "Plando name must be between 1 and 32 characters")
    # BUGFIX: ' -_' was a character range (space..underscore) accepting
    # punctuation; '-' placed last makes it a literal dash as intended.
    if IS_MATCH('^[a-zA-Z0-9 _-]*$')(plandoName)[1] is not None:
        raiseHttp(400, "Plando name can only contain [a-zA-Z0-9 -_]")
    ipsFileName = os.path.join(ipsBasePath, "{}.ips".format(plandoName))
    if not os.path.isfile(ipsFileName):
        raiseHttp(400, "Plando ips not found on server")
    with open(ipsFileName, 'rb') as ipsFile:
        ipsData = ipsFile.read()
    with DB() as db:
        maxSize = db.getPlandoIpsMaxSize(plandoName)
        db.increaseDownloadCount(plandoName)
    data = {
        "ips": base64.b64encode(ipsData).decode(),
        "fileName": "{}.sfc".format(plandoName),
        "maxSize": maxSize
    }
    return json.dumps(data)
def run(self):
    """Gather one week's worth of solver/randomizer usage statistics.

    Returns a dict feeding the stats view: per-tool presets, durations and
    aggregate data, sprites/ships usage, plando-rando data, recent errors
    and filesystem usage.
    """
    weeks = 1
    with DB() as db:
        stats = dict(
            solverPresets=db.getSolverPresets(weeks),
            randomizerPresets=db.getRandomizerPresets(weeks),
            solverDurations=db.getSolverDurations(weeks),
            randomizerDurations=db.getRandomizerDurations(weeks),
            solverData=db.getSolverData(weeks),
            randomizerData=db.getRandomizerData(weeks),
            isolver=db.getISolver(weeks),
            isolverData=db.getISolverData(weeks),
            spritesData=db.getSpritesData(weeks),
            shipsData=db.getShipsData(weeks),
            plandoRandoData=db.getPlandoRandoData(weeks),
            randomizerParamsStats=db.getRandomizerParamsStats(weeks),
        )
    stats["errors"] = self.getErrors()
    stats["fsStatus"], stats["fsPercent"] = self.getFsUsage()
    return stats
def deletePlandoWebService(self):
    """Delete a plando and its ratings after verifying its secret key.

    Raises HTTP 400 on validation failure or key mismatch; returns a
    confirmation message as JSON on success.
    """
    for param in ["plandoName", "plandoKey"]:
        if self.vars[param] is None:
            raiseHttp(400, "Missing parameter {}".format(param))
    plandoName = self.vars.plandoName
    plandoKey = self.vars.plandoKey
    if IS_LENGTH(maxsize=32, minsize=1)(plandoName)[1] is not None:
        raiseHttp(400, "Plando name must be between 1 and 32 characters")
    # BUGFIX: '-' moved to the end of the class so it is a literal dash,
    # not the range space..underscore (which accepted punctuation).
    if IS_MATCH('^[a-zA-Z0-9 _-]*$')(plandoName)[1] is not None:
        raiseHttp(400, "Plando name can only contain [a-zA-Z0-9 -_]")
    if IS_LENGTH(maxsize=8, minsize=1)(plandoKey)[1] is not None:
        raiseHttp(400, "Plando key must be between 1 and 8 characters")
    if IS_MATCH('^[a-zA-Z0-9]*$')(plandoKey)[1] is not None:
        raiseHttp(400, "Plando key can only contain [a-zA-Z0-9]")
    with DB() as db:
        valid = db.isValidPlandoKey(plandoName, plandoKey)
        if valid is None or len(valid) == 0:
            raiseHttp(400, "Plando key mismatch")
        db.deletePlandoRating(plandoName)
        db.deletePlando(plandoName)
    return json.dumps("Plando {} deleted".format(plandoName))
def run(self):
    """Render the plando repository view.

    If a plando name follows '/plandorepo' in the URL, show that single
    plando expanded; otherwise (or when it is not found / invalid) list
    all plandos.
    """
    with DB() as db:
        url = self.request.env.request_uri.split('/')
        msg = ""
        plandos = []
        expand = True
        if len(url) > 0 and url[-1] != 'plandorepo':
            # a plando name was passed as parameter
            plandoName = url[-1]
            # decode url
            plandoName = urllib.parse.unquote(plandoName)
            # sanity check
            # BUGFIX: ' -_' was a char range (space..underscore); '-' last
            # makes it literal as the error message implies.
            if IS_MATCH('^[a-zA-Z0-9 _-]*$')(plandoName)[1] is not None:
                msg = "Plando name can only contain [a-zA-Z0-9 -_]"
            else:
                plandos = db.getPlando(plandoName)
                if plandos is None or len(plandos) == 0:
                    msg = "Plando not found"
        if plandos is None or len(plandos) == 0:
            # get plando list
            plandos = db.getPlandos()
            expand = False
    return dict(plandos=plandos, msg=msg, expand=expand, math=math, re=re)
def uploadPlandoWebService(self):
    """Create a new plando: validate params, store the IPS patch, insert
    the DB row and optionally notify the webhook.

    Returns the generated update key as JSON; raises HTTP 400 on any
    validation failure, on duplicate names, or when the global plando
    limit is reached.
    """
    with DB() as db:
        count = db.getPlandoCount()
        plandoLimit = 2048
        if count is None or count[0][0] >= plandoLimit:
            raiseHttp(400, "Maximum number of plandos reach: {}".format(plandoLimit))
    for param in ["author", "plandoName", "longDesc", "preset", "romData"]:
        if self.vars[param] is None:
            raiseHttp(400, "Missing parameter {}".format(param))
    for param in ["author", "plandoName", "preset"]:
        if IS_LENGTH(maxsize=32, minsize=1)(self.vars[param])[1] is not None:
            raiseHttp(400, "{} must be between 1 and 32 characters".format(param))
    for param in ["longDesc"]:
        if IS_LENGTH(maxsize=2048, minsize=1)(self.vars[param])[1] is not None:
            raiseHttp(400, "{} must be between 1 and 2048 characters".format(param))
    plandoName = self.vars.plandoName
    # BUGFIX: '-' placed last so it is literal, not the range
    # space..underscore which accepted punctuation.
    if IS_MATCH('^[a-zA-Z0-9 _-]*$')(plandoName)[1] is not None:
        raiseHttp(400, "Plando name can only contain [a-zA-Z0-9 -_]")
    # check if plando doesn't already exist
    with DB() as db:
        check = db.checkPlando(plandoName)
        if check is not None and len(check) > 0 and check[0][0] == plandoName:
            raiseHttp(400, "Can't create plando, a plando with the same name already exists")
    author = self.vars.author
    longDesc = self.removeHtmlTags(self.vars.longDesc)
    preset = self.vars.preset
    maxSize = self.handleIps(plandoName, self.vars.romData)
    updateKey = self.generateUpdateKey()
    with DB() as db:
        db.insertPlando((plandoName, author, longDesc, preset, updateKey, maxSize))
    if webhookAvailable:
        self.plandoWebhook(plandoName, author, preset, longDesc)
    return json.dumps(updateKey)
def __init__(self, bot, client, torrentsQueue: Queue, ytQueue, megaQueue):
    """Keep references to the Telegram clients, the three work queues and
    the shared helpers (Utils, DB)."""
    self.bot = bot
    self.client = client
    # work queues for the different download backends
    self.torrentsQueue = torrentsQueue
    self.ytQueue = ytQueue
    self.megaQueue = megaQueue
    # shared helper objects
    self.utils = Utils()
    self.db = DB()
def __init__(self, bot, client, torrentsQueue: Queue, megaQueue: Queue, ytQueue: Queue):
    """Store clients, work queues, shared helpers and a dedicated logger
    for the admin conversation."""
    self.bot = bot
    self.client = client
    # work queues for the different download backends
    self.torrentsQueue = torrentsQueue
    self.megaQueue = megaQueue
    self.ytQueue = ytQueue
    # shared helper objects and logging
    self.utils = Utils()
    self.db = DB()
    self.logger = logging.getLogger(' Admin Conv ')
def randoParamsWebService(self):
    """Return the randomizer parameters of a given seed as JSON.

    'seed' is the id in the randomizer table, not the actual seed number.
    Raises HTTP 400 when the parameter is missing or out of range.
    """
    # idiom fix: compare to None with 'is', not '=='
    if self.vars.seed is None:
        raiseHttp(400, "Missing parameter seed", True)
    seed = getInt(self.request, 'seed', False)
    if seed < 0 or seed > sys.maxsize:
        raiseHttp(400, "Wrong value for seed", True)
    with DB() as db:
        (seed, params) = db.getRandomizerSeedParams(seed)
    return json.dumps({"seed": seed, "params": params})
def __init__(self, bot):
    """Load config.json, set up the Mongo-backed async scheduler and
    ensure the image download directory exists."""
    # BUGFIX: the config file handle was left open; use a context manager
    with open('config.json', 'r') as f:
        self.config = json.load(f)
    self.bot = bot
    self.db = DB()
    jobstore = {
        'mongo': MongoDBJobStore(database='posts', collection='posts')
    }
    self.sched = AsyncIOScheduler(jobstores=jobstore)
    self.sched.start()
    app_location = os.getcwd()
    self.download_location = f'{app_location}/images'
    if not os.path.exists(self.download_location):
        os.makedirs(self.download_location)
def computeDifficulty(self, jsonRomFileName, preset):
    """Run the external solver on a randomized ROM and record params and
    results in the DB.

    :param jsonRomFileName: path to the randomized ROM description (json)
    :param preset: skill preset name
    :return: (success, result) — result is the solver's json output on
             success, an error string otherwise
    """
    randomizedRom = os.path.basename(jsonRomFileName.replace('json', 'sfc'))
    presetFileName = "{}/{}.json".format(getPresetDir(preset), preset)
    # temp file receives the solver's json output
    (fd, jsonFileName) = tempfile.mkstemp()
    db = DB()
    id = db.initSolver()
    params = [
        getPythonExec(), os.path.expanduser("~/RandomMetroidSolver/solver.py"),
        '-r', str(jsonRomFileName),
        '--preset', presetFileName,
        '--difficultyTarget', str(self.session.solver['difficultyTarget']),
        '--pickupStrategy', self.session.solver['pickupStrategy'],
        '--type', 'web',
        '--output', jsonFileName,
        '--runtime', '10'
    ]
    for item in self.session.solver['itemsForbidden']:
        params += ['--itemsForbidden', item]
    db.addSolverParams(id, randomizedRom, preset,
                       self.session.solver['difficultyTarget'],
                       self.session.solver['pickupStrategy'],
                       self.session.solver['itemsForbidden'])
    print("before calling solver: {}".format(params))
    try:
        start = datetime.now()
        ret = subprocess.call(params)
        end = datetime.now()
        duration = (end - start).total_seconds()
        print("ret: {}, duration: {}s".format(ret, duration))
        if ret == 0:
            with open(jsonFileName) as jsonFile:
                result = json.load(jsonFile)
        else:
            result = "Solver: something wrong happened while solving the ROM"
        db.addSolverResult(id, ret, duration, result)
    finally:
        # BUGFIX: always release the DB connection and the temp file, even
        # if the solver call or result handling raises
        db.close()
        os.close(fd)
        os.remove(jsonFileName)
    return (ret == 0, result)
def main():
    """Start the database thread, the tweet monitor and 50 filter threads."""
    # BUGFIX: close config.json after reading (handle was left open)
    with open('config.json', 'r') as f:
        config = json.load(f)
    dbQueue = Queue()
    db = DB(dbQueue)
    tweetsQueue = Queue()
    extendedQueue = Queue()
    monitor = Monitor(tweetsQueue, extendedQueue)
    threading.Thread(target=db.start, name='Database Thread').start()
    threading.Thread(target=monitor.start, name='Monitor Thread').start()
    # fan out filtering across 50 worker threads sharing the same queues
    for i in range(50):
        filteringThread = FilterTweets(f'Filter {i}', tweetsQueue, dbQueue,
                                       config.get("SET1"), config.get("SET2"),
                                       config.get("SET3"),
                                       config.get("DISCORD_CHANNEL_WEBHOOK"))
        threading.Thread(target=filteringThread.start,
                         name=f'Tweets Filter Thread {i}').start()
    print(' Bot is up!')
def updatePlandoWebService(self):
    """Update an existing plando's metadata (and its IPS if romData is
    given) after verifying the update key.

    Raises HTTP 400 on validation failure or key mismatch; returns a
    confirmation message as JSON.
    """
    for param in ["author", "plandoName", "longDesc", "preset", "plandoKey"]:
        if self.vars[param] is None:
            raiseHttp(400, "Missing parameter {}".format(param))
    for param in ["author", "plandoName", "preset"]:
        if IS_LENGTH(maxsize=32, minsize=1)(self.vars[param])[1] is not None:
            raiseHttp(400, "{} must be between 1 and 32 characters".format(param))
    for param in ["plandoKey"]:
        if IS_LENGTH(maxsize=8, minsize=1)(self.vars[param])[1] is not None:
            raiseHttp(400, "{} must be between 1 and 8 characters".format(param))
    for param in ["longDesc"]:
        if IS_LENGTH(maxsize=2048, minsize=1)(self.vars[param])[1] is not None:
            raiseHttp(400, "{} must be between 1 and 2048 characters".format(param))
    plandoName = self.vars.plandoName
    # BUGFIX: '-' placed last in the class so it is a literal dash, not
    # the range space..underscore which accepted punctuation.
    if IS_MATCH('^[a-zA-Z0-9 _-]*$')(plandoName)[1] is not None:
        raiseHttp(400, "Plando name can only contain [a-zA-Z0-9 -_]")
    author = self.vars.author
    longDesc = self.removeHtmlTags(self.vars.longDesc)
    preset = self.vars.preset
    plandoKey = self.vars.plandoKey
    # check update key
    with DB() as db:
        valid = db.isValidPlandoKey(plandoName, plandoKey)
        if valid is None or len(valid) == 0:
            raiseHttp(400, "Plando key mismatch")
        if self.vars.romData is not None:
            print("updatePlandoWebService: update ips")
            maxSize = self.handleIps(plandoName, self.vars.romData)
            db.updatePlandoAll((author, longDesc, preset, maxSize, plandoName))
        else:
            db.updatePlandoMeta((author, longDesc, preset, plandoName))
    return json.dumps("Plando {} updated succesfully.".format(plandoName))
def getSkillLevelBarData(self, preset):
    """Build the skill-level bar data for a custom preset.

    Returns a dict with the preset's score and number of known techniques,
    the standard presets' scores (cached in RAM) and the preset's last
    action date from the DB. Missing/broken preset files yield 'N/A'.
    """
    result = {'name': preset}
    try:
        params = PresetLoader.factory('{}/{}.json'.format(getPresetDir(preset), preset)).params
        result['custom'] = (preset, params['score'])
        # add stats on the preset
        result['knowsKnown'] = len([know for know in params['Knows'] if params['Knows'][know][0] == True])
    except Exception:
        # BUGFIX: was a bare 'except:', which also swallowed
        # SystemExit/KeyboardInterrupt
        result['custom'] = (preset, 'N/A')
        result['knowsKnown'] = 'N/A'
    # get score of standard presets (computed once, then cached in RAM)
    standardScores = self.cache.ram('standardScores', lambda: dict(), time_expire=None)
    if not standardScores:
        # BUGFIX: the loop variable no longer shadows the 'preset' parameter
        for stdPreset in ['newbie', 'casual', 'regular', 'veteran', 'expert', 'master', 'samus']:
            score = PresetLoader.factory('{}/{}.json'.format(getPresetDir(stdPreset), stdPreset)).params['score']
            standardScores[stdPreset] = score
    result['standards'] = standardScores
    with DB() as db:
        result['lastAction'] = db.getPresetLastActionDate(result['custom'][0])
    # TODO: normalize result (or not ?)
    return result
def __init__(self):
    """Open a database connection on construction."""
    print('connect')
    connection = DB().conn()
    self.connect = connection
setup()
# BUGFIX: close config.json after reading (handle was left open)
with open('config.json', 'r') as f:
    config = json.load(f)
API_ID = config.get('API_ID')
API_HASH = config.get('API_HASH')
PHONE_NUMBER = config.get('PHONE_NUMBER')
BOT_TOKEN = config.get('BOT_TOKEN')
# two Telethon sessions: a user client and a bot client
client = TelegramClient('./sessionFiles/client', API_ID, API_HASH)
client.parse_mode = 'html'
bot = TelegramClient('./sessionFiles/bot', API_ID, API_HASH)
# work queues shared by all conversations
torrentsQueue = Queue()
megaQueue = Queue()
ytQueue = Queue()
db = DB()


async def startAdminConv(event):
    """Spawn an admin conversation for this event and stop further handlers."""
    conversation = AdminConversation(bot, client, torrentsQueue, megaQueue, ytQueue)
    await conversation.start(event)
    raise StopPropagation


async def startUserConv(event):
    """Spawn a user conversation for this event and stop further handlers."""
    # NOTE(review): the queue argument order here is (torrents, yt, mega),
    # different from startAdminConv — confirm UserConversation's signature.
    conversation = UserConversation(bot, client, torrentsQueue, ytQueue, megaQueue)
    await conversation.start(event)
    raise StopPropagation
def run(self):
    """Compute progression-speed statistics for the stats view.

    For each progression speed of the selected majors split, loads the raw
    stats from the DB, transforms them into chart-ready series and zips
    them so the speeds can be compared side by side.
    Returns dict(majorsSplit=..., progSpeedStats=...) for the view;
    progSpeedStats is None when no data was found.
    """
    self.initProgSpeedStatsSession()
    if self.vars.action == 'Load':
        (ok, msg) = self.validateProgSpeedStatsParams()
        if not ok:
            self.session.flash = msg
            redirect(URL(r=self.request, f='progSpeedStats'))
        self.updateProgSpeedStatsSession()
    skillPreset = "Season_Races"
    randoPreset = "Season_Races"
    majorsSplit = self.vars.majorsSplit
    with DB() as db:
        progSpeedStatsRaw = {}
        progSpeedStats = {}
        # "openNM" = items collected when N/4 of the locations are available
        progSpeedStats["open14"] = {}
        progSpeedStats["open24"] = {}
        progSpeedStats["open34"] = {}
        progSpeedStats["open44"] = {}
        progSpeeds = ['speedrun', 'slowest', 'slow', 'medium', 'fast', 'fastest', 'basic', 'variable', 'total']
        # speeds which actually have data, and their display names
        realProgSpeeds = []
        realProgSpeedsName = []
        for progSpeed in progSpeeds:
            curRandoPreset = "{}_{}_{}".format(randoPreset, majorsSplit, progSpeed)
            progSpeedStatsRaw[progSpeed] = db.getProgSpeedStat(skillPreset, curRandoPreset)
            if len(progSpeedStatsRaw[progSpeed]) != 0:
                progSpeedStats[progSpeed] = {}
                progSpeedStats[progSpeed]["avgLocs"] = transformStats(progSpeedStatsRaw[progSpeed]["avgLocs"], 50)
                open14 = transformStats(progSpeedStatsRaw[progSpeed]["open14"])
                open24 = transformStats(progSpeedStatsRaw[progSpeed]["open24"])
                open34 = transformStats(progSpeedStatsRaw[progSpeed]["open34"])
                open44 = transformStats(progSpeedStatsRaw[progSpeed]["open44"])
                # per-speed chart: the four "open" series zipped together,
                # with a header row prepended
                progSpeedStats[progSpeed]["open"] = zipStats([open14, open24, open34, open44])
                progSpeedStats[progSpeed]["open"].insert(0, [
                    'Collected items', '1/4 locations available',
                    '2/4 locations available', '3/4 locations available',
                    '4/4 locations available'
                ])
                progSpeedStats["open14"][progSpeed] = open14
                progSpeedStats["open24"][progSpeed] = open24
                progSpeedStats["open34"][progSpeed] = open34
                progSpeedStats["open44"][progSpeed] = open44
                realProgSpeeds.append(progSpeed)
                if progSpeed == 'total':
                    realProgSpeedsName.append('total_rando')
                else:
                    realProgSpeedsName.append(progSpeed)
        # avg locs
        if len(realProgSpeeds) > 0:
            progSpeedStats['avgLocs'] = zipStats([
                progSpeedStats[progSpeed]["avgLocs"] for progSpeed in realProgSpeeds
            ])
            progSpeedStats["avgLocs"].insert(0, ['Available locations'] + realProgSpeedsName)
        # prog items
        if len(progSpeedStats["open14"]) > 0:
            progSpeedStats["open14"] = zipStats([
                progSpeedStats["open14"][progSpeed] for progSpeed in realProgSpeeds
            ])
            progSpeedStats["open14"].insert(0, ['Collected items'] + realProgSpeedsName)
            progSpeedStats["open24"] = zipStats([
                progSpeedStats["open24"][progSpeed] for progSpeed in realProgSpeeds
            ])
            progSpeedStats["open24"].insert(0, ['Collected items'] + realProgSpeedsName)
            progSpeedStats["open34"] = zipStats([
                progSpeedStats["open34"][progSpeed] for progSpeed in realProgSpeeds
            ])
            progSpeedStats["open34"].insert(0, ['Collected items'] + realProgSpeedsName)
            progSpeedStats["open44"] = zipStats([
                progSpeedStats["open44"][progSpeed] for progSpeed in realProgSpeeds
            ])
            progSpeedStats["open44"].insert(0, ['Collected items'] + realProgSpeedsName)
        else:
            # no data at all for this majors split
            progSpeedStats = None
    majorsSplit = ['Major', 'Full']
    return dict(majorsSplit=majorsSplit, progSpeedStats=progSpeedStats)
# LSTM model hyper-parameters
output_layer_1_size = 80
output_size = 1
lstm_depth = 3
output_keep_prob = 0.5  # dropout keep probability on LSTM outputs
input_keep_prob = 1.0
l2_regularization_rate = 0.001
is_training = 0
is_multi_layer = 1
is_predict_by_timestep = 0
''' DB config '''
system = 'BOCOP-*'
# NOTE(review): database credentials are hard-coded here — they should be
# moved to a config file or environment variables.
db_eops = DB(host='192.168.130.30', port=3306, user='******', passwd='Password01!', database='eops')
''' model source '''
model_dir = 'ckpt_169d_8f_306'
print('Using model from %s.' % model_dir)
print('Using dict from %s' % (params.project_dir + '/' + params.dict_dir))
# Restore the normalization statistics (mean/std) of the trade volume
# (original comment was in Chinese: "恢复标准化的交易量")
normalized_count_data_dir = params.project_dir + '/' + params.dict_dir + '/normalized_count.dict'
with open(normalized_count_data_dir, 'r') as f:
    data = f.readlines()
    count_mean = float(data[0])
    count_std = float(data[1])
'''
def createOwner():
    """Interactively collect company/owner data (Python 2, Polish-language
    prompts) and save it as a JSON owner file, then return to the main
    menu via step1().

    All user-visible strings are runtime text and intentionally left in
    Polish.
    """
    clear()
    name = raw_input("Nazwa firmy: ")
    street = raw_input("Ulica: ")
    house_number = raw_input("Numer domu: ")
    flat_number = raw_input("Numer mieszkania: ")
    # list all provinces and let the user pick one by number
    provinces = DB().get_province_list()
    for province in DB().get_province_list():
        print u'{}'.format(province.__str__().decode("utf-8"))
    print u'Podaj numer województwa:',
    province = provinces[int(raw_input()) - 1]
    print u'Wybrano: {}'.format(province.name)
    print u'Wyszukaj miejscowość: ',
    cities = db.search_city(province, raw_input())
    if len(cities) > 1:
        # more than one match: let the user disambiguate
        print u'Znaleziono więcej miast, wybierz jedno z poniższych: '
        for i, city in enumerate(cities):
            print u'{}. {}'.format(i + 1, city.as_string())
        print u'Wybierz miejscowość: ',
        city = cities[int(raw_input()) - 1]
        print u'Wybrano: {}, {}'.format(city.city, city.commune)
    else:
        city = cities[0]
        print u'Znaleziono: {}'.format(city.city)
    # capture ids/names before 'city' is narrowed down to a plain string
    province_id = province.id
    province = city.province
    district = city.district
    # NOTE(review): commune is assigned city.city, not city.commune — confirm
    commune = city.city
    city = city.city
    postal = raw_input("Kod pocztowy: ")
    nip = raw_input("NIP/VAT ID: ")
    troublemaker = raw_input(
        "Wpisz miasto urzedu skarbowego (puste, jesli to samo co wyzej): ")
    if len(troublemaker) == 0:
        troublemaker = city
    # resolve the tax-office ("troublemaker") record for that city
    troublemaker = db.search_for_trouble(province_id, troublemaker)
    if len(troublemaker) > 1:
        print u'Znaleziono więcej urzędów, wybierz jeden z poniższych: '
        for i, x in enumerate(troublemaker):
            print u'{}. {}'.format(i + 1, x[1])
        print u'Wybierz urząd:',
        troublemaker = troublemaker[int(raw_input()) - 1]
        print u'Wybrano: {}'.format(troublemaker[1])
    else:
        troublemaker = troublemaker[0]
        print u'Znaleziono: {}'.format(troublemaker[1])
    troublemaker = troublemaker[0]
    bank_name = raw_input("Nazwa banku: ")
    account = raw_input("Numer konta: ")
    swift = raw_input("Numer SWIFT (tylko FVAT UE, PL puste):")
    # NOTE(review): original multi-line layout of this prompt was mangled
    # in the source paste; line breaks may have differed.
    print('''Numerowanie faktur: 1. Roczne 2. Miesieczne''')
    annual_number = Getch().__call__() == '1'
    transfer = "Przelew"
    save_name = raw_input("Nazwa (nazwa pliku, bez spacji): ")
    create(
        Settings.owner_file(u'{}.json'.format(save_name)),
        Owner(name,
              Address(postal, city, province, district, commune, street,
                      house_number, flat_number),
              nip,
              gov_code=troublemaker,
              annual_number=annual_number,
              account=Account(bank_name, account, swift, transfer)))
    clear()
    print("Owner {} utworzony".format(save_name))
    step1()
import ctypes

# Set an explicit AppUserModelID so Windows groups/brands the app's
# taskbar entry correctly (Windows-only API).
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID("polyplayer")

from PyQt5.QtWidgets import QMainWindow, QApplication, QFileDialog, QStyle, QStyleFactory, QLineEdit, QListView, \
    QWidget, QTableWidgetItem, QCheckBox, QComboBox
from PyQt5.QtCore import QThread, Qt, pyqtSignal
from PyQt5 import QtGui, QtCore

from gui.ctrl_panel import Ui_MainWindow
from api.pymusicdl_parser import MusicDL
from api.audio_player import AudioPlayer
from utils.logger import log
from utils.db import DB

# Shared database worker used by the whole GUI.
# NOTE(review): 'cfg' is not defined in this chunk — presumably defined or
# imported elsewhere in the file; confirm.
db_thread = DB(cfg['db'])
db_thread.connect()

# Column widths (pixels) for the track table, keyed by column name
header_dict = {
    'added': 36,
    'downloaded': 36,
    'title': 360,
    'artist': 120,
    'album': 120,
    'duration': 80,
    'filesize': 80,
    'source': 80,
}


class MainWindow(QMainWindow, Ui_MainWindow):
def write_summary_sheet02(workbook, PROJ_DICT, WEEK, NEW_PROJECT, currentWeekSolveRate, currentWeekAddRate, correSituation):
    """Write the summary ("汇总") worksheet.

    Columns 2-6 hold last week's per-project bug counts, columns 7-11 this
    week's, and columns 12-14 the current week's solve rate, add rate and
    handling status. Projects in NEW_PROJECT get dashes for last week.

    :param workbook: xlsxwriter workbook to add the sheet to
    :param PROJ_DICT: project configuration (index, sheet_name, ...)
    :param WEEK: current week number (e.g. 32)
    :param NEW_PROJECT: list of newly added projects without history
    :param currentWeekSolveRate: per-project solve rate strings
    :param currentWeekAddRate: per-project add rate strings
    :param correSituation: per-project handling-status strings
    """
    title = "开始编写【汇总】工作表"
    print(title.center(40, '='))
    worksheet = workbook.add_worksheet('汇总')
    worksheet.hide_gridlines(option=2)  # hide grid lines
    style = Styles(workbook).style_of_cell()
    style_lightGray = Styles(workbook).style_of_cell('gray')
    worksheet.set_row(0, 20)  # row height
    worksheet.set_column('A:A', 2)  # column widths
    worksheet.set_column('B:B', 24)
    worksheet.merge_range(1, 1, 2, 1, '项目', style)
    # map project key -> worksheet row, and write the project names
    projDict = {}
    for proj in PROJ_DICT:
        index = PROJ_DICT[proj]['index']
        proj_name = PROJ_DICT[proj]['sheet_name']
        projDict[proj] = index + 2
        worksheet.write(index + 2, 1, proj_name, style)
    # query last week's report data
    connect = DB().conn()
    sql = 'select project,week_num,statis_time,bug_leave_num,bug_total_num,bug_leave_rate,bug_add_num,bug_solve_num from wbg_history_data where week_num = %(week_num)s'
    value = {'week_num': str(WEEK - 1) + '周'}
    totalDate = DB().search(sql, value, connect)
    # format last week's report date as e.g. "07月31日"
    beforeWeek = totalDate[0][2]
    beforeWeek = datetime.datetime.strftime(beforeWeek, '%m-%d').replace('-', '月') + '日'
    worksheet.merge_range(1, 2, 1, 6, beforeWeek, style)
    worksheet.write(2, 2, '遗留数', style)
    worksheet.write(2, 3, '总数', style)
    worksheet.write(2, 4, '遗留率', style)
    worksheet.write(2, 5, '新增数', style)
    worksheet.write(2, 6, '解决数', style)
    for i in totalDate:
        if i[0] in PROJ_DICT:
            worksheet.write(projDict[i[0]], 2, i[3], style)
            worksheet.write(projDict[i[0]], 3, i[4], style)
            worksheet.write(projDict[i[0]], 4, format(float(i[5]), '.1%'), style)
            worksheet.write(projDict[i[0]], 5, i[6], style)
            worksheet.write(projDict[i[0]], 6, i[7], style)
    # new projects have no data for last week: write dashes
    for i in NEW_PROJECT:
        worksheet.write(projDict[i], 2, '—', style)
        worksheet.write(projDict[i], 3, '—', style)
        worksheet.write(projDict[i], 4, '—', style)
        worksheet.write(projDict[i], 5, '—', style)
        worksheet.write(projDict[i], 6, '—', style)
    # query this week's report data
    sql = 'select project,week_num,statis_time,bug_leave_num,bug_total_num,bug_leave_rate,bug_add_num,bug_solve_num from wbg_history_data where week_num = %(week_num)s'
    value = {'week_num': WEEK}
    totalDate = DB().search(sql, value, connect)
    beforeWeek = totalDate[0][2]
    beforeWeek = datetime.datetime.strftime(beforeWeek, '%m-%d').replace('-', '月') + '日'
    worksheet.merge_range(1, 7, 1, 11, beforeWeek, style)
    worksheet.write(2, 7, '遗留数', style)
    worksheet.write(2, 8, '总数', style)
    worksheet.write(2, 9, '遗留率', style)
    worksheet.write(2, 10, '新增数', style)
    worksheet.write(2, 11, '解决数', style)
    for i in totalDate:
        if i[0] in PROJ_DICT:
            worksheet.write(projDict[i[0]], 7, i[3], style)
            worksheet.write(projDict[i[0]], 8, i[4], style)
            worksheet.write(projDict[i[0]], 9, format(float(i[5]), '.1%'), style)
            worksheet.write(projDict[i[0]], 10, i[6], style)
            worksheet.write(projDict[i[0]], 11, i[7], style)
    # overall bug situation for the current week
    worksheet.merge_range(1, 12, 1, 14, '当周bug整体状况', style_lightGray)
    worksheet.write(2, 12, '解决速率', style_lightGray)
    worksheet.write(2, 13, '新增速率', style_lightGray)
    worksheet.write(2, 14, '对应状况', style_lightGray)
    for proj in projDict:
        if proj in currentWeekSolveRate:
            worksheet.write(projDict[proj], 12, currentWeekSolveRate[proj], style_lightGray)
        else:
            worksheet.write(projDict[proj], 12, '0%', style_lightGray)
        # BUGFIX: this membership test checked currentWeekSolveRate, which
        # raised KeyError when a project had a solve rate but no add rate
        if proj in currentWeekAddRate:
            worksheet.write(projDict[proj], 13, currentWeekAddRate[proj], style_lightGray)
        else:
            worksheet.write(projDict[proj], 13, '0%', style_lightGray)
        if proj in correSituation:
            worksheet.write(projDict[proj], 14, correSituation[proj], style_lightGray)
        else:
            worksheet.write(projDict[proj], 14, '无应对', style_lightGray)
    for i in range(1, 9):
        worksheet.set_row(i, 22)
    print("【汇总】工作表编写完毕".center(40, '-'))
#db.save_heroes(replayData) # db.save_army_strength(replayData) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('conf_file', help='Configuration file path') parser.add_argument('--replay_file', help='.StormReplay file to load') args = parser.parse_args() with file(args.conf_file) as f: conf = yaml.load(f) replayData = None db = DB(host=conf['db_host'], user=conf['db_user'], passwd=conf['db_pass'], db=conf['db']) if not args.replay_file: for directory, dirnames, filenames in walk( '/Users/cristiano/Others/log-crawler/download/'): for file in filenames: if file.endswith('StormReplay'): try: file_path = path.join(directory, file) print file_path replay = mpyq.MPQArchive(file_path) replayData = processEvents(protocol34835, replay, conf) save_to_db(replayData, file_path) except Exception, e: print "Error while trying to process %s: %s" % (
def Redmine_build_bug(self, proj, NEW_PROJECT, PROJ_DICT):
    """Count newly created Redmine bugs for a project.

    For projects in NEW_PROJECT (newly tracked), all bugs ever created are
    counted and the count of bugs predating the reporting window is stored
    in the wbg_proj_before_bug table; for other projects only bugs created
    within the current month are counted.

    :param proj: project key in PROJ_DICT
    :param NEW_PROJECT: list of newly added projects
    :param PROJ_DICT: project configuration (redmine ids, tracker ids, ...)
    :return: number of created bugs for the reporting period
    """
    # Reporting window: first day of the current month 08:00 to today 08:00,
    # converted to UTC dates for the Redmine 'created_on' filter.
    start_time = time.strftime('%Y-%m-01', time.localtime(time.time())) + " 08:00:0"
    end_time = time.strftime('%Y-%m-%d', time.localtime(time.time())) + " 08:00:0"
    start_time = datetime.datetime.utcfromtimestamp(
        int(time.mktime(time.strptime(start_time, "%Y-%m-%d %H:%M:%S")))).strftime("%Y-%m-%d")
    end_time = datetime.datetime.utcfromtimestamp(
        int(time.mktime(time.strptime(end_time, "%Y-%m-%d %H:%M:%S")))).strftime("%Y-%m-%d")
    if proj in NEW_PROJECT:
        # newly added project: count every bug ever created
        issues_list = self.redmine.issue.filter(
            project_id=PROJ_DICT[proj]['redmine_project_id'],
            status_id="*",
            tracker_id=PROJ_DICT[proj]['redmine_tracker_id'],
        )
        # bugs created before the reporting window started
        issues_list_before = self.redmine.issue.filter(
            project_id=PROJ_DICT[proj]['redmine_project_id'],
            status_id="*",
            tracker_id=PROJ_DICT[proj]['redmine_tracker_id'],
            created_on='<=' + str(start_time))
        created_bug_num = len(issues_list)
        print("【Redmine】%s 新建bug数量为:%d " % (proj, created_bug_num))
        proj_before_bug_num = int(len(issues_list_before))
        # NOTE(review): this log prints created_bug_num although it claims to
        # report the pre-window count; proj_before_bug_num looks intended.
        print("【Redmine】%s %s月份之前新建bug数量为:%d " %
              (proj, self.MONTH, created_bug_num))
        if proj_before_bug_num > 0:
            connect = DB().conn()
            cur = connect.cursor()
            updateTime = datetime.datetime.now()
            PROJ_BEFORE_BUG = DbSearch().search_proj_before_bug()
            # NOTE(review): inserting only when proj is already IN the table
            # looks inverted ('not in' expected) — confirm against the schema.
            if proj in PROJ_BEFORE_BUG:
                sql = 'insert into wbg_proj_before_bug (proj,beforeNum,updateTime) values (%(proj)s,%(beforeNum)s,%(updateTime)s)'
                value = {
                    'proj': proj,
                    'beforeNum': proj_before_bug_num,
                    'updateTime': updateTime
                }
                cur.execute(sql, value)
                connect.commit()
        return created_bug_num
    else:
        # existing project: only bugs created within the current month
        issues_list = self.redmine.issue.filter(
            project_id=PROJ_DICT[proj]['redmine_project_id'],
            status_id="*",
            tracker_id=PROJ_DICT[proj]['redmine_tracker_id'],
            created_on='><' + str(start_time) + '|' + str(end_time))
        created_bug_num = len(issues_list)
        print("【Redmine】%s 新建bug数量为:%d " % (proj, created_bug_num))
        return created_bug_num
                    help='number of items per insert call', default=10)  # NOTE: this add_argument call begins above this chunk
parser.add_argument("--configfile", default="config.json",
                    help="config file to use")
# add db args with no default database
DB.add_args(parser, None)
args = parser.parse_args()
# config file
config = ConfigFile(args.configfile)
# get db connection
conn = DB(args=args, configfile=config)
cur = conn.get_cursor()
# first drop table if needed (Python 2 print statements below)
if args.drop_table:
    print "Dropping Table..."
    cur.execute("DROP TABLE IF EXISTS {};".format(args.table_name))
# Create table
if args.create_table:
    print "Creating table..."
    create_table(cur, args.table_name)
# status updater
status_updater = StatusUpdater()
def __init__(self):
    """Cache the current year/month, open a DB connection and look up
    the current report week."""
    # current year and month as strings, e.g. '2020' / '07'
    self.YEAR = time.strftime('%Y', time.localtime(time.time()))
    self.MONTH = time.strftime('%m', time.localtime(time.time()))
    # shared DB connection reused by the report queries
    self.connect = DB().conn()
    self.WEEK = DbSearch().week()
def load_db():
    """Create the student database tables (students, courses, enrolment)."""
    stu_db = DB('stu')
    for table in (TABLE_STU, TABLE_COURSE, STU_TABLE_COURSE):
        stu_db.select(table)
    logger.info("Tables are created;")
def skillPresetActionWebService(self):
    """Create or update a skill-preset file from the web form.

    Validates the form parameters, hashes the password with SHA-256, and:
    - if the preset file exists: checks the stored password hash and, on
      match, rewrites the file and records an 'update' action in the DB;
    - otherwise (unless the global preset limit is reached): writes a new
      file, records a 'create' action and adds the preset to the cached
      community-preset list.
    Returns a JSON message on success; raises HTTP 400 on any failure.
    """
    print("skillPresetActionWebService call")
    if self.session.presets is None:
        self.session.presets = {}
    # for create/update, not load
    (ok, msg) = self.validatePresetsParams(self.vars.action)
    if not ok:
        raiseHttp(400, json.dumps(msg))
    else:
        self.session.presets['currentTab'] = self.vars.currenttab
    if self.vars.action == 'Create':
        preset = self.vars.presetCreate
    else:
        preset = self.vars.preset
    # check if the presets file already exists
    password = self.vars['password']
    password = password.encode('utf-8')
    passwordSHA256 = hashlib.sha256(password).hexdigest()
    fullPath = '{}/{}.json'.format(getPresetDir(preset), preset)
    if os.path.isfile(fullPath):
        # load it
        end = False
        try:
            oldParams = PresetLoader.factory(fullPath).params
        except Exception as e:
            msg = "UC:Error loading the preset {}: {}".format(preset, e)
            end = True
        if end == True:
            raiseHttp(400, json.dumps(msg))
        # check if password match
        if 'password' in oldParams and passwordSHA256 == oldParams['password']:
            # update the presets file
            paramsDict = self.genJsonFromParams(self.vars)
            paramsDict['password'] = passwordSHA256
            try:
                PresetLoader.factory(paramsDict).dump(fullPath)
                with DB() as db:
                    db.addPresetAction(preset, 'update')
                self.updatePresetsSession()
                msg = "Preset {} updated".format(preset)
                return json.dumps(msg)
            except Exception as e:
                msg = "Error writing the preset {}: {}".format(preset, e)
                raiseHttp(400, json.dumps(msg))
        else:
            msg = "Password mismatch with existing presets file {}".format(preset)
            raiseHttp(400, json.dumps(msg))
    else:
        # prevent a malicious user from creating presets in a loop
        if not self.maxPresetsReach():
            # write the presets file
            paramsDict = self.genJsonFromParams(self.vars)
            paramsDict['password'] = passwordSHA256
            try:
                PresetLoader.factory(paramsDict).dump(fullPath)
                with DB() as db:
                    db.addPresetAction(preset, 'create')
                self.updatePresetsSession()
                # add new preset in cache
                (stdPresets, tourPresets, comPresets) = loadPresetsList(self.cache)
                comPresets.append(preset)
                comPresets.sort(key=lambda v: v.upper())
                msg = "Preset {} created".format(preset)
                return json.dumps(msg)
            except Exception as e:
                msg = "Error writing the preset {}: {}".format(preset, e)
                raiseHttp(400, json.dumps(msg))
            # NOTE(review): unreachable — the try block either returns or the
            # except block raises before this redirect.
            redirect(URL(r=request, f='presets'))
        else:
            msg = "Sorry, maximum number of presets reached, can't add more"
            raiseHttp(400, json.dumps(msg))
class Statistics(object):
    """Weekly bug-statistics report generator.

    Class attributes are initialized once at import time (DB lookups,
    workbook creation). data_init() then refreshes the per-project bug
    dicts from the trackers and main() writes all the worksheets.
    """
    # options = {"server": "http://192.168.1.212:8088"}
    # auth = ("lig", "lig")  # user_name:test user_passwd:test
    connect = DB().conn()
    # Meaning of the per-project configuration keys in PROJ_DICT
    # (translated from the original Chinese notes):
    #   type: bug-tracker type of the project (redmine / jira)
    #   redmine_project_id: project id understood by the Redmine API
    #   redmine_tracker_id: tracker id of the project's bug tracker (found by
    #       printing tracker_id of a single Redmine issue)
    #   jira_tag: project tag understood by the JIRA API (see JIRA advanced
    #       search)
    #   jira_tracker_id: issue-type name used in JIRA queries, usually
    #       "缺陷"/"故障"; new projects default to "缺陷"
    #   title: title of the project's own report sheet
    #   index: position of the project within the report
    #   sheet_name: worksheet name for the project
    PROJ_DICT = config.PROJ_DICT
    ALL_PROJ_HISTORY_DATA = None
    TOTAL_DATA = DbSearch().search_allDate_history(PROJ_DICT)
    PROJ_BEFORE_BUG = {}  # bugs created before a new project joined the wbs DB
    NEW_BUG = {}  # newly created bugs per project
    LEAVE_BUG = {}  # remaining (open) bugs per project
    WEEK = DbSearch().week()  # current week
    print("当前周:%s" % WEEK)
    NEW_PROJECT = []  # newly added projects
    currentWeekSolveRate = {}  # solve rate of the current week
    currentWeekAddRate = {}  # add rate of the current week
    correSituation = {}  # handling status of the current week
    filename = '每周项目测试缺陷状况%s.xlsx' % time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time()))
    reportName = os.path.join(config.root_dir, 'report', filename)
    workbook = xlsxwriter.Workbook(reportName)

    # Class-attribute refresh: one pass over all projects pulling data
    # from the trackers.
    def data_init(self):
        """Refresh the bug dictionaries from Redmine/JIRA.

        EBID mixes JIRA and Redmine data: since December 2019 the Tianjin
        expert-database and the highway-design-institute projects use
        Redmine, so EBID needs the sum of both sources.
        """
        print("--------------------------data init-----------------------------")
        # refresh NEW_PROJECT with projects that joined this week
        self.NEW_PROJECT = DbSearch().search_new_project(
            self.PROJ_DICT, self.WEEK, self.NEW_PROJECT)
        begintime = time.strftime('%Y-%m-01', time.localtime(time.time()))
        endtime = time.strftime('%Y-%m-%d', time.localtime(time.time()))
        print("统计时间:%s~%s" % (begintime, endtime))
        for proj in self.PROJ_DICT:
            if self.PROJ_DICT[proj]['type'] == 'redmine':
                self.NEW_BUG[proj] = RedmineSta().Redmine_build_bug(
                    proj, self.NEW_PROJECT, self.PROJ_DICT)
                self.LEAVE_BUG[proj] = RedmineSta().Redmine_leave_bug(
                    proj, self.PROJ_DICT)
            elif self.PROJ_DICT[proj]['type'] == 'jira':
                self.NEW_BUG[proj] = JiraSta().JIRA_build_bug(
                    proj, self.PROJ_DICT)
                self.LEAVE_BUG[proj] = JiraSta().JIRA_leave_bug(
                    proj, self.PROJ_DICT)
        # todo: some projects keep data in both jira and redmine; store
        # their issues in separate tables for easier processing
        self.PROJ_BEFORE_BUG = DbSearch().search_proj_before_bug()
        print('项目之前新建bug', self.PROJ_BEFORE_BUG)

    def main(self):
        """Produce the full report: summary chart sheet, summary sheet and
        one sheet per project, then close the workbook."""
        self.data_init()
        self.currentWeekSolveRate, self.currentWeekAddRate, self.correSituation = write_summaryChart_sheet01(
            self.workbook, self.NEW_BUG, self.LEAVE_BUG, self.TOTAL_DATA,
            self.PROJ_DICT, self.NEW_PROJECT, self.PROJ_BEFORE_BUG,
            self.currentWeekSolveRate, self.currentWeekAddRate,
            self.correSituation, self.WEEK)
        write_summary_sheet02(self.workbook, self.PROJ_DICT, self.WEEK,
                              self.NEW_PROJECT, self.currentWeekSolveRate,
                              self.currentWeekAddRate, self.correSituation)
        # sta.write_BIM_sheet()
        write_Proj_sheet(self.workbook, self.PROJ_DICT,
                         self.PROJ_BEFORE_BUG, self.LEAVE_BUG)
        self.workbook.close()
def main(*args): if NUM_OF_VARS != len(args): exit("\nLength of variables does not match arguments passed to main()") method = args[0] fos_trial = args[1] soil_cohesion = args[2] inter_fric = args[3] numslices = args[4] water_pres = args[5] vslice = args[6] percent = args[7] verbose = args[8] save = args[9] show = args[10] do_crit_slope = args[11] ellip_coor = args[12] delimiter = args[13] bulk = args[14] f_config = args[15] f_data = args[16] variables = { 'method' : method, 'fos_trial' : fos_trial, 'soil_cohesion' : soil_cohesion, 'internal_friction_angle' : inter_fric, 'num_of_slices' : numslices, 'water_pressure' : water_pres, 'vslice' : vslice, 'percentage_status' : percent, 'verbose' : verbose, 'save_figure' : save, 'show_figure' : show, 'perform_critical_slope' : do_crit_slope, 'ellipse_coordinates' : ellip_coor, 'delimiter' : delimiter, 'bulk_density' : bulk, 'f_config' : f_config, 'f_data' : f_data } # read values from config files config_values = ReadConfig(f_config, variables.keys()).return_variables() # sort through None types and replace with value from config file for var in variables.keys(): if variables[var] is not None: continue for opt in config_values.keys(): if var == opt: variables[var] = config_values[opt] # create variable storage object config = DB(data_manipulation(variables)) # preview geometry if config.show_figure: previewGeometry(config) # create working space sprofile, circle, circ_coords = create.working_space(config, format.load_profile_data(config)) # perform slope stability calculation fos, errstr = perform.calculation(config, sprofile, circle) # plot final diagram if config.show_figure: profile = format.linspace2d(format.load_profile_data(config), config.num_of_slices) plt.scatter(circ_coords[:,0], circ_coords[:,1], color="red") plt.plot(sprofile[:,0], sprofile[:,1], color="green") plt.plot (profile[:,0], profile[:,1], color="green") if config.save_figure: plt.savefig('slope_profile.tif') #plt.show() print fos, errstr 
#display.results(config, results)
def write_Proj_sheet(workbook, PROJ_DICT, PROJ_BEFORE_BUG, LEAVE_BUG):
    """Write one worksheet per project containing three tables:

    1. per-year leftover/new bug counts and leftover rate,
    2. per-month counts for the current year,
    3. per-person leftover-bug ageing buckets plus a totals row.

    Args:
        workbook: open xlsxwriter Workbook to add the sheets to.
        PROJ_DICT: project configuration dict; sheets are ordered by each
            entry's 'index'.
        PROJ_BEFORE_BUG: per-project bug counts predating the wbs database.
        LEAVE_BUG: per-project leftover-bug data.  NOTE(review): the 'proj',
            'lbn' and 'bugStatusYear' keys are deleted from each entry below,
            mutating the caller's dict in place - confirm no caller reuses it.
    """
    YEAR = time.strftime('%Y', time.localtime(time.time()))
    # MONTH = time.strftime('%m', time.localtime(time.time()))
    # WEEK = DbSearch().week()  # current week
    connect = DB().conn()
    # Order the projects by their configured report position.
    PROJ_DICT_sorted = sorted(PROJ_DICT.items(),
                              key=lambda PROJ_DICT: PROJ_DICT[1]['index'])
    for proj in PROJ_DICT_sorted:
        proj = proj[0]  # keep only the project key from the (key, cfg) pair
        title = "开始编写【%s】工作表" % PROJ_DICT[proj]['sheet_name']
        print(title.center(40, '='))
        worksheet = workbook.add_worksheet(PROJ_DICT[proj]['sheet_name'])
        worksheet.hide_gridlines(option=2)  # hide the gridlines
        style = Styles(workbook).style_of_cell()
        style_title = Styles(workbook).style_of_cell('14')
        # style_title2 = style_of_cell('noBold')
        # style_bold = style_of_cell('bold')
        # --- table 1: all distinct years with data for this project ---
        sql = "select year from wbg_year_sta where project=%(proj)s"
        value = {'proj': proj}
        result = DB().search(sql, value, connect)
        # print(result)
        yearList = []  # e.g. [2017, 2018, 2019]
        for i in result:
            if int(i[0]) not in yearList:
                yearList.append(int(i[0]))
        yearList.sort()
        print(yearList)
        worksheet.write('B2', PROJ_DICT[proj]['sheet_name'], style_title)
        worksheet.write(3, 1, '年份', style)
        worksheet.write(3, 2, '遗留BUG数', style)
        worksheet.write(3, 3, '新建BUG总数', style)
        worksheet.write(3, 4, '遗留率', style)
        r = len(yearList)  # number of year rows; offsets all later tables
        for index in range(r):
            worksheet.write(4 + index, 1, yearList[index], style)
            sql = "select year,leave_bug_num,build_bug_num from wbg_year_sta where project=%(proj)s and year=%(year)s"
            value = {'year': yearList[index], 'proj': proj}
            result = DB().search(sql, value, connect)
            lbn = 0  # leftover bugs in this year
            nbn = 0  # new bugs in this year
            for i in result:
                lbn += i[1]
                nbn += i[2]
            worksheet.write(4 + index, 2, lbn, style)
            if proj in PROJ_BEFORE_BUG:
                # NOTE(review): the pre-wbs bug count is added for EVERY year
                # row, not only the first/oldest one - confirm intended.
                nbn += PROJ_BEFORE_BUG[proj]
            worksheet.write(4 + index, 3, nbn, style)
            if nbn == 0:
                worksheet.write(4 + index, 4, 0, style)
            else:
                worksheet.write(4 + index, 4,
                                format(int(lbn) / int(nbn), '.1%'), style)
        style_1 = Styles(workbook).style_of_cell(1)
        # --- table 2: current-year bug status, per month ---
        worksheet.merge_range(5 + r, 1, 5 + r, 3, '%s年BUG状况' % YEAR, style_1)
        worksheet.write(6 + r, 1, '月份', style)
        worksheet.write(6 + r, 2, '遗留BUG数', style)
        worksheet.write(6 + r, 3, '新建BUG总数', style)
        worksheet.write(6 + r, 4, '遗留率', style)
        sql = "select month,leave_bug_num,build_bug_num from wbg_year_sta where project=%(proj)s and year=%(year)s"
        value = {'proj': proj, 'year': YEAR}
        result = DB().search(sql, value, connect)
        # print(result)
        monthdict = {}
        for i in result:
            monthdict[int(i[0])] = (i[1], i[2])
        print(monthdict)
        row = len(monthdict)  # number of month rows, e.g. 9 or 3
        # Rebound to a sorted list of (month, (leftover, new)) pairs.
        monthdict = sorted(monthdict.items(), key=lambda x: x[0])
        # currentMonth = datetime.datetime.now().month
        index = 0
        for j in monthdict:
            mlbn = j[1][0]  # leftover bugs this month
            mbbn = j[1][1]  # new bugs this month
            month = j[0]
            index += 1
            # # todo
            # if row == currentMonth:
            #     mlbn = monthdict[str(j + 1)][0]
            #     mbbn = monthdict[str(j + 1)][1]
            #     month = j + 1
            # else:
            #     mlbn = monthdict[str(currentMonth - row + 1 + j)][0]
            #     mbbn = monthdict[str(currentMonth - row + 1 + j)][1]
            #     month = currentMonth - row + 1 + j
            worksheet.write(7 + r + index, 1, month, style)
            worksheet.write(7 + r + index, 2, mlbn, style)
            worksheet.write(7 + r + index, 3, mbbn, style)
            if mbbn == 0:
                worksheet.write(7 + r + index, 4, '0.0%', style)
            else:
                worksheet.write(7 + r + index, 4,
                                format(int(mlbn) / int(mbbn), '.1%'),
                                style)  # leftover rate
        # --- table 3: leftover-bug ageing per person ---
        worksheet.merge_range(row + 8 + r, 1, row + 9 + r, 1, '姓名', style)
        worksheet.merge_range(row + 8 + r, 2, row + 9 + r, 2, '遗留bug数 ', style)
        worksheet.merge_range(row + 8 + r, 3, row + 8 + r, 6, 'bug遗留时效 ', style)
        worksheet.write(row + 9 + r, 3, '一周', style)
        worksheet.write(row + 9 + r, 4, '一周~二周', style)
        worksheet.write(row + 9 + r, 5, '二周~一月', style)
        worksheet.write(row + 9 + r, 6, '一月以上', style)
        bugData = LEAVE_BUG[proj]
        print(bugData)
        # Drop the bookkeeping keys so only per-person entries remain
        # (this mutates LEAVE_BUG[proj] in place - see docstring note).
        del bugData['proj']
        del bugData['lbn']
        del bugData['bugStatusYear']
        r1 = len(bugData)  # number of people
        k = 0  # row offset within table 3
        total1 = 0
        total2 = 0
        total3 = 0
        total4 = 0
        total5 = 0
        # for k in range(r):  # columns
        for name in bugData:  # one row per person
            total = bugData[name]['一周'] + bugData[name]['一周~二周'] + bugData[
                name]['二周~一月'] + bugData[name]['一月以上']
            worksheet.write(row + 10 + r + k, 1, name, style)  # name
            worksheet.write(row + 10 + r + k, 2, total, style)  # leftover bug count
            worksheet.write(row + 10 + r + k, 3, bugData[name]['一周'], style)  # <= one week
            worksheet.write(row + 10 + r + k, 4, bugData[name]['一周~二周'], style)  # one to two weeks
            worksheet.write(row + 10 + r + k, 5, bugData[name]['二周~一月'], style)  # two weeks to a month
            worksheet.write(row + 10 + r + k, 6, bugData[name]['一月以上'], style)  # over a month
            k += 1
            total1 += total
            total2 += bugData[name]['一周']
            total3 += bugData[name]['一周~二周']
            total4 += bugData[name]['二周~一月']
            total5 += bugData[name]['一月以上']
        style_2 = workbook.add_format({
            'bold': True,  # bold font
            'border': 1,  # cell border width
            'align': 'center',  # horizontal alignment
            'valign': 'vcenter',  # vertical alignment
            # 'fg_color': color,  # cell background colour
            'text_wrap': True,  # wrap text automatically
            'font_size': 11,  # font size
            'font_name': u'微软雅黑'
        })
        # Totals row for table 3.
        worksheet.write(row + 10 + r + r1, 1, '汇总', style_2)
        worksheet.write(row + 10 + r + r1, 2, total1, style_2)
        worksheet.write(row + 10 + r + r1, 3, total2, style_2)
        worksheet.write(row + 10 + r + r1, 4, total3, style_2)
        worksheet.write(row + 10 + r + r1, 5, total4, style_2)
        worksheet.write(row + 10 + r + r1, 6, total5, style_2)
        if proj == 'BIM':
            # Project-specific footnote about excluded pre-2018-06 C++ bugs.
            worksheet.merge_range(
                row + 11 + r + r1, 1, row + 13 + r + r1, 6,
                "备注:2018年6月之前采用C++进行编码,此版本已经废弃,但Jira上保留C++编码版本Bug记录,本次统计已排除6月前Bug记录.2018年6月份之前的Bug共计41个",
                style_1)
        worksheet.set_column(1, 6, 12)
        worksheet.set_column(0, 0, 4)
        # print("新增项目列表:", NEW_PROJECT)
        print("【%s】工作表编写完毕".center(40, '-') % proj)
    connect.close()