def Show(CSV):
    """Open a Tk window with one button per statistical plot for the CSV
    file at path *CSV* (histogram, box, KDE, dist, violin).

    Each button hands the loaded DataFrame to the matching Analyse.Plot*
    helper when clicked.
    """
    df = pd.read_csv(CSV)
    # FIX: the original named this window `Show`, shadowing the function
    # itself; renamed to `window`.
    window = tk.Tk()
    window.title(">>>Stat Plot<<<")
    # (label, plot callback) pairs; buttons are packed in this order,
    # matching the original button1..button5 layout.
    actions = [
        ("histogramme", Analyse.Plothist),
        ("BoxPlot", Analyse.PlotBox),
        ("KdePlot", Analyse.PlotKdeplot),
        ("DistPlot", Analyse.PlotDistplot),
        ("ViolinPlot", Analyse.PlotViolinplot),
    ]
    for label, plot_fn in actions:
        # Bind plot_fn as a default argument to avoid the late-binding
        # closure pitfall.
        tk.Button(window, text=label, bg="SkyBlue2", width=10,
                  command=lambda f=plot_fn: f(df)).pack()
def main():
    """Live polling loop: feed fresh market mini-lines into Analyse and
    send an order for every deal signal it produces.

    Relies on module-level names: mkMiniLine(), sendOrder(), time,
    sleep_set. Runs forever (the while loop has no break).
    """
    import Analyse
    mini_line = []
    while (1):
        one = mkMiniLine()
        if not one:
            # no data available yet - poll again immediately
            # print "no one"
            continue
        if one and mini_line and mini_line[0] == one[0]:
            # same leading key (presumably a timestamp - TODO confirm) as
            # the previous line: nothing new, wait and retry
            # print "if one and mini_line and mini_line[0] == one[0]"
            time.sleep(sleep_set)
            continue
        mini_line = one
        Analyse.addData(mini_line)
        res = Analyse.calcPool()
        # print res
        if res:
            # a pool result may yield zero or more deal operations
            opt = Analyse.addDeal(res)
            if opt:
                for one_opt in opt:
                    print mini_line[0], sendOrder(one_opt)
        time.sleep(sleep_set)
    # NOTE(review): unreachable - the loop above never breaks.
    print "done res file"
def DisplayFrame(worldFrame, resourcesGRMaxE, mapFile, frameNo):
    """Render one saved simulation frame: a genetics scatter plot plus the
    saved map view coloured by species.

    worldFrame[0] is assumed to hold the creature list for the frame -
    TODO confirm against the caller.
    """
    # Species assignment per creature; column 1 is the species label used
    # for colouring.
    creatSpec = Analyse.findSpecies(worldFrame[0])
    if np.max(creatSpec[:,1])==0:
        # all labels zero: avoid dividing by zero, keep the raw zeros
        colours = np.array(creatSpec[:,1])
    else:
        # normalise labels to [0, 1] for use as a colour scale
        colours = (np.array(creatSpec[:,1]))/float(np.max(creatSpec[:,1]))
    # 3-axis genetics plot: speed / reproduction threshold / mouth size
    Analyse.plotForCreatures(speedReprThreshMouth, 1, 111, worldFrame[0], 'Speed', 'Repr Thresh', 'Mouth Size', 'Genetics Plot in %dth step'%(frameNo+1))
    plt.show()
    # Map view: pass the map file through only when one was supplied.
    if mapFile is not None:
        Graphics(mapFile=mapFile).DisplaySavedMapFrame(worldFrame, resourcesGRMaxE, frameNo, colours, creatSpec)
    else:
        Graphics().DisplaySavedMapFrame(worldFrame, resourcesGRMaxE, frameNo, colours, creatSpec)
def Csv():
    """Pop up a white Tk window listing every known CSV data set, one
    sunken frame per name, laid out left to right."""
    window = tk.Tk()
    window.title("Les Csv Sont")
    window['bg'] = 'white'
    for name in Analyse.GetAll():
        holder = tk.Frame(window, borderwidth=2, relief=tk.SUNKEN)
        holder.pack(side=tk.LEFT, padx=10, pady=10)
        tk.Label(holder, text=name).pack(padx=10, pady=10)
def biodiversity(step, worldHistory):
    '''Simpson's definition of diversity for the creatures alive at *step*:
    1 - (probability of two randomly chosen items being in the same group).

    Returns 0.0 when fewer than two creatures exist: no pair can be drawn,
    and the original expression divided by zero there.
    '''
    creatSpec = Analyse.findSpecies(worldHistory[step][0])
    # Count population per species label (creat[1] is the species id).
    specPops = {}
    for creat in creatSpec:
        specPops[creat[1]] = specPops.get(creat[1], 0)+1
    # list() wrap keeps this correct on Python 3, where .values() is a view.
    arr = np.array(list(specPops.values()), dtype=float)
    total = np.sum(arr)
    if total <= 1:
        # FIX: guard the 0/0 case (0 or 1 creatures alive).
        return 0.0
    return 1 - (np.sum(arr*(arr-1))/(total*(total-1)))
def Plots():
    """Open a Tk chooser with one button per available data set; clicking
    a button opens the Show() plot window for that data set."""
    chooser = tk.Tk()
    chooser.title(">>>Data Choice<<<")
    for name in Analyse.GetAll():
        # Bind `name` as a default argument so each button keeps its own
        # value (late-binding closure pitfall).
        tk.Button(chooser, text=name, bg="SkyBlue2", width=10,
                  command=lambda s=name: Show(s)).pack()
def main():
    """Back-test driver: replay test_data tick by tick through Analyse,
    then dump the accumulated results to test_info.csv.

    Relies on module-level names: mkMiniLine(), testPrint(),
    mkCsvFileWin(), data_index, test_data, test_result.
    """
    global one_deal, order_result, win_ct, opt_ct, amount_all, test_result
    import Analyse
    test_tmp = 0
    while (1):
        # stop once the replay cursor has consumed all test data
        if data_index >= len(test_data):
            break
        one = mkMiniLine()
        if not one:
            continue
        Analyse.addData(one)
        res = Analyse.calcPool()
        if res:
            1 # none  (no-op placeholder kept from the original)
            # print res
            testPrint(res)
    mkCsvFileWin("test_info.csv", test_result)
    print test_tmp
    print "done res file"
def Run_test(
    Locust_file,
    ARGS,
    result_file,
):
    """Run a headless locust load test and analyse its CSV output.

    Parameters: Locust_file - path to the locustfile; ARGS - parsed CLI
    args carrying Server, No_of_Users, Hatch_Rate, Run_Time; result_file -
    prefix for locust's --csv output files.
    Returns whatever Analyse.Analyse_Result_File(...).get_Result() yields.
    """
    # FIX: build argv as a list and run without a shell - avoids shell
    # quoting/injection problems with the interpolated arguments.
    cmd = [
        'locust',
        '-f', Locust_file,
        '--host', 'https://' + ARGS.Server,
        '--headless',
        '-u', str(ARGS.No_of_Users),
        '-r', str(ARGS.Hatch_Rate),
        '-t', str(ARGS.Run_Time),
        '--csv={}'.format(result_file),
    ]
    subprocess.call(cmd)
    # NOTE(review): `columns` is a module-level global here - confirm it
    # is defined before this is called.
    analyser = Analyse.Analyse_Result_File(columns=columns,
                                           result_csv=result_file,
                                           args=ARGS)
    return analyser.get_Result()
def setUp(self):
    """Create a fresh Analyse under test plus fixture lookup tables:
    five diagrams keyed 1..5 and four MoSCoW requirements keyed 1..4."""
    self.testobject = Analyse("Bert")
    self.diagrams = {
        1: GetDevelopDia(),
        2: GetLogicalDia(),
        3: GetPhysicalDia(),
        4: GetProcesdia(),
        5: GetUseCaseDia(),
    }
    # Exactly one priority flag is True per requirement (MoSCoW order).
    self.requirement = {}
    for pos, label in enumerate(("Must", "Should", "Could", "Would"), start=1):
        flags = tuple(i == pos for i in range(1, 5))
        self.requirement[pos] = Requirement(label, Priority(*flags), Status(False), pos)
def __init__(self):
    """Initialise config/output file paths, cash state and helper API
    clients (Tushare, Sina, Analyse).

    FIX: uses os.path.join for the paths, replacing the duplicated
    Linux/Windows branches that only differed in the path separator.
    """
    BASE = os.path.split(os.path.realpath(__file__))[0]
    CFG_PATH = os.path.join(BASE, 'Config')
    OUT_PATH = os.path.join(BASE, 'output')
    self.hold_record_file = os.path.join(CFG_PATH, 'hold_record.csv')
    self.f_record_cash = os.path.join(CFG_PATH, 'hold_cash.json')
    self.f_total = os.path.join(OUT_PATH, 'AssetOverview.csv')
    # NOTE: a timestamp suffix must be appended to this file-name prefix.
    self.f_record = os.path.join(OUT_PATH, 'HoldRecord_')
    self.cash = 0           # account cash
    self.acc_earn = 0       # accumulated realised profit/loss
    self.get_user_record()  # loads the user's holdings DataFrame
    self.ts = TushareApp.ts_app()
    self.sina = SinaApp.SinaApp()
    self.anly = Analyse.Analyse()
class TestAnalyse(TestCase):
    """Unit tests for the Analyse scoring object."""

    def setUp(self):
        # Fresh object under test plus fixture lookup tables.
        self.testobject = Analyse("Bert")
        self.diagrams = {
            1: GetDevelopDia(),
            2: GetLogicalDia(),
            3: GetPhysicalDia(),
            4: GetProcesdia(),
            5: GetUseCaseDia(),
        }
        # MoSCoW requirements: exactly one priority flag set per entry.
        self.requirement = {}
        for pos, label in enumerate(("Must", "Should", "Could", "Would"), start=1):
            flags = tuple(i == pos for i in range(1, 5))
            self.requirement[pos] = Requirement(label, Priority(*flags), Status(False), pos)

    def test_CheckDia(self):
        # CheckDia must accept the five fixture diagrams.
        if not self.testobject.CheckDia(1, 2, 3, 4, 5, self.diagrams):
            self.fail()

    def test_CheckReq(self):
        # CheckReq must accept the four fixture requirements.
        if not self.testobject.CheckReq(1, 2, 3, 4, self.requirement):
            self.fail()

    def test_addpoints(self):
        # addpoints() should raise the score by exactly 5.
        before = self.testobject.getscore()
        expected = before + 5
        self.testobject.addpoints()
        if not int(self.testobject.getscore()) == expected:
            self.fail()
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 15 08:47:41 2019

@author: yinchao

Launches the holdings-analysis background thread and idles in the main
thread until Ctrl+C, then tears the worker thread down.
"""
import threading
import time

import Analyse
import HoldRecordApp
import ThreadQuit

hd = HoldRecordApp.hd_record()
aly = Analyse.Analyse()
raw_record = hd.GetUserStockList()  # fetch the watch list

# Background thread that continuously analyses the user's holdings.
task_record_analyse = threading.Thread(target=hd.InfiniteHoldRecordAnalyse)
task_record_analyse.start()
print('成功创建持仓分析线程')

# Optional real-time alarm thread (currently disabled).
# task_guard = threading.Thread(target=aly.AlarmGuard, args=[raw_record])
# task_guard.start()
# print('成功创建实时预警线程')

try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:  # Ctrl+C terminates the program cleanly
    # FIX: the original also called ThreadQuit.stop_thread(task_guard),
    # but task_guard is never created (its creation is commented out),
    # which raised a NameError on exit instead of shutting down cleanly.
    ThreadQuit.stop_thread(task_record_analyse)
    print('成功销毁所有线程,安全退出')
class Dico(object):
    'The Dico class loads and manages the dictionnaries'
    def __init__(self):
        # type selects the lookup strategy in get(): 'dico'/'sdico' use
        # the hash table, 'proteus' delegates to the Analyse object.
        self.type = ''
        self.dictSw = {} # Simple word dictionary (former dicoMs); values are pickled lists of tag dicts
        self.dictCw = {} # Compound word dictionary (former dicoMc); nested word-by-word trie
    def load(self,fileList,type='dico'):
        """ initialises the dictionaries:
        defines the dictionary type and the files to use
        possible file types : dico, sdico, proteus, dicomc
        chooses the loading function according to the class type
        """
        self.type = type
        if (type == 'dico'):
            for fileDict in fileList:
                self.loadDict(fileDict)
        elif (type == 'sdico'):
            for fileDict in fileList:
                self.loadSDict(fileDict)
        elif (type == 'proteus'):
            # proteus needs exactly two entries: tables directory + lemma file
            self.loadProteus(fileList[0],fileList[1])
        elif (type == 'dicomc'):
            for fileDict in fileList:
                self.loadDictCw(fileDict)
    def loadDict(self,nameFileDict):
        """reads the simple word dictionaries (with .dico extension) from a
        file of the following format:
            form<tab>lemma<tab>tag<tab>...
        the first line of the file must start with '>' and contain tag names
        separated by tabulation
        """
        # position of the inflected form within each data row
        inflPos = 0
        dictCheck = {}
        # creates a new dictionary with the same keys as the simple word
        # dictionary to check whether an inflected form has already been
        # loaded.
        # NOTE(review): fromkeys() returns a new dict and does not mutate
        # dictCheck - as written this line has no effect, so forms already
        # present in self.dictSw from earlier files are overwritten, not
        # appended to.
        dictCheck.fromkeys(list(self.dictSw.keys()))
        try:
            # NOTE(review): file is never closed; a with-block would be safer
            filePtr = open(nameFileDict,encoding='utf-8')
            #cpt =0
            for line in filePtr.readlines():
                #cpt = cpt + 1
                #print str(cpt) #+' '+chr(13),
                if line.startswith('>'):
                    # treatment of the first (header) line: tag names, with
                    # 'f' marking the inflected-form column
                    tags = line[1:].rstrip().split("\t")
                    inflPos = tags.index('f')
                    tags.remove('f')
                elif not line.startswith('#'):
                    # data row ('#' lines are comments)
                    infos = line.rstrip().split("\t")
                    #infos = [x.encode() for x in line.rstrip().split("\t")]
                    inflForm = infos.pop(inflPos)
                    if inflForm in dictCheck:
                        # form already seen in this load: append a new
                        # analysis to its pickled list of tag dicts
                        tmpDict = pickle.loads(self.dictSw[inflForm])
                        # creates a dictionary with tags as keys and info as values
                        newDict = dict(list(zip(tags,infos)))
                        # adds the new dictionary to the list of dictionaries
                        tmpDict.append(newDict)
                    else:
                        # first occurrence: start a fresh analysis list
                        tmpDict = [dict(list(zip(tags,infos)))]
                        dictCheck[inflForm] = 1
                    # store back as a pickled blob (memory-compact storage)
                    self.dictSw[inflForm] = pickle.dumps(tmpDict)
        except Exception as e:
            # NOTE(review): stderr.write expects a str - passing the
            # exception object raises TypeError here.
            sys.stderr.write(e)
            sys.stderr.write('error: reading of a simple words dictionary file')
            # NOTE(review): `raise exit` raises the builtin exit helper,
            # which is not an exception class - likely meant sys.exit().
            raise exit
    def loadDictCw(self,nameFileDict):
        'reads the compound word dictionary into a word-by-word trie'
        try:
            filePtr = open(nameFileDict,encoding='utf-8')
            for line in filePtr.readlines():
                if line.startswith('>'):
                    # header line: tag names, 'f' marks the form column
                    tags = line[1:].rstrip().split("\t")
                    inflPos = tags.index('f')
                    tags.remove('f')
                elif not line.startswith('#'):
                    infos = line.rstrip().split("\t")
                    # split the compound form into words, keeping hyphens
                    # and apostrophes as separate/attached tokens
                    lwords = infos.pop(inflPos).replace("-"," - ").replace("'","' ").split(" ")
                    # walk/extend the trie; each node is [analyses, children]
                    dico = self.dictCw
                    for m in lwords:
                        if not m in dico:
                            nd = {}
                            dico[m] = [{},nd]
                            prec = dico
                            dico = nd
                        else:
                            dico = dico[m][1]
                    # attach the analyses at the last word's node
                    # (reconstructed placement - TODO confirm; `prec` is
                    # only set when a new node was created)
                    if m in prec:
                        prec[m][0] = [dict(list(zip(tags,infos)))]
                    else:
                        prec[m] = [[dict(list(zip(tags,infos)))],{}] # no ambiguity for compounds
        except:
            sys.stderr.write('error: reading of a compound word dictionary file ['+line.rstrip()+']['+nameFileDict+']\n')
            # NOTE(review): same `raise exit` issue as loadDict
            raise exit
    def loadSDict(self,nameDictSer):
        'reads dictionary from a serialised file'
        # NOTE(review): pickle data should be opened in binary mode ('rb')
        newDictSer = open(nameDictSer,'r')
        self.dictSw = pickle.load(newDictSer)
    def loadProteus(self,dossierTables,filePtrLemmes):
        'reads proteus dictionary'
        self.ana = Analyse(dossierTables,filePtrLemmes)
    def writeSDict(self,nameDictSer):
        'writes the serialised dictionary representation into a file'
        # NOTE(review): pickle data should be written in binary mode ('wb')
        newDictSer = open(nameDictSer,'w')
        pickle.dump(self.dictSw,newDictSer)
        newDictSer.close()
    def get(self,inflForm):
        'dictionary access: returns the analysis list, or [] on any failure'
        try:
            if (self.type == 'dico' or self.type == 'sdico'):
                return self.getDict(inflForm)
            elif (self.type == 'proteus'):
                return self.getProteus(inflForm)
        except:
            # unknown form (KeyError) or analyser failure -> empty result
            return []
    def getDictCw(self):
        'compound word dictionary access'
        return self.dictCw
    def getDict(self,inflForm):
        'access to a dictionary in the hash table form (unpickles the stored list)'
        return pickle.loads(self.dictSw[inflForm])
    def getProteus(self,inflForm):
        'proteus dictionary access'
        return self.ana.analyse(inflForm)
def loadProteus(self,dossierTables,filePtrLemmes):
    'reads the proteus dictionary: builds the Analyse helper from the tables directory and lemma file; later lookups delegate to self.ana'
    self.ana = Analyse(dossierTables,filePtrLemmes)
warnings.simplefilter("ignore", UserWarning) import os.path import Analyse import outils import pandas as pd import sys from gensim.models import LdaModel if __name__ == '__main__': if len(sys.argv) < 2 & len(sys.argv) > 4: print("USAGE: argument expected") sys.exit(1) if os.path.isfile(sys.argv[1]): print("Lecture de la matrice") Analyse.prepare(sys.argv[1]) print("lecture du dictionnaire") loaded_dict = corpora.Dictionary.load('mydict.dict') print("lecture du corpus") corpus = corpora.MmCorpus('bow_corpus.mm') nbtopic = 5 output = './coordonnees.json' if len(sys.argv) > 2: if int(sys.argv[2]) > 1 & int(sys.argv[2]) < 200: nbtopic = int(sys.argv[2]) else: if os.path.isfile(sys.argv[2]): print("Attachement des Metadatas") a = pd.read_csv( sys.argv[3], sep="\t",
def main(): global test_result, global_test_data, global_data_index, global_error_index, global_give_opside global global_true_order_direction, global_true_stop_direction, global_price_chg_min import Analyse import Analyse_pool_todo as Analyse_pool test_tmp = 0 stop_direction = 0 crr_direction = 0 check_i = 0 check_max = 600000 smooth_market_indexs = [0,0,0,0,0,0] Analyse_pool.initAnaType("time") Analyse_pool.initPoolSize(18, "time") long_time_direction = 0 while(1): if global_data_index >= len(global_test_data): break one = mkMiniLine() if not one: continue check_info = Order.checkTick(one[6], one[7], one[5]) if check_info: stop_direction = global_give_opside * check_info['direction'] global_error_index += check_info['profit'] check_info_true = TrueOrder.checkTick(one[6], one[7], one[5]) Analyse.addData(one[:]) tmp = one[:] tmp[2] = one[9] tmp[3] = one[10] Analyse_pool.addData(tmp) res = Analyse.calcPool() res2 = Analyse_pool.calcPool() if res2: if res2[3] != 0: global_give_opside = -1 long_time_direction = res2[3] stop_direction = long_time_direction # print res[3], res2[3] if res: crr_sig = Analyse.crrSigStatus() # sendOrder("3", one[5], one[0] + " " + one[1], True, crr_sig) ## global_give_opside= 0 # if crr_direction == crr_sig[0]: # global_give_opside=0 ## if crr_sig[1] > 0 and crr_sig[0] > 0: # if crr_sig[1] >= 15: # global_give_opside = 1 ## elif crr_sig[1] > 8 and crr_sig[0] == -1: ## global_give_opside = 1 # else: # sendOrder("3", one[5], one[0] + " " + one[1], True, crr_sig) # global_give_opside=0 # stop_direction = global_give_opside * Order.global_deal['direction'] # if Order.isDealOpen() and (crr_sig[0] == 0 or crr_sig[0] == Order.global_deal["direction"]): # if Order.orderProfit(Order.global_deal, one[5]) < global_price_chg_min: # check_i += 1 ## else: ## check_i -= 1 # if abs(check_i) > check_max: # print check_i # check_i = 0 # global_give_opside = 1 # sendOrder("3", one[5], one[0] + " " + one[1], True, crr_sig) # global_give_opside= 0 # 
stop_direction = global_give_opside * Order.global_deal['direction'] # global_true_order_direction = 1 crr_direction = crr_sig[0] market_indexs = [res[1],res[2],res[5],res[6],res[7],res[8]] smooth_market_indexs = smoothIndex(market_indexs, smooth_market_indexs) opt = Analyse.addDeal(res) if opt: for one_opt in opt: # one_opt = str(random.randint(1,2)) # print stop_direction if stop_direction == Order.OP_SELL: # print stop_direction, one_opt if one_opt == "2": one_opt = "3" elif one_opt == "1": stop_direction = 0 elif stop_direction == Order.OP_UP: # print stop_direction, one_opt if one_opt == "1": one_opt = "3" elif one_opt == "2": stop_direction = 0 # print global_true_order_direction order_info = sendOrder(one_opt, one[5], one[0] + " " + one[1], False, crr_sig) # print res res = Order.profitInfo() res_real = TrueOrder.profitInfo() mkCsvFileWin("test_deals_info.csv", res) mkCsvFileWin("test_deals_info_real.csv", res_real) mkCsvFileWin("test_info.csv", test_result) print test_tmp print "done res file"
""" Here we will test the functions of analysis """ # -*- coding: UTF-8 -*- from time import sleep import pandas as pd import Analyse import settings df = pd.read_excel("csv/Histórico dos Chamados.xlsx") descriptions = df[df["SERVICO"] == "Computadores e Acessórios"]["DESCRICAO"] # Get tokens, make csv with them; and get summarized descriptions which contains some tokens Analyze = Analyse.Analysis(descriptions) nameDf = "csv/Tokens - Computadores e Acessórios.csv" Analyze.getFrequentTokens(nameToSave=nameDf) goodDescripts = [Analyze.analyseDescription(description, nameDf, 3) for description in descriptions] goodDescripts = [x for x in goodDescripts if x != None] # Now, remove bad simbols greatDescripts = Analyze.removeBadSimbols(goodDescripts, settings.simbols) # And html/javascript tags greatDescripts = Analyze.removeTags(greatDescripts) # Save descripts in csv file dfDescript = pd.Series(greatDescripts) dfDescript.to_csv('Descrições - Computadores e Acessórios.csv', encoding='utf-8')
def DisplaySim(worldHistory, resourcesGRMaxE, displayVisualSim=True, mapFile=None):
    """Replay a finished simulation: optionally show the saved map
    animation, then draw summary plots (speed, population, vision,
    resource energy, biodiversity) and two genetics snapshots.

    Relies on module-level plot helpers/stat functions: Graphics, Analyse,
    totPop, avgSpeed, avgVis, totERes, biodiversity, speedReprThreshMouth.
    """
    if displayVisualSim:
        # map replay; only pass mapFile through when one was supplied
        if mapFile is not None:
            g = Graphics(mapFile=mapFile)
        else:
            g = Graphics()
        g.DisplaySavedMap(worldHistory, resourcesGRMaxE)
        # pygame.quit()
    print 'Simulation Complete.....Analysing Data'
    # total population per simulation step
    popForStep = np.ndarray(len(worldHistory))
    # bioForStep = np.ndarray(len(worldHistory))
    for step in xrange(len(worldHistory)):
        popForStep[step] = totPop(step, worldHistory)
        # bioForStep[step] = biodiversity(step, worldHistory)
    # step of peak population, clamped away from both ends so the
    # POI +/- offsets below stay in range
    POI = np.clip(np.argmax(popForStep), 10, len(worldHistory)-16)
    # figure 1: per-step summary curves (2x3 grid)
    Analyse.plotForSteps(avgSpeed, 1, 231, len(worldHistory), "Avg Speed", 'ro-', 1, (worldHistory))
    Analyse.plotForSteps(totPop, 1, 232, len(worldHistory), "Population", 'bo-', 1, (worldHistory))
    #Analyse.plotForCreatures(speedVis, 1, 233, worldHistory[POI][0], 'Speed', 'Vis', 'Speed vs Vision in 914th step')
    Analyse.plotForSteps(avgVis, 1, 234, len(worldHistory), "Avg Vis", 'go-', 1, (worldHistory))
    Analyse.plotForSteps(totERes, 1, 235, len(worldHistory), "Resource Energy", 'yo-', 1, (worldHistory))
    Analyse.plotMeanSteps(biodiversity, 1, 236, len(worldHistory), "Biodiversity", 'bo-', 8, 5, (worldHistory))
    #Analyse.plotForCreatures(speedReprThreshMouth, 2, 231, worldHistory[POI-5][0], 'Speed', 'Repr Thresh', 'Mouth Size', 'Genetics Plot in %dth step'%(POI-4))
    #Analyse.plotForCreatures(speedReprThreshMouth, 2, 232, worldHistory[POI][0], 'Speed', 'Repr Thresh', 'Mouth Size', 'Genetics Plot in %dth step'%(POI+1))
    #Analyse.plotForCreatures(speedReprThreshMouth, 2, 233, worldHistory[POI+5][0], 'Speed', 'Repr Thresh', 'Mouth Size', 'Genetics Plot in %dth step'%(POI+6))
    #Analyse.plotForCreatures(speedReprThreshMouth, 2, 234, worldHistory[POI+25][0], 'Speed', 'Repr Thresh', 'Mouth Size', 'Genetics Plot in %dth step'%(POI+26))
    #Analyse.plotForCreatures(speedReprThreshMouth, 2, 235, worldHistory[POI+50][0], 'Speed', 'Repr Thresh', 'Mouth Size', 'Genetics Plot in %dth step'%(POI+51))
    # figure 2: genetics snapshots at fixed steps 500 and 1500
    # NOTE(review): assumes the history holds at least 1500 steps
    Analyse.plotForCreatures(speedReprThreshMouth, 2, 121, worldHistory[499][0], 'Speed', 'Repr Thresh', 'Mouth Size', 'Genetics Plot in %dth step'%(500), False)
    Analyse.plotForCreatures(speedReprThreshMouth, 2, 122, worldHistory[1499][0], 'Speed', 'Repr Thresh', 'Mouth Size', 'Genetics Plot in %dth step'%(1500), False)
    #Analyse.plotForCreatures(speedReprThreshMouth, 3, 111, worldHistory[POI][0], 'Speed', 'Repr Thresh', 'Mouth Size', 'Genetics Plot in %dth step'%(POI), True)
    #Analyse.findSpecies(worldHistory[POI+599][0], True)
    plt.show()