def next(self):
    if self.f_index >= self.dataset.get_n_files() or self.f_index > self.max_files:
        raise StopIteration
    else:
        self.f_index += 1
        f_number = self.file_order[self.f_index - 1]
        acoustics_file = self.dataset.get_input_file(f_number)
        acoustics = Data.expand_array_in_blocks(acoustics_file, self.dataset.n_frames, self.dataset.frame_shift)
        if self.dataset.has_output():
            i = 0
            output = []
            times = self.dataset.get_output_times_file(f_number)
            labels = self.dataset.get_output_values_file(f_number)
            for j in xrange(Data.n_blocks(acoustics_file.shape[0], self.dataset.n_frames, self.dataset.frame_shift)):
                # Advance i to the label interval containing the centre of block j
                while (self.dataset.n_frames // 2 + j) >= times[i][1] and i < len(times) - 1:
                    i += 1
                output.append(labels[i])  # Get label for each t
            x = [acoustics, np.array(output)]
        else:
            x = [acoustics]
        if self.return_path:
            x.append(self.dataset.get_file_path(f_number))
        return tuple(x)
def get(self):
    qq = self.get_argument('qq', None)
    passwd = self.get_argument('skey', None)
    email = self.get_argument('email', None)
    recaptcha_response_field = self.get_argument('recaptcha_response_field', None)
    recaptcha_challenge_field = self.get_argument('recaptcha_challenge_field', None)
    print recaptcha_response_field
    # Blocking call -- this should be made asynchronous
    ret = libs.captcha.submit(recaptcha_challenge_field, recaptcha_response_field,
                              Config.recaptcha_privatekey, self.request.remote_ip)
    print ret.is_valid
    tips = ''
    while True:
        if not ret.is_valid:
            tips = '验证码错误'  # "Incorrect captcha"
            break
        # Guard against email being None before matching, and use a raw string for the regex
        if email is not None and re.match(r"^.+\@(\[?)[a-zA-Z0-9\-\.]+\.([a-zA-Z]{2,3}|[0-9]{1,3})(\]?)$", email) is None:
            email = None
        if passwd is None or qq is None:
            tips = 'SKey or QQ 不合法,重新输入'  # "SKey or QQ is invalid, please re-enter"
            break
        if email is not None:
            Data.updateUserEmailData(qq, email)
        Data.updateUserData(qq, passwd)
        tips = '提交成功,每日0和12点自动签到'  # "Submitted; auto sign-in daily at 00:00 and 12:00"
        break
    self.render("template_taken.html", title='QQ群自动签到', tips=tips)
def getPredictLabelWithScikit(header):
    # Open in binary mode and close the handle after loading the pickled model
    with open(r'./RandomForestScikitModel', 'rb') as f:
        clfScikit = pickle.load(f)
    #print 'header:', header
    headerVec = GenerateVectorMulLines_everyline.transformHeader2Vector(header[:])
    Data.transformVector2LibsvmStyle(headerVec, './pythonsrc/tmp/tmp.svmdata')
    X, y = load_svmlight_file('./pythonsrc/tmp/tmp.svmdata')
    y_pred = clfScikit.predict(X)
    label = ['<' + Data.CLASSIFICATION[int(x)] + '>' for x in y_pred]
    return label
def __save(addToLog=True):
    Thread.Lock("Framework.Dict", addToLog=False)
    dictToSave = copy.deepcopy(__dict)
    global __saveScheduled
    __saveScheduled = False
    try:
        Data.__pickle("%s/Dict" % Data.__dataPath, dictToSave)
        if addToLog:
            PMS.Log("(Framework) Saved the dictionary file")
    finally:
        Thread.Unlock("Framework.Dict", addToLog=False)
def close_open(self, y):
    """
    Logic to load an account.
    :param y: string
    """
    account = Data.load(str(y))  # load once instead of three times
    self.transaction_list = account["transactions"]
    self.categories = account["categories"]
    self.amounts = account["category_amounts"]
    self.s.set(str(y))
    self.new_window.destroy()
    self.t.set("$" + str(Register.get_total(self.transaction_list)))
    self.display_list()
def fill_files_buffer(self):
    self.input_files = []
    self.output_files = []
    self.srcs = []
    buffer_size = 0
    index = 0
    if self.file_index >= len(self.files_order):
        raise StopIteration
    # Fill the buffer
    while buffer_size < self.max_buffer_size and self.file_index < len(self.files_order):
        ## TODO add size in bytes
        # Get the input and output files
        inp = self.dataset.get_input_file(self.files_order[self.file_index])
        outp = self.dataset.get_output_values_file(self.files_order[self.file_index])
        self.input_files.append(inp)
        self.output_files.append(outp)
        buffer_size += inp.shape[0] * inp.ctypes.strides[0]
        buffer_size += outp.shape[0] * outp.ctypes.strides[0]
        inp_src = inp.ctypes.data
        outp_src = outp.ctypes.data
        times = self.dataset.get_output_times_file(self.files_order[self.file_index])
        if self.dataset.has_times:
            i = 0
            tb0 = 0.5 * (self.dataset.frame_duration + (self.n_frames - 1) * self.dataset.frame_time_shift)
            time_step = self.dataset.frame_time_shift * self.frame_shift
            for j in xrange(Data.n_blocks(inp.shape[0], self.n_frames, self.frame_shift)):
                # Time for the center of the block of frames
                t = tb0 + time_step * j
                # i is the index in the label file corresponding to that time
                while t > times[i][1] and i < len(times) - 1:
                    i += 1
                    outp_src += int(self.output_size)
                self.srcs.append((inp_src, outp_src))  # Add an entry to the list of addresses
                inp_src += int(self.frame_size * self.frame_shift)
        else:
            # Has frame indexes instead of times
            i = 0
            # for j in xrange(1 + self.n_frames//2, Data.n_blocks(inp.shape[0], self.n_frames, self.frame_shift)):
            #     while j > times[i][1] and i < len(times) - 1:
            for j in xrange(Data.n_blocks(inp.shape[0], self.n_frames, self.frame_shift)):
                # while (self.n_frames//2 + j) > times[i][1] and i < len(times) - 1:
                while (self.n_frames // 2 + j) >= times[i][1] and i < len(times) - 1:
                    i += 1
                    outp_src += int(self.output_size)
                self.srcs.append((inp_src, outp_src))  # Add an entry to the list of addresses
                inp_src += int(self.frame_size * self.frame_shift)
        # Next file and next buffer index
        index += 1
        self.file_index += 1
    if self.shuffle:
        random.shuffle(self.srcs)
    self.srcs_index = 0
def mark_result():
    actualdir = globalData.PATH + '/TestResult/ActualScreenShot/'
    diffdir = globalData.PATH + '/TestResult/DifferentScreenShot/'
    for i in os.listdir(diffdir):
        if endWith(os.path.join(diffdir, i), '.png'):
            module = i.split('_')[0]
            case = i.split('_')[1].split('.')[0]
            Data.setExecutionresult(module, int(case), 'Fail')
    for j in os.listdir(actualdir):
        if endWith(os.path.join(actualdir, j), '.png'):
            if not os.path.exists(os.path.join(diffdir, j)):
                module = j.split('_')[0]
                case = j.split('_')[1].split('.')[0]
                Data.setExecutionresult(module, int(case), 'Pass')
def main(self):
    print
    print("Romulus10's Quick Checkbook Register")
    print
    done = False
    while not done:
        print(self.account_name)
        self.total = self.fin.get_total(self.transactions)
        if self.total is None:
            print("$0")
        else:
            print('$' + str(self.total))
        cmd = raw_input('> ')
        cmd = cmd.split(' ')
        while len(cmd) < 4:
            cmd.append('')
        if cmd[0] == "quit":
            done = True
        if cmd[0] == "help":
            print(self.help)
        if cmd[0] == "new":
            if cmd[1] != '':
                self.transactions = Data.new(cmd[1])
                self.account_name = cmd[1]
        if cmd[0] == "load":
            if cmd[1] != '':
                account = Data.load(cmd[1])  # load once instead of three times
                self.transactions = account["transactions"]
                self.categories = account["categories"]
                self.amounts = account["category_amounts"]
                self.account_name = cmd[1]
        if cmd[0] == "save":
            Data.save(self.account_name, self.transactions)
        if cmd[0] == "copy":
            Data.copy(cmd[1], cmd[2])
        if cmd[0] == "add":
            if cmd[1] != '' and cmd[2] != '' and cmd[3] != '':
                self.add(cmd)
        if cmd[0] == "delete":
            if cmd[1] != '':
                x = None
                for y in self.transactions:
                    if y.number == int(cmd[1]):
                        x = y
                if x is not None:  # guard: remove(None) would raise ValueError
                    self.transactions.remove(x)
        if cmd[0] == "print":
            t = PrettyTable(["Number", "Name", "Category", "Date", "Amount"])
            for x in self.transactions:
                t.add_row([x.number, x.name, x.category, x.date, ('$' + str(x.value))])
            print(t)
        if cmd[0] == "categories":
            t = PrettyTable(["Name", "Current Value"])
            for i in range(len(self.categories)):
                t.add_row([str(self.categories[i]), ('$' + str(self.amounts[i]))])
            print(t)
        if cmd[0] == "gui":
            gui = TKBook()
            gui.root.mainloop()
    print
    print
def __save():
    global __saveScheduled
    Thread.Lock("Framework.HTTPCache", addToLog=False)
    try:
        # Save the cache
        Data.__pickle("%s/HTTPCache" % Data.__dataPath, __cache)
        # Save the cookie jar
        if __cookieJar is not None:
            __cookieJar.save("%s/HTTPCookies" % Data.__dataPath)
    finally:
        __saveScheduled = False
        Thread.Unlock("Framework.HTTPCache", addToLog=False)
    PMS.Log("(Framework) Saved shared HTTP data")
def main():
    tickers = Data.readTickerFile(Config.TickerFilePath)
    year, month, day = map(int, Config.TimeBegin.split('-'))
    tsBeg = datetime(year, month, day)
    year, month, day = map(int, Config.TimeEnd.split('-'))
    tsEnd = datetime(year, month, day)
    for t in tickers:
        ts = Data.getStockPrices(t, tsBeg, tsEnd)
        ts = ts['Adj Close']
        #res = Analyzer.adfullerTest(ts)
        #print t + ' Ad Fuller Test = '
        #print res
        res = Hurst.hurst(ts)
        print t + ' Hurst = %f' % res
    return 0
def __init__(self, master=None):
    """The main constructor of the frame. As it is quite big, I split it up
    into subfunctions for the various UI parts; it might be worth using
    child classes instead.

    The main app window has a status bar at the bottom with progress
    messages, the square matplotlib canvas on one side, and a narrowish
    bar with samples, sliders, etc. on the other.
    """
    tk.Frame.__init__(self, master, relief=tk.SUNKEN)
    self.master = master
    self.d = Data(self)
    self.c = Config(O)
    self.canvas = dict()
    self.d.sList = []
    self.d.oList = []
    self.init_canvas_hyperbolas()
    self.init_canvas_pwp(active=True)
    self.init_sample_frame()
    #self.init_statusbar()
    self.init_slider_frame()
    self.init_menubar()
    # enable expansion
    tk.Grid.rowconfigure(self, 1, weight=10)
    tk.Grid.rowconfigure(self, 2, weight=1)
    tk.Grid.columnconfigure(self, 1, weight=1)
    tk.Grid.columnconfigure(self, 2, weight=1)
    tk.Grid.columnconfigure(self, 0, weight=2)
def get_serial(line):
    # print 'line', line
    try:
        if line[0] == ' ':  # equality, not identity ('is'), for string comparison
            print line[1:]
            return
        else:
            vals = line.split(',')
            if vals[0] == '':
                print 'return', vals
                return
            if vals[0] in 'AGMDE':
                # global angles
                #print 'AGMDE', vals
                Data.set_angle(vals[0], map(int, vals[1:]))
                # if len(logs[vals[0]]) > 1024:
                #     logs[vals[0]].pop(0)
                # logs[vals[0]].append(map(int, vals[1:]))
            elif vals[0] in 'T':
                Data.set_timings(map(int, vals[1:]))
            elif vals[0] in 'P':
                Data.set_ratios(map(float, vals[1:]))
            elif vals[0] in 'R':
                Data.set_rx(map(int, vals[1:]))
            elif vals[0] in 'abcd':
                global motors
                Data.set_pid(vals[0], map(int, vals[1:]))
                # motors[vals[0]].put(map(int, vals[1:]))
            else:
                print 'line', line,
    except Exception as e:
        print 'serial error:',
        print line
        print e.message
def checkend(self):
    if self.user[2] <= 0:  # zero health!
        print "OH NO. You lost..."
        print "Going to main menu."
        time.sleep(3)
        Data.start_game(self.original_stats)
    if self.mons[1] <= 0:
        print "You won!"
        gain = random.randint(1, 5)
        if gain > 2:
            print "Gained stats!"
            self.original_stats[2] += gain  ## TODO: stat changing per level gain?
            self.original_stats[3] += gain
            self.original_stats[4] += gain
            self.original_stats[5] += gain
        time.sleep(2)
        Data.start_game(self.original_stats)
def feedforward(x):
    for i, l in enumerate(layers):
        if i == 0:
            blocks = Data.expand_array_in_blocks(x, dataset.n_frames, dataset.frame_shift)
            x = l.feedforward(blocks.T).T.copy()
        else:
            x = l.feedforward(x.T).T.copy()
    return x
def test_Data(self):
    gs = self.gs
    t_s = time.time()
    t_data_s = time.time()
    data = Data(gs)
    t_data_e = time.time()
    # print data
    t_parse_s = time.time()
    data.parse(gs)
    t_parse_e = time.time()
    # print data
    print "data diff: %d ms" % int((t_data_e - t_data_s) * 1000)
    print "pars diff: %d ms" % int((t_parse_e - t_parse_s) * 1000)
    bots = []
    t_bots_s = time.time()
    bots.append(NextBot(data, 30))
    # bots.append(SendToNext(data, 5))
    # bots.append(RndBot(data, 5))
    # bots.append(RndBot(data, 7))
    bots.append(RndBot(data, 30))
    for bot in bots:
        bot.calc()
    t_bots_e = time.time()
    print "bots diff: %d ms" % int((t_bots_e - t_bots_s) * 1000)
    for bot in bots:
        bot.run()
    t_e = time.time()
    for bot in bots:
        s1, s2, s3, r, a, n = bot.get()
        print str(s1) + ", " + str(s2) + ", " + str(s3) + ", " + str(a) + ", " + str(n) + "\n"
    print "-" * 80
    bot = data.getBest(bots)
    print bot.get()
    bot.correction(data)
    print "-" * 80
    for i, c in enumerate(data.camps):
        if c[C_OWNER] == 1:
            print "id: ", i, " ", c[0:5]
    print "-" * 80
    for i, c in enumerate(bot.camps):
        if c[C_OWNER] == 1:
            print "id: ", i, " ", c[0:5]
    print "all diff: %d ms" % int((t_e - t_s) * 1000)
def handler(clientsock, addr):
    data = clientsock.recv(BUFSIZ)
    #clientsock.send('echoed:..', data)
    if data:
        print data
        message = Data.handleMessage(data)
        print "sending [" + message + "]"
        clientsock.send(message)
    clientsock.close()
def __loadCache():
    global __cache
    path = "%s/HTTPCache" % Data.__dataPath
    if os.path.exists(path):
        try:
            __cache = Data.__unpickle(path)
            PMS.Log("(Framework) Loaded HTTP cache")
        except:
            __cache = {}
            PMS.Log("(Framework) An error occurred when loading the HTTP cache")
def hyper_search(X, y, X_dev=None, y_dev=None, ccc=False):
    if X_dev is None:  # '== None' is ambiguous element-wise on arrays
        dataset = Data.partition(X, y)
        X_train = dataset["X_train"]
        y_train = dataset["y_train"]
        X_dev = dataset["X_dev"]
        y_dev = dataset["y_dev"]
    else:
        X_train = X
        y_train = y
    # Find the optimal hyperparameter setting
    lam_arr = [0.01, 0.05, 0.1, 1, 10]
    a_arr = [0, 0.001, 0.01, 0.1, 0.5, 1]
    eta_arr = [0.01, 0.1, 0.3, 0.5, 0.7, 0.9, 1, 1.1]
    n_arr = [10, 20, 30, 50]
    var_prior_arr = [0.1, 0.5, 0.7, 1, 1.1, 1.5, 2]
    # lam_arr = [0.02, 0.03, 0.04]
    # a_arr = [0, 0.01]
    # eta_arr = [0.0001]
    # n_arr = [50, 100, 150]
    # var_prior_arr = [0.1, 3]
    best_ep = 1e6
    best_em = 1e6
    if ccc:
        best_ep = -1e6
        best_em = -1e6
    params = {}
    for lam in lam_arr:
        for n in n_arr:
            ############################### FOR EP ###############################
            for var_prior in var_prior_arr:
                err = ep_run(X_train, y_train, X_dev, y_dev, n, lam=lam, var_prior=var_prior, ccc=ccc)
                if (ccc and err > best_ep) or (not ccc and err < best_ep):
                    best_ep = err
                    params["lam"] = lam
                    params["n"] = n
                    params["var_prior"] = var_prior
            ############################### FOR EM ###############################
            for eta in eta_arr:
                for a in a_arr:
                    err = em_run(X_train, y_train, X_dev, y_dev, n, lam=lam, eta=eta, a=a, ccc=ccc)
                    if (ccc and err > best_em) or (not ccc and err < best_em):
                        best_em = err
                        params["lam_em"] = lam
                        params["n_em"] = n
                        params["eta"] = eta
                        params["a"] = a
                        # print params
    print "best EP error: " + str(best_ep)
    print "best EM error: " + str(best_em)
    print "best params"
    print params
def doTurn(self, gamestate):
    self.round = self.round + 1
    self.logme("start round %d\n" % self.round)
    t1 = time.time()
    # first step: init data
    if self.data is None:
        self.data = Data(gamestate)
    self.data.parse(gamestate)
    self.logme("data parsed\n")
    # set up all bots
    bots = []
    #bots.append(DoNothing(self.data, 0))
    for x in [9, 14, 19, 24, 29]:  # 2, 5, 10, 15, 20, 30, 50
        bots.append(NextBot(self.data, x))
        bots.append(RndBot(self.data, x))
        bots.append(SendToNext(self.data, x))
        bots.append(AttackNeutral(self.data, x))
    self.logme("bots set\n")
    #self.logme("botslen: %d\n" % (len(bots)))
    for bot in bots:
        #self.logme(bot.getName() + "\n")
        bot.calc()
    t1a = time.time()
    for bot in bots:
        bot.run()
    t1b = time.time()
    self.logme("simulation ok, get best\n")
    for bot in bots:
        s1, s2, s3, r, a, n = bot.get()
        self.logme(str(n) + ": " + str(s1) + ", " + str(s2) + ", " + str(s3) + ", " + str(a) + "\n")
    bot = self.data.getBest(bots)
    if bot is None:
        self.logme("##################################")
        self.logme("bot was None !!!!!!!!!!!!")
        self.logme("##################################")
        bot = bots[0]
    bot.correction(self.data)
    armies = bot.get()[4]
    self.logme("-----------------------------------------------------------------------\n")
    self.logme("best: " + str(armies) + "\n")
    t2 = time.time()
    ####################
    self.logme("send armies to gamestate\n")
    for a in armies:
        gamestate.issueOrder(gamestate.getCamp(a[A_SRC]), gamestate.getCamp(a[A_DST]), a[A_CNT])
    t3 = time.time()
    d1 = (t2 - t1) * 1000
    d2 = (t3 - t2) * 1000
    dall = (t3 - t1) * 1000
    self.logme(">>>>> t %d: %d ms %d ms [%d] ms <<<<<<\n" % (self.round, int(d1), int(d2), int(dall)))
def classify():
    X_train, X_test, y_train, y_test = Data.get_dataset(0.3)
    clf = OneVsRestClassifier(GradientBoostingClassifier(n_estimators=100))
    clf.fit(X_train, y_train)
    joblib.dump(clf, 'gbc_model.pkl')
    y_prediction = clf.predict(X_test)
    acc = accuracy_score(y_test, y_prediction)
    print(acc)
    with open('result_gbc.txt', 'w') as f:  # 'with' avoids shadowing built-in 'file' and leaking the handle
        f.write(str(acc))
def classify():
    X_train, X_test, y_train, y_test = Data.get_dataset(0.3)
    clf = OneVsRestClassifier(svm.SVC())
    clf.fit(X_train, y_train)
    joblib.dump(clf, 'svm_model.pkl')
    y_prediction = clf.predict(X_test)
    # normalize=False returns the count of correct samples, not a fraction
    acc = accuracy_score(y_test, y_prediction, normalize=False)
    print(acc)
    with open('result.txt', 'w') as f:  # 'with' avoids shadowing built-in 'file' and leaking the handle
        f.write(str(acc))
def __load():
    global __dict
    path = "%s/Dict" % Data.__dataPath
    if os.path.exists(path):
        try:
            __dict = Data.__unpickle(path)
            PMS.Log("(Framework) Loaded the dictionary file")
        except:
            PMS.Log("(Framework) The dictionary file is corrupt & couldn't be loaded")
            __loadDefaults()
    else:
        __loadDefaults()
def DoTasker():
    lstUserData = Data.laodUserData()
    emails = []
    for d in lstUserData:
        login = Login(d[0], d[1])
        skey, reason = login.autologin()
        if skey is False:
            # Sign-in failed: delete the user's info and send a notification email
            email = Data.getUserEmailData(d[0])
            if email is not None and len(email) > 0:
                emails.append(email)
            Data.removeUserData(d[0])
            Data.removeUserEmailData(d[0])
            continue  # skip signing for the removed user
        sign = Sign(d[0], skey)
        if sign.autosign() == False:
            pass
        # time.sleep(1)
    # print emails
    if len(emails) > 0:
        email = libs.mail.Message(Config.smtp_user, emails, Config.mail_notify)
        try:
            conn = libs.mail.Connection(Config.smtp_server, 25, Config.smtp_user, Config.smtp_pass)
            conn.send_message(email)
        except:
            pass
def remove7(path_number):
    with open("tmp.txt", 'w') as tmp:
        with open("Path#" + str(path_number) + ".txt", "r") as f:
            for line in f:
                if line.strip():
                    tmp_d = da.data_from_line(line)
                    if tmp_d.sonar_id != 7:
                        tmp.write(line)
    with open("tmp.txt", 'r') as tmp:
        with open("Path#" + str(path_number) + ".txt", "w") as f:
            for line in tmp:
                f.write(line)
    os.remove("tmp.txt")
def player_move(self):
    print '''What would you like to do?
[1]attack
[2]insult
[3]run'''
    user_choice = raw_input("\n")
    try:
        user_choice = int(user_choice)
    except:
        print "invalid"
        clear.clrscr()
        self.player_move()
        return  # don't fall through to the menu dispatch after re-prompting
    if user_choice == 1:
        self.player_attack()
    elif user_choice == 2:
        self.player_insult()
    elif user_choice == 3:
        print "You ran!!"
        Data.start_game(self.original_stats)
    else:
        print "Not a valid choice."
        self.player_move()
def format_file(path_number):
    with open("tmp.txt", 'w') as tmp:
        with open("Path#" + str(path_number) + ".txt", "r") as f:
            for line in f:
                if line.strip():
                    tmp_d = da.data_from_line(line)
                    if MIN_MEASURE < tmp_d.measure < MAX_MEASURE:
                        tmp.write(line)
    with open("tmp.txt", 'r') as tmp:
        with open("Path#" + str(path_number) + ".txt", "w") as f:
            for line in tmp:
                f.write(line)
    os.remove("tmp.txt")
def doMaterialsTexture(self):
    # Build ComplexMaterial objects from the XML material definitions
    for complexMaterial in self.XMLMaterials.getAllComplexMaterials():
        dictCompMaterial = {}
        compMatName = self.XMLMaterials.getComplexMaterialName(complexMaterial)
        for material in self.XMLMaterials.getMaterialsFromComplexMaterial(complexMaterial):
            dictAtomsMaterial = {}
            matName = self.XMLMaterials.getMaterialName(material)
            matWavelentgh = int(self.XMLMaterials.getMaterialWavelenght(material))
            for atomMaterial in self.XMLMaterials.getAtomMaterialsFromMaterial(material):
                atomMatName = self.XMLMaterials.getAtomMaterialName(atomMaterial)  # @UnusedVariable
                atomMatClass = self.XMLMaterials.getAtomMaterialClass(atomMaterial)
                atomMatPercent = int(self.XMLMaterials.getAtomMaterialPercent(atomMaterial))
                # Create a single material if the class named in the XML exists
                if atomMatClass in Data.__dict__:  # @UndefinedVariable
                    singleMatClassObject = Data.__dict__[atomMatClass]()  # @UndefinedVariable
                    logging.debug('singleMatClasObject ' + str(singleMatClassObject))
                    dictAtomsMaterial[singleMatClassObject] = atomMatPercent
                else:
                    logging.error('Method doMaterialsTexture, no class found for material: ' + str(atomMatClass))
            materialObject = Data.SingleMaterial(dictAtomsMaterial, matName)
            dictCompMaterial[matWavelentgh] = materialObject
        self.materials.append(Data.ComplexMaterial(dictCompMaterial, compMatName))
def __init__(self, parent=None):
    pg.setConfigOption('background', 'w')
    super(ResultsWidget, self).__init__(parent)
    self.setupUi(self)
    self.exercise_list = Data.loadObjects('Data_backend.txt')
    self.most_recent_angle = self.exercise_list[-1].Target_A
    self.Current_angle.setText(str(self.most_recent_angle))
    self.progression_percent = str(int(self.most_recent_angle) * 100 / 135)
    self.Current_progression.setText(self.progression_percent)
    datelist = []
    anglelist = []
    for i in self.exercise_list:
        datelist.append(str(i.Date))
        anglelist.append(int(i.Target_A))
    weeks = Data_target.week_list
    Tangles = Data_target.angle_list
    datetime_first = datetime.datetime.strptime(datelist[0], '%Y%m%d')
    for i in range(0, len(weeks)):
        weeks[i] = datetime_first + datetime.timedelta(weeks=int(weeks[i]))
        weeks[i] = int(weeks[i].strftime('%Y%m%d'))
    for i in range(0, len(datelist)):
        datelist[i] = int(datelist[i])
    # points = 100
    # X = range(0, points)
    # Y = np.exp2(X)
    self.Plot.plot(weeks, Tangles, pen=(53, 168, 224), symbolBrush=(53, 168, 224),
                   symbolPen='w', symbol='o', symbolSize=5, name="symbol='star'")
    self.Plot.plot(datelist, anglelist, pen=(248, 177, 51), symbolBrush=(248, 177, 51),
                   symbolPen='w', symbol='star', symbolSize=20, name="symbol='star'")
def src_dest_query(update: Update, context: CallbackContext, mode: str) -> None:
    chat_id = update.message.chat_id
    chat_data = Data.update(chat_id)
    if chat_data[chat_id][QUERIES][mode] != -1:
        try:
            update.message.bot.deleteMessage(
                chat_id=chat_id,
                message_id=chat_data[chat_id][QUERIES][mode])
        except:
            pass
    markup = src_dest_menu_markup(mode, chat_id)
    query = update.message.reply_text(SRC_DEST_QUERY_MESSAGE[mode], reply_markup=markup)
    query_id = query.message_id
    chat_data[chat_id][QUERIES][mode] = query_id
    chat_data[chat_id][QUERIES][LAST][ID] = query_id
    chat_data[chat_id][QUERIES][LAST][POINTER] = mode
    Data.write(chat_data)
    unrecognized_zero(chat_id)
def query(self):
    name = self.name.get()
    sid = self.sid.get()
    self.table.clear()
    ds = Data.tea()
    try:
        for s in ds:
            if name != '' and s[1] != name:
                continue
            if sid != '' and s[0] != sid:
                continue
            self.table.add([s[1], s[0], s[6], s[7], Work.getState(s[4])])
    except:
        pass
def Run(self):
    while True:
        (self.x, self.y) = Data.LinearModelDataGenerator(self.basis, self.weight, self.a)
        self.X.append(self.x)
        self.Y.append(self.y)
        print("Add data point ( {} , {} ):".format(self.x, self.y))
        self.counter += 1
        if self.__OnlineLearning(self.mean, self.covariance,
                                 [[self.x**i for i in range(self.basis)]]):
            break
        print("=" * 25)
    self.__Visualization()
def statistical_outlier_detection(data, vector_index=0):
    if len(np.shape(data)) == 1:
        data = np.array([data])
    q25 = np.percentile(np.array(data[vector_index]), 25)
    q75 = np.percentile(np.array(data[vector_index]), 75)
    iqr = q75 - q25
    threshold = iqr * 1.5
    median = Data.median(data[vector_index])
    outliers = []
    for observation in range(len(data[vector_index])):
        if (median - threshold > data[vector_index][observation]
                or median + threshold < data[vector_index][observation]):
            outliers.append(observation)
    return outliers
def favorites_setting_query(update: Update, context: CallbackContext) -> None:
    chat_id = update.message.chat_id
    chat_data = Data.update(chat_id)
    last_fav_set = chat_data[chat_id][QUERIES][FAV_SETTING]
    if last_fav_set != -1:
        try:
            update.message.bot.deleteMessage(chat_id=chat_id, message_id=last_fav_set)
        except:
            pass
    buttons = [InlineKeyboardButton(text=COMPLETE_SRC_DEST[mode],
                                    callback_data="%s 0 %s" % (FAV_SETTING, mode))
               for mode in [SRC, DEST]]
    markup = InlineKeyboardMarkup([buttons])
    query = update.message.reply_text(FAV_SETTING_COMMAND_MESSAGE, reply_markup=markup)
    query_id = query.message_id
    chat_data[chat_id][QUERIES][FAV_SETTING] = query_id
    chat_data[chat_id][QUERIES][LAST][ID] = query_id
    chat_data[chat_id][QUERIES][LAST][POINTER] = FAV_SETTING
    Data.write(chat_data)
    unrecognized_zero(chat_id)
def search():
    results = {}
    df = Data.get_ticker_metadata()
    df = df[df['Exchange_Sector'] == 'NASDAQ_Technology']
    universe_of_tickers = df.index.tolist()
    tickers = Data.get_tickers_with_good_data(universe_of_tickers)
    for ticker in tickers:
        try:
            predictor_tickers = ['NASDAQ:AAPL', 'NASDAQ:AMZN']  # [Cp.ticker_benchmark]
            model, HitRate = find_best_machine_learning_model(predictor_tickers, ticker)
            results[ticker] = [model, HitRate]
            print({ticker: [model, HitRate]})
        except:
            print('Problem: {}'.format(ticker))
    df = pd.DataFrame.from_dict(results, orient='index')
    df.rename(columns={0: 'Model', 1: 'HitRate'}, inplace=True)
    print(df)
    df.to_csv(Cp.files['ML'])
def split_data(self, data, verbose=False):
    print("Splitting data...")
    self.x_train, self.y_train, self.x_val, self.y_val, self.test = Data.seperate(
        data, '2016-03-27', '2016-04-24')
    if verbose:
        print(self.x_train.shape)
        print(self.y_train.shape)
        print(self.x_val.shape)
        print(self.y_val.shape)
        print(self.x_train[features].shape)
        print(self.x_val[features].shape)
    self.d_col_val = 1886
def distances_dist(datasets, conditions, scale=1):
    plt.close("all")
    fig, ax = plt.subplots()
    bins = np.linspace(0, 1, num=11)
    for i, dataset in enumerate(sorted(datasets)):
        data = Data(dataset).get_df()
        sm_cols = [col for col in data.col_action if data.col_action[col] == 'se']
        print(dataset)
        distances = []
        for sm_col in sm_cols[:1]:
            print('Column name: %s' % sm_col)
            A = data.df[sm_col][:10000].astype(str)
            B = data.df[sm_col].unique().astype(str)
            sm = similarity_matrix(A, B, conditions['Distance'], -1)
            # print(sm.shape)
            # # take the 10% highest distances for each value
            # sm_nmax = np.array([sorted(row)[:-1] for row in sm])
            # distances += list(sm_nmax.ravel())
            # bin_counts = [0 for bin in bins]
            # bin_width = 1 / (len(bins) - 1)
            # distances2 = np.zeros(len(distances))
            # for i, distance in enumerate(distances):
            #     bin_number = int(distance // bin_width)
            #     bin_counts[bin_number] += 1
            #     distances2[i] = bin_number * bin_width
            # bin_counts = np.array(bin_counts)  # / len(distances)
            # s = interpolate.interp1d(bins, bin_counts)
            # kernel = stats.gaussian_kde(distances2, bw_method=.6)
            x = np.linspace(0, 1, 11)
            # y = list(reversed(list(accumulate(list(reversed(bin_counts))))))
            # plt.semilogy(x, s(x) * scale, label=dataset)
            plt.semilogy(x, ball_elements(sm, bins) / sm.shape[0], label=dataset)
    plt.legend(fontsize=14)
    sns.plt.xlim([0, 1])
    sns.plt.ylim([1, 2000])
    # sns.despine(bottom=False, left=False, right=True, trim=True)
    # plt.yticks([], [])
    # y_ticks = np.array([val / 10 for val in ax.get_yticks()])
    # ax.set_yticklabels(y_ticks)
    ax.set_xlabel('Similarity', fontsize=16)
    ax.tick_params(axis='x', which='major', labelsize=14)
    filename = 'DistanceDist_' + '_'.join(
        [key + '-' + conditions[key] for key in conditions]) + '.pdf'
    plt.savefig(os.path.join(os.getcwd(), '..', 'figures', filename),
                transparent=False, bbox_inches='tight', pad_inches=0.2)
def RunBackTesting(TS_ID):
    dfds = d.GetTimeSeries(TS_ID)
    targets = dfds['target'].unique()
    for target in targets:
        titleName = TS_ID + '_' + target
        df = dfds[dfds['target'] == target]
        # Backtesting
        back = Backtesting(df)
        back.RunBackpropagation(minPeriods=6, chartTitle=titleName, metric='AlgoChangeDetected')
        back.GenerateGIF(titleName=titleName)
def get_data(self, data_files, setting, names):
    """
    Get the Data object
    :param data_files: the pathname of the data files
    :param setting: the Setting object
    :param names: the Names object
    :return: the Data object
    """
    # If one data file
    if len(data_files) == 1:
        data_file = data_files[0]
        # Get X and y
        X, y = self.get_X_y(data_file, names)
    elif len(data_files) == 2:
        training_data_file = data_files[0] if 'train' in data_files[0] else data_files[1]
        testing_data_file = data_files[0] if 'test' in data_files[0] else data_files[1]
        # Get X_train and y_train
        X_train, y_train = self.get_X_y(training_data_file, names)
        # Get X_test and y_test
        X_test, y_test = self.get_X_y(testing_data_file, names)
        # Combine training and testing data
        X = pd.concat([X_train, X_test])
        y = pd.concat([y_train, y_test])
    else:
        print("Wrong number of data files!")
        exit(1)
    # Encode X and y
    X, y = self.encode_X_y(X, y, setting, names)
    # Update the name of features
    names.features = np.array(X.columns)
    # Transform X from dataframe into numpy array
    X = X.values
    # Oversample when y is imbalanced
    if len(np.unique(np.unique(y, return_counts=True)[1])) != 1:
        ros = RandomOverSampler(random_state=setting.random_state)
        X, y = ros.fit_sample(X, y)
    data = Data.Data(X, y)
    return data
def Cleandict(self):
    for i in Main_Menu.Liste:
        try:
            i[0].destroy()
            Main_Menu.ipListe.remove(i[1])
        except:
            print(Data.get_time() + "[SettingsMenu] error, must have deleted the dict")
    self.root.update()
    Main_Menu.Liste = []
    Data.dict = {}
def load_data():
    global data_collection
    start = time.time()
    if os.path.exists(paras.DATA_COL):
        with open(paras.DATA_COL, 'rb') as f:
            data_collection = pickle.load(f)
    else:
        vocab = Vocab.Vocab()
        schemas = Schema.load_schema()
        train_data = Data.load_data(paras.TRAIN_DATA_MERGE, schemas)
        test_data = Data.load_data(paras.TEST_DATA, schemas)
        train_data.get_indexes(vocab)
        test_data.get_indexes(vocab)
        print(Data.tot1)
        print(Data.tot2)
        print('train_data number:', len(train_data.data))
        data_collection = DataCollection(vocab, schemas, train_data, test_data)
        with open(paras.DATA_COL, 'wb') as f:
            pickle.dump(data_collection, f)
    end = time.time()
    data_collection.vocab.print_info()
    data_collection.schemas.print_info()
    print('load data time cost:', end - start)
def close_new(self, y):
    """
    Create a new account with name y.
    :param y: string
    """
    self.transaction_list = Data.new(str(y))
    # Destroy any leftover labels before clearing the list;
    # clearing first would leave nothing to destroy
    for x in self.labels:
        x.destroy()
    self.labels = []
    self.account_name = str(y)
    self.total = 0
    self.s.set(self.account_name)
    self.t.set("$" + str(self.total))
    self.new_window.destroy()
def showSuggestions():
    # Create all 5-item combinations of everything in the data set
    result = list(combinations(Data.getEverything(), 5))
    # Randomly print one combination to the user
    print("\n====================================\n")
    print("\n ~~~~~~~ People also bought: ~~~~~~~~\n ")
    #print("Number of combinations", len(result))
    # Use the actual number of combinations instead of the hardcoded 1533939
    randomSuggestion = random.randint(0, len(result) - 1)
    #print(result[randomSuggestion])
    return result[randomSuggestion]
def case_sims():
    # Case 1: Run 1 simulation for all tickers. Use this to get EOD orders (metrics + orders/trades)
    df = Data.get_ticker_metadata()
    # run_simulation(simulation_name='All', universe_of_tickers_df=df, generate_outputs=True)

    # Case 2: Run 1 simulation for a specific industry_group or sector (metrics, backtest + orders/trades)
    # df = df[df['Exchange_Sector'] == 'NASDAQ_Technology']
    # df = df[df['Exchange_Sector'] == 'LON_Financials']
    df = df[df['Exchange_Sector'] == 'NYSE_Finance']
    # df = df[df['Exchange_Sector'].isin(['NASDAQ_Technology', 'NYSE_Finance', 'LON_Financials'])]
    # df = df[df['Exchange_Sector'].isin(['LON_Financials', 'LON_Consumer', 'LON_Industrials',
    #                                     'LON_Basic Materials', 'LON_Consumer Goods', 'LON_Health Care',
    #                                     'LON_Technology', 'LON_Oil & Gas'])]
    run_simulation(simulation_name='Single', universe_of_tickers_df=df, generate_outputs=True)
def update_clock(self):
    global ipListe
    global Liste
    for i in Liste[:]:  # iterate over a copy, since Liste is mutated in the loop
        try:
            if (Data.get_sec() - Data.dict[i[1]][1]) > 62:
                i[0].destroy()
                ipListe.remove(i[1])
                Liste.remove(i)
        except:
            print(Data.get_time() + "[Main_Menu] error, must have deleted the dict")
    try:
        for ip in Data.dict:
            if (int(Data.get_sec()) - Data.dict[ip][1]) < 61 and ip not in ipListe:
                self.Bok = Button(self.windows, text=Data.dict[ip][0],
                                  command=lambda ip=ip: LiveChating.App(ip))
                self.Bok.config(height=1, width=40)
                self.Bok.pack()
                Liste.append((self.Bok, ip))
                ipListe.append(ip)
                self.windows.update()
    except:
        print(Data.get_time() + "[Main_Menu] error in dict handled")
    self.windows.update()
    self.windows.after(1000, self.update_clock)
def main():
    if len(sys.argv) != 6:
        logging.info('please input args: car_path, road_path, cross_path, preset_answer_path, answer_path')
        exit(1)
    car_path = sys.argv[1]
    road_path = sys.argv[2]
    cross_path = sys.argv[3]
    preset_answer_path = sys.argv[4]
    answer_path = sys.argv[5]
    logging.info("car_path is %s" % (car_path))
    logging.info("road_path is %s" % (road_path))
    logging.info("cross_path is %s" % (cross_path))
    logging.info("preset_answer_path is %s" % (preset_answer_path))
    logging.info("answer_path is %s" % (answer_path))
    # Load the data
    roads, cars, crosses, pre_path, preset_time = Data.load_data(
        road_path, car_path, cross_path, preset_answer_path)
    # Create the model instance
    graph = Class.Graph(roads, crosses)
    pre_cars = cars[cars[' preset'] == 1]  # preset vehicles
    our_cars = cars[cars[' preset'] == 0]  # non-preset vehicles
    # Update the actual start_time of the preset vehicles
    pre_cars = Data.sort_priority(pre_cars)
    pre_cars = Data.update_st(pre_cars, preset_time)
    our_cars = Data.sort_priority(our_cars)
    # print('pre', our_cars)
    with open(answer_path, 'w') as f:
        f.write("#(carId,StartTime,RoadId...)\n")
    # Initialize traffic state for the preset vehicles
    # initial_state(graph, pre_path, pre_cars)
    # Vehicle scheduling
    answer = Path.plan(graph, pre_cars, our_cars, pre_path, answer_path)
def run1(modeUserChoice, recommOnOff):
    '''
    Dynamical version: runs the interaction user // data // recommender.
    mode: "sigmoid" or "random"
    '''
    psdata = Data.PseudoData("Generate", nItems)
    psuser = User.Users(range(psdata.N), psdata.Dict, "GenerateAI", psdata.kwordMatrix, nUsers)
    stat = []
    for sweep in xrange(nbRuns):
        statRec = 0
        for user in xrange(psuser.N):
            if recommOnOff == "on":
                # extract the user's cluster
                pref = np.nonzero(psuser.PrefMatrix[0])[0][0]
                cluster = np.nonzero(psuser.PrefMatrix[:, pref])[0]
                scores, prelistRecomm = myrec.recommender(user, cluster, psuser.dataItemsHist)
            elif recommOnOff == "off":
                prelistRecomm = myrec.pickRand(2, range(psdata.N))
            prelistRandom = myrec.pickRand(4, range(psdata.N), prelistRecomm)
            listotal = prelistRandom + prelistRecomm
            if len(mypd.uniq(listotal)) != len(listotal):
                print "duplicated items", prelistRandom, prelistRecomm
                sys.exit(1)
            #else:
            #    print "pass"
            probas = map(lambda x: myalg.sigmoid(
                myalg.Utilityfct(user, x, psuser.PrefMatrix, psdata.kwordMatrix)), listotal)
            if modeUserChoice == "sigmoid":
                picked = listotal[myalg.userChoice(probas)]
            elif modeUserChoice == "random":
                picked = myrec.pickRand(1, listotal)[0]
            psuser.dataItemsHist[user, picked] += 1
            if picked in prelistRecomm:
                statRec += 1
        print sweep, "stat: ", modeUserChoice, recommOnOff, 1. * statRec / psuser.N
        print "max ", np.max(psuser.dataItemsHist)
        stat.append(1. * statRec / psuser.N)
    return np.mean(stat), np.std(stat)
def remove_animal():
    data = request.get_json().upper()
    # Respond with whether the animal name given exists in the database
    res = make_response(jsonify(not animal_list.check_Animal(data)))
    if not animal_list.check_Animal(data):  # the name given exists in the database
        animals = animal_list.getList()  # avoid shadowing the built-in 'list'
        for animal in animals:
            if data == animal.get_Name():  # 'data' is already upper-cased
                # Update the stats (remove any data associated with the animal)
                Data.update_MapData(animal.get_Range(), "remove")
                Data.update_DietData(animal.get_Diet(), "remove")
                Data.update_ClassData(animal.get_Class(), "remove")
                Data.update_StatusData(animal.get_Status(), "remove")
                animal_list.remove_Animal(animal)  # removes the animal object from the database
                # saves the current database list
                break  # stop iterating once removed ('exit()' would kill the process)
    return res
def readChar(self, parenStack, interiorStringRepStack, char):
    if char == '(':
        # add to the stack
        parenStack.append('(')
        interiorStringRepStack.append([])
        return MiddleState.Instance()
    elif char == ')':
        # at the top level, getting a close paren means Reject
        print("Unbalanced parentheses")
        raise Data.ParseException()
        # return RejectState.Instance()  # unreachable after the raise
    else:
        # for any other character, stay at the current level; append to interiorStringRep
        interiorStringRepStack[-1].append(char)
        return TopLevelState.Instance()
def changed(self):
    # Update the data according to the selection
    self.show()
    for item in self.table.selection():
        tes = self.table.item(item, 'values')
        print(tes, self.ppp)
        if self.ppp[0] == 1:
            if tes[1] == '无权限':  # "no permission"
                Data.adminP1(tes[0], 1)
            else:
                Data.adminP1(tes[0], 0)
        else:
            if tes[2] == '无权限':  # "no permission"
                Data.adminP2(tes[0], 1)
            else:
                Data.adminP2(tes[0], 0)
    self.initData()
    return
def adding_task(task_title, task_key, deadline, deadtime):
    starting = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
    title = task_title
    time_zone = starting.astimezone(get_localzone())
    startdate = time_zone.date()
    starttime = time_zone.time().replace(microsecond=0)
    print deadline, deadtime
    timeLeft = time_left(deadline, deadtime)
    recommend = Recommendations.give_recommend(dead(deadline, deadtime), task_key, the_list)
    tasks_dict.update({
        task_key: {
            "title": title,
            "deadline": deadline,
            "deadtime": deadtime,
            "start date": startdate,
            "start time": starttime,
            "time left": timeLeft,
            "Recommendation": recommend
        },
    })
    Data.write_task(task_key)
    # add to LLFeature here
    LLFeatures.linked_list.append(task_key)
def find_cointegrated_pairs_by_exchange_sector(start_date='2006-01-01'):
    cointegrated_pairs = []
    universe_of_tickers_df = Data.get_ticker_metadata()
    exchange_sectors = universe_of_tickers_df['Exchange_Sector'].unique().tolist()
    try:
        exchange_sectors.remove('NYSE_Macro')
    except:
        pass
    for exchange_sector in exchange_sectors:
        df_tm = universe_of_tickers_df[universe_of_tickers_df['Exchange_Sector'] == exchange_sector]
        tickers = df_tm.index.tolist()
        print(exchange_sector)
        for ticker_pair in list(itertools.combinations(tickers, 2)):
            try:
                df = Data.create_df_from_tickers(list(ticker_pair), start_date=start_date)
                is_coint, hr, spreads = is_pair_cointegrated(df, ticker_pair[0], ticker_pair[1])
                if is_coint:
                    cointegrated_pairs.append(ticker_pair)
                    print('{} Good'.format(ticker_pair))
                else:
                    print(ticker_pair)
            except:
                print('Problem with: {}'.format(ticker_pair))
    with open(Cp.files['coint_pairs'], 'w') as f:
        for pair in cointegrated_pairs:
            f.write('{}_{}\n'.format(pair[0], pair[1]))
def json_write(filename):
    # Writes data from a JSON file into the pdfMetaDataAdd function
    jsonName = filename[:-5]
    with open(filename) as f:
        json_array = json.load(f)
    for doc in json_array['Docs']:
        index_details = {
            "Id": doc['Id'],
            "Reference Number": doc['Reference'],
            "Date Created": doc['Created'],
            "Keywords": doc["Description"],
            "Author": doc["Cabinet"],
        }
        Data.pdfMetaDataAdd(index_details['Id'], index_details["Reference Number"],
                            index_details["Author"], index_details["Date Created"],
                            index_details['Keywords'], jsonName)
def mutants(self):
    '''Returns a mutant collection instance initialised with the contents
    of the mutants argument'''
    mutantCollection = None
    if '--mutants' in self.opt:  # dict.has_key() is deprecated
        mutantDir = self.opt['--mutants']
        # remove the trailing slash if present so split will work
        if mutantDir[-1:] == os.sep:
            mutantDir = mutantDir[:-1]
        pathComponents = os.path.split(mutantDir)
        mutantCollection = Data.MutantCollection(name=pathComponents[1],
                                                 location=pathComponents[0])
    return mutantCollection
def save(self, val):
    print 'saving'
    print self.comment.text()
    if self.comment.text() != "Type your comment here..." and self.comment.text() != '':
        currentDate = str(time.strftime("%Y%m%d"))
        # global Selected_ex_angle
        global Last_exercise_list
        # targetAngle = str(Selected_ex_angle)
        # holdDuration = str(np.average(Last_exercise_list))
        # print holdDuration
        # Score = 1
        # if holdDuration < 120:
        #     Score = 1
        # elif holdDuration < 240:
        #     Score = 2
        # elif holdDuration < 400:
        #     Score = 3
        Comment = str(self.comment.text())
        # if Score == 1:
        #     Advice = 'Try holding for a bit longer'
        # elif Score == 2:
        #     Advice = 'Nearly there! Try holding the position just a little longer!'
        # elif Score == 3:
        #     Advice = 'Keep it up!'
        self.Score = str(self.Score)
        self.holdDuration = str(self.holdDuration)
        # print currentDate, type(currentDate), self.targetAngle, self.holdDuration, self.Score, Comment, self.Advice
        new_object = Data.Exercises(currentDate, self.targetAngle, self.holdDuration,
                                    self.Score, Comment, self.Advice)
        Data.saveObject('Data_backend.txt', new_object)
        self.pushButton.click()
        Email.send_email()
    else:
        QMessageBox.about(self, "ERROR", "Please leave a comment")
def sampleReadTxtSample(self, file_name):
    # open the file
    with open(file_name, "r") as student_file:
        # count
        i = 0
        # list of data
        data_list = []
        # wrong data function
        wrong_data_function = Data()
        # regular expression
        expression = r"^([0-9]{1,3}[.]){3}[0-9]{1,3}(,[A-z]*[0-9]*){2},([A-z]+[ ]?)+,[0-9]{1,3}(,[0-9]{10,13}){2}$"
        # read all the lines
        for line in student_file:
            # if data not empty
            if len(line) != 1:
                try:
                    assert re.match(expression, line) is not None
                # if the line is not a correct record
                except AssertionError:
                    if i == 0:
                        break
                    continue
                # if the data is correct
                else:
                    # data object
                    data_object = Data()
                    data_object.dataReadTxtData(line)
                    data_list.append(data_object)
                    i += 1
            # if the data is empty, skip it
            else:
                i += 1
                continue
        # add the records to the sample
        self.datas = data_list
def Find(self, s):
    """
    Find all occurrences of the given string, returning a list of Hit objects.
    Search through all objects and their properties.
    """
    #. do case-insensitive
    #. handle > find olk in words, ie restrict by adj also
    hits = Data.List()  # renamed from 'list' to avoid shadowing the built-in
    for obj in self.rootdict.itervalues():
        for prop in obj:
            if s in prop:
                attrib = Attrib(obj, prop)
                hit = Hit(attrib)  # , location or occurrence?
                hits.append(hit)
    return hits
def loadcoords(planets):
    coords = {}
    # load planets
    planetscoords = set()
    for planetname in planets:
        for (x, y) in planets[planetname]:
            planetscoords.add((x, y))
    coords["planets"] = planetscoords
    # load other coords (missiles, asteroids and eventually others)
    patternbegin = "coords_"
    patternend = ".txt"
    filenames = os.listdir(Core.CONFIGDIR)
    for filename in filenames:
        name = getnamefrom_coords_name_txt(filename, patternbegin, patternend)
        if name is not None and name != "planets" and name not in coords:
            coords[name] = Data.readcoordsfile(Core.CONFIGDIR + filename)
    return coords
def recognize(self, real_data):
    data = Data.extracted_load(real_data)
    # initialize the output character list
    string = ''
    init_ltr = str(unvectorize_output(self.step_net(data[0][0])))
    string = string + init_ltr
    row_ctr = 0
    print data[0][0]
    for i in xrange(1, len(data)):
        ltr = str(unvectorize_output(self.step_net(data[i][0])))
        if data[i][1][0] > row_ctr:
            string = string + '\n' + ltr
            row_ctr = row_ctr + 1
        elif data[i][1][1] == 0:
            string = string + " " + ltr
        else:
            string = string + ltr
    return string
def run(self, report_criteria, save_criteria, termination_criteria, batch_size=1):
    crit_objs = [self.__make_crit(), self.__make_crit(), self.__make_crit()]
    while True:
        t0 = time.clock()
        for i in range(batch_size):
            self.doGeneration()
        if self.need_sort:
            self.genelist.sort()
        tdelta = time.clock() - t0
        gdelta = batch_size
        self.generation += gdelta
        self.time += tdelta
        for c in crit_objs:
            c["generations"] += gdelta
            c["time"] += tdelta
        a = self.getBest()
        rep = Data.make_report(a[1], a[0], self.generation,
                               termination_criteria["generations"],
                               self.time, termination_criteria["time"])
        if self.checkCriteria(report_criteria, crit_objs[0]):
            self.genes["report"](rep)
        if self.checkCriteria(save_criteria, crit_objs[1]):
            self.saveHistory()
        if self.checkCriteria(termination_criteria, crit_objs[2]):
            break
    return