def create_hybrid_per_total_percent_month():
    """Render a line chart of hybrid sales as a percentage of all sales per month."""
    rows = np(total_by_month)
    # rows[-1] is the hybrid series, rows[0] the grand total; the
    # `* 100 // 0.01 / 100` dance truncates the percentage to 2 decimals.
    percents = [(hybrid / total) * 100 // 0.01 / 100
                for hybrid, total in list(zip(rows[-1], rows[0]))[1:-3]]
    chart = pygal.Line(x_label_rotation=90)
    chart.title = "เปอเซ็นของรถยนต์ไฮบริดในแต่ละเดือนเทียบกับรถยนต์ทุกประเภท"
    chart.x_labels = all_months()
    chart.add("hybrid", percents)
    chart.render_to_file('All_hybrid_percent_month.svg')
def create_hybrid_change_percent():
    """Render a line chart of the month-over-month change in hybrid sales."""
    hybrid_row = np(total_by_month)[-1]
    deltas = [0]
    for idx in range(2, len(hybrid_row[:-3])):
        deltas.append(hybrid_row[idx] - hybrid_row[idx - 1])
    chart = pygal.Line(x_label_rotation=90)
    chart.title = "อัตราการเปล่ยนแปลงของรถยนต์ไฮบริดจากเดือนก่อนหน้า"
    chart.x_labels = all_months()
    chart.add("change from previous month", deltas)
    chart.render_to_file('Change_hybrid_prevmonth.svg')
def matrixReshape(nums, r, c):
    """
    Reshape matrix *nums* into r rows and c columns (LeetCode 566).

    :type nums: List[List[int]]
    :type r: int
    :type c: int
    :rtype: List[List[int]]

    If the element counts do not match, the original matrix is returned
    unchanged, as the problem requires.

    Fixes vs. original: removed the leftover debug print, and the return
    value is now an actual r-by-c nested list as the docstring promises
    (the original returned `np(nums, r * c)`, a flat length-r*c reshape).
    """
    if len(nums) * len(nums[0]) != r * c:
        return nums
    # Flatten row-major, then slice back into rows of length c.
    flat = [value for row in nums for value in row]
    return [flat[i * c:(i + 1) * c] for i in range(r)]
def create_all_sum():
    """Render a bar chart of the total number of cars sold per brand."""
    chart = pygal.Bar(x_label_rotation=90)
    chart.title = "ผลรวมของรถยนต์แต่ละยี่ห้อ"
    for brand, counts in zip(all_by_month.index.values, np(all_by_month)):
        chart.add(brand, sum(counts))
    chart.render_to_file('All_cars_each_brand.svg')
def create_hybrid_sum():
    """Render a bar chart of total hybrid-car sales for each hybrid type."""
    chart = pygal.Bar(x_label_rotation=90)
    chart.title = "ผลรวมรถยนต์ไฮบริดแต่ละประเภท"
    for row in np(hybrid_by_month):
        # row[0] is the hybrid TYPE label, the rest are monthly counts
        label, counts = row[0], row[1:]
        chart.add(label, sum(counts))
    chart.render_to_file('All_hybrid_each_type.svg')
def create_hybrid_month():
    """Render a stacked-bar chart of hybrid sales per type for every month."""
    chart = pygal.StackedBar(x_label_rotation=90)
    chart.title = "ผลรวมของรถยนต์ไฮบริดแต่ละประเภทของแต่ละเดือน"
    chart.x_labels = all_months()
    for row in np(hybrid_by_month):
        # row[0] is the hybrid TYPE label, row[1:] the monthly counts
        chart.add(row[0], row[1:])
    chart.render_to_file('All_hybrid_each_month.svg')
def create_all_month():
    """Render a bar chart of the total number of cars sold in each month."""
    chart = pygal.Bar(x_label_rotation=90)
    chart.title = "ผลรวมของรถยนต์แต่ละเดือน"
    chart.x_labels = all_months()
    totals = np(total_by_month)[0]
    # totals[0] is the TYPE label of the first row, the rest are counts
    chart.add(totals[0], totals[1:])
    chart.render_to_file('All_cars_each_month.svg')
def megaFaker():
    """Build a fake search phrase: a random action prefix plus a random seed.

    Reads the seed corpus from 1000.json via the module-level js loader;
    rd and np are module-level helpers (random int and array wrapper).
    """
    with open('1000.json', 'r') as handle:
        seeds = js(handle)
    prefixes = [
        "como ", "cuando ", "donde queda ",
        "como encontrar ", "que hacer en ", "obtener "
    ]
    topics = [
        "animal", "carmodle", "moviesTitle",
        "NameOfCompany", "Drug", "uni", "apps"
    ]
    # Keep the random draws in the original order: topic, seed, prefix.
    topic = str(topics[rd(0, 6)])
    seed_value = np(seeds)[rd(1, 100)][topic]
    prefix = str(prefixes[rd(0, 5)])
    return str(prefix + seed_value)
def merge(trovo, daily):
    """Merge the daily stale/unchanged history into the trovo quote frame.

    Reads the 'Stale' factor from the shared Factor*.csv, joins `daily` onto
    `trovo` by a lin+dealer concatenation key, rolls the days-stale and
    days-unchanged counters forward by one, resets days_stale to 0 for quotes
    bid today, flags quotes staler than the factor in 'Not Stale', and writes
    an intermediate CSV before returning the merged frame.

    TODO (original note): still needs size data from solve.
    """
    factorImport = open(glob(r'\\xsqnfs2.nyc.ime.reuters.com\TRPS\Bank Loans\Auto 2.0\Factor*.csv')[0], 'r')
    readFactors = csv.reader(factorImport)
    staleFactor = 10 #default
    for row in readFactors:
        if "Stale" in row[0] and row[1] not in (None, "", " "):
            staleFactor = float(row[1])
    factorImport.close()
    concatIndex = len(trovo.columns)
    trovo.insert(concatIndex, 'Concatenated', 1) #initializing stale column
    trovo.iloc[:, concatIndex] = trovo['lin'] + '_' + trovo['dealer'] #making unique lin for identification
    trovo = pd.merge(trovo, daily, how = 'left', on = 'Concatenated') #merging days unchanged/size/stale data
    unchangedIndex = trovo.columns.tolist().index('unchanged_for')
    daysStale = trovo.columns.tolist().index('days_stale')
    lastBidDate = trovo.columns.tolist().index('last_bid_date')
    # roll both counters forward one day
    trovo.iloc[:, daysStale] = trovo['days_stale'] + 1
    trovo.iloc[:, unchangedIndex] = trovo['unchanged_for'] + 1
    '''need size data still'''
    staleIndex = len(trovo.columns)
    trovo.insert(staleIndex, 'Not Stale', 1) #initializing stale column
    trovo.insert(staleIndex + 1, 'Color', 0) #initializing color column
    now = timeComp.now()
    year = now.year
    month = now.month
    day = now.day
    trovo.iloc[:, lastBidDate] = toDateTime(trovo['last_bid_date'], errors = 'coerce')
    # NOTE(review): `~` is bitwise not — this acts as logical negation only if
    # the comparison yields a numpy bool; on a plain Python bool both ~True
    # (-2) and ~False (-1) are truthy.  Confirm the element types here.
    trovo.iloc[:, daysStale] = [0 if ~(x == date64('NaT')) and (x.year == year)
                                and (x.month == month) # updating quotes from today to be stale 0 days
                                and (x.day == day) else y
                                for (x, y) in list(zip(trovo['last_bid_date'], trovo['days_stale']))]
    trovo.iloc[:, staleIndex] = np((trovo['days_stale'] > staleFactor), 0, 1) #killing quotes stale for more than 10 days
    #outputting intermediate file after cleansing
    trovo.to_csv(r'\\xsqnfs2.nyc.ime.reuters.com\TRPS\Bank Loans\Auto 2.0\Place Results Here\FilterFile' + dateLabel.today().strftime('%m-%d-%Y') + '.csv', index = False)
    return trovo
def predictModel(categories):
    """Predict a product category from the categories a user clicked on.

    Encodes each known category name as a digit string ("1"-"5", "6" for
    anything unrecognised), zero-pads the vector to the model's fixed input
    length of 18, runs the trained model, and maps the argmax class back to
    a category via NumberToPredCat / InvertPred.

    :param categories: iterable of category-name strings
    :return: whatever InvertPred returns for the predicted category
    """
    #TODO: change this to search for the tags/categories? (Data which user clicks on)
    # BUG FIX: the original used independent `if` statements with a single
    # trailing `else` bound only to the final "Keyboards" test, so every
    # known category except "Keyboards" appended BOTH its own code and "6".
    # A single lookup yields exactly one code per clicked category.
    encoding = {
        "Graphics cards": "1",
        "Monitors": "2",
        "Processors": "3",
        "Mouse": "4",
        "Keyboards": "5",
    }
    num_cat = [encoding.get(cat, "6") for cat in categories]
    # the model expects a fixed-length input of 18 features; pad with "0"
    while len(num_cat) < 18:
        num_cat.append("0")
    num_cat_numpy = np([num_cat])
    num_cat_numpy = nump.reshape(
        num_cat_numpy, (num_cat_numpy.shape[0], num_cat_numpy.shape[1]))
    print("0: ", num_cat_numpy.shape[0], "1: ", num_cat_numpy.shape[1])
    prediction = model.predict(num_cat_numpy).argmax()
    print("Prediction number: ", prediction)
    predict = NumberToPredCat(prediction)
    #predict=InvertPred(cat)
    return InvertPred(predict)
def main():
    """Load car-sales data from data.xlsx and render every chart.

    Reads the spreadsheet, slices out monthly / YTD / hybrid views, then
    defines and immediately invokes one nested helper per chart (each
    writes an SVG file).  Depends on a module-level all_months() helper
    defined elsewhere in this file.
    """
    import pandas as pd
    from numpy import array as np
    import pygal
    df = pd.read_excel("data.xlsx")
    # rows are indexed by brand name; the "TOTAL" row holds per-type sums
    df.set_index("BRAND", inplace=True)
    hybrid_types = ("Electric", "Petrol Hybrid", "Diesel Hybrid",
                    "Plug-In Petrol Hybrid")
    # monthly figures for every TYPE row of the TOTAL brand
    total_by_month = df.loc["TOTAL", ["TYPE"] + all_months()]
    # year-to-date totals for 2012..2017 (currently unused below)
    total_by_year = df.loc["TOTAL", ["TYPE"] +
                           [str(i) + "-YTD" for i in range(2012, 2018)]]
    # monthly figures for every real brand (the TOTAL row is outside this slice)
    all_by_month = df.loc["ALFA ROMEO":"YAMAHA", all_months()]
    # TOTAL rows restricted to the hybrid drive types
    hybrid_by_month = total_by_month.loc[total_by_month["TYPE"].isin(
        hybrid_types)]
    brand_hybrid_by_month = df.loc[df["TYPE"].isin(hybrid_types),
                                   ["TYPE"] + all_months()]
    # (brand, hybrid type, total sold) triples, skipping the TOTAL row
    sum_brand_hybrid = [(i, j[0], sum(j[1:])) for i, j in \
        zip(brand_hybrid_by_month.index.values,np(brand_hybrid_by_month)) if i != "TOTAL"]

    def create_brand_hybrid_sum():
        """create graph: total hybrid sales per brand+type bar chart"""
        bar_chart = pygal.Bar(x_label_rotation=90)
        bar_chart.title = "ผลรวมของรถยนต์ไฮบริดแต่ละรุ่น"
        for i, j, k in sum_brand_hybrid:
            bar_chart.add(i + " " + j, k)
        bar_chart.render_to_file('All_brands_of_hybrids.svg')

    create_brand_hybrid_sum()

    def create_all_month():
        """create graph: total cars sold per month bar chart"""
        bar_chart = pygal.Bar(x_label_rotation=90)
        bar_chart.title = "ผลรวมของรถยนต์แต่ละเดือน"
        bar_chart.x_labels = all_months()
        tmp = np(total_by_month)[0]
        # tmp[0] is the TYPE label of the first row, tmp[1:] the counts
        bar_chart.add(tmp[0], tmp[1:])
        bar_chart.render_to_file('All_cars_each_month.svg')

    create_all_month()

    def create_all_sum():
        """create graph: total cars sold per brand bar chart"""
        bar_chart = pygal.Bar(x_label_rotation=90)
        bar_chart.title = "ผลรวมของรถยนต์แต่ละยี่ห้อ"
        for i, j in zip(all_by_month.index.values, np(all_by_month)):
            bar_chart.add(i, sum(j))
        bar_chart.render_to_file('All_cars_each_brand.svg')

    create_all_sum()

    def create_hybrid_month():
        """create graph: stacked hybrid sales per type per month"""
        bar_chart = pygal.StackedBar(x_label_rotation=90)
        bar_chart.title = "ผลรวมของรถยนต์ไฮบริดแต่ละประเภทของแต่ละเดือน"
        bar_chart.x_labels = all_months()
        for i in np(hybrid_by_month):
            # i[0] is the hybrid TYPE name, i[1:] the monthly counts
            bar_chart.add(i[0], i[1:])
        bar_chart.render_to_file('All_hybrid_each_month.svg')

    create_hybrid_month()

    def create_hybrid_sum():
        """create graph: total hybrid sales per hybrid type bar chart"""
        bar_chart = pygal.Bar(x_label_rotation=90)
        bar_chart.title = "ผลรวมรถยนต์ไฮบริดแต่ละประเภท"
        for i in np(hybrid_by_month):
            bar_chart.add(i[0], sum(i[1:]))
        bar_chart.render_to_file('All_hybrid_each_type.svg')

    create_hybrid_sum()

    def create_hybrid_per_total_percent_month():
        """create graph: hybrids as a percentage of all cars, per month"""
        tmp = []
        for i, j in list(zip(np(total_by_month)[-1],
                             np(total_by_month)[0]))[1:-3]:
            # truncate the percentage to two decimal places
            tmp.append((i / j) * 100 // 0.01 / 100)
        line_chart = pygal.Line(x_label_rotation=90)
        line_chart.title = "เปอเซ็นของรถยนต์ไฮบริดในแต่ละเดือนเทียบกับรถยนต์ทุกประเภท"
        line_chart.x_labels = all_months()
        line_chart.add("hybrid", tmp)
        line_chart.render_to_file('All_hybrid_percent_month.svg')

    create_hybrid_per_total_percent_month()

    def create_hybrid_change_percent():
        """create graph: month-over-month change in hybrid sales"""
        tmp = [0]
        for i in range(2, len(np(total_by_month)[-1][:-3])):
            tmp.append(
                np(total_by_month)[-1][i] - np(total_by_month)[-1][i - 1])
        line_chart = pygal.Line(x_label_rotation=90)
        line_chart.title = "อัตราการเปล่ยนแปลงของรถยนต์ไฮบริดจากเดือนก่อนหน้า"
        line_chart.x_labels = all_months()
        line_chart.add("change from previous month", tmp)
        line_chart.render_to_file('Change_hybrid_prevmonth.svg')

    create_hybrid_change_percent()
def megaFaker():
    """Build a fake Spanish search phrase and print a run report.

    Picks a random category, a category-specific action-prefix word list,
    and a random seed value from 1000.json, prints a formatted report, and
    returns prefix + seed.  Relies on module-level helpers: js (JSON
    loader), rd (random int), np (array wrapper), rn (elapsed-seconds
    counter) — all defined elsewhere.
    """
    try:
        with open('1000.json', 'r') as milSemillas:
            milSemillas = js(milSemillas)
    except Exception as e:
        # seed file missing/unreadable: report and abort the program
        print(str(e) + "< Archivo no existe!")
        exit()
    categoria = [
        "animal", "carmodle", "moviesTitle", "NameOfCompany", "uni", "apps"
    ]
    rCategoria = (str(categoria[rd(0, (len(categoria)) - 1)]))
    # TODO (translated from Spanish): train the AI better from a txt file
    if (rCategoria == "animal"):
        palabras = [
            "donde encontar ",
            "habita ",
            "comida ",
            "peloigro de extinción ",
            "venenoso ",
            "composicion osea ",
        ]
    elif (rCategoria == "carmodle"):
        palabras = [
            "que es ", "a que sabe ", "donde queda ", "como encontrar ",
            "ver "
        ]
    elif (rCategoria == "moviesTitle"):
        palabras = [
            "fecha de lanzamiento de ",
            "reparto en ",
            "secuelas de ",
            "resumen ",
            "protagonista en ",
            "elenco de ",
            "descargar de ",
            "animacion de ",
            "calidad de ",
            "cinematic de ",
            "musica de ",
            "esenario en ",
            "Historia en",
            "catetgoria de la ",
            "errores de la ",
            "trama de",
            "arte de ",
            "libro de ",
            "escritor de ",
            "gion de ",
            "etapas de ",
        ]
    elif (rCategoria == "NameOfCompany"):
        palabras = [
            "fecha de creacion ",
            "competencia de la ",
            "estrategia de la ",
            "vision de la ",
            "mision de la ",
            "capacidad de trabajadores ",
            "beneficios de la ",
        ]
    elif (rCategoria == "uni"):
        palabras = [
            "requisitos para entara en ", "carrera ", "donde queda la ",
            "como encontrar la ", "que hacer en la ", "obtener beca en la "
        ]
    # NOTE(review): plain `if` (not elif) — harmless since "apps" is the only
    # remaining category, but confirm it was not meant to be `elif`.
    if (rCategoria == "apps"):
        palabras = [
            "Version ", "git ", "licencia ", "como encontrar ",
            "que hacer en la ", "animacion ", "keys ", "free ", "gratis ",
            "obtener ", "descargar "
        ]
    rSemilla = np(milSemillas)[rd(0, 999)][rCategoria]
    rPalabras = str(palabras[rd(0, (len(palabras)) - 1)])
    # run report ("Documentacion" in the original)
    global rn
    # NOTE(review): runtime template reconstructed from whitespace-mangled
    # source; the line breaks below are best-effort — verify against the
    # original file.
    doc = """
    --------------------Thankas for play!-----------
    Al azar python3
    Tiempo segunso = [{}]
    Tiempo minutos = [{}]
    Tema = [{}]
    Categoria = [{}]
    Accion a concatenar = [{}]
    cantidad de palabras = [{}]
    ------------------------------------------------
    """.format(rn(), (str(rn() / 60)), rCategoria, rSemilla, rPalabras,
               (len(milSemillas)))
    print(doc)
    palaGenerada = str(rPalabras + rSemilla)
    #lis.append(palaGenerada)
    return (str(rPalabras + rSemilla))
def display():
    """Render the simulation field to the terminal and/or video files.

    Draws each field cell as a colored ANSI block (terminal) and as an RGB
    pixel (video frame), then prints run statistics.  Reads many module-level
    globals: field, cells, cycle, frame_stat, the MAKE_*/SHOW_* flags, video
    writers, etc.  NOTE(review): indentation was reconstructed from a
    collapsed source — verify nesting against the original file.
    """
    print("\x1b[1;1H( WORKING )\n")
    if MAKE_DISPLAY_VIDEO or SHOW_DISPLAY:
        frame = []
        for i in range(len(field)):
            frame_row = []
            for j in range(len(field[i])):
                f = field[i][j]
                # choose colored cell for graphics
                graphic = "--"
                if f == 0:
                    # water: lit above LIGHT_HEIGHT, shadowed below
                    if (i <= LIGHT_HEIGHT):
                        graphic = "\x1b[38;2;40;40;255m"  # light
                        frame_row.append([200, 200, 120])
                    else:
                        graphic = "\x1b[38;2;0;0;200m"  # shadow
                        frame_row.append([120, 120, 60])
                elif f == 1:
                    # living cell: color depends on its kind
                    for k in cells:
                        if k.cords == [i, j]:
                            if k.kind == 0:  # fotosynth
                                graphic = "\x1b[38;2;0;255;0m"
                                frame_row.append([80, 180, 80])
                            elif k.kind == 1:  # organics
                                graphic = "\x1b[38;2;255;200;0m"
                                frame_row.append([70, 200, 200])
                            elif k.kind == 2:  # carnivorous
                                graphic = "\x1b[38;2;255;0;0m"
                                frame_row.append([20, 20, 120])
                            break
                elif f == 2:  # food
                    graphic = "\x1b[38;2;100;100;100m"
                    frame_row.append([150, 150, 150])
                elif f == 3:  # posion
                    graphic = "\x1b[38;2;200;0;255m"
                    frame_row.append([250, 50, 250])
                if SHOW_DISPLAY:
                    print(graphic + "@@", end="")
            frame.append(frame_row)
            if SHOW_DISPLAY:
                print()
        if MAKE_DISPLAY_VIDEO:
            # scale the per-cell RGB frame up to the output resolution
            npframe = cv2.resize(np(frame, dtype=uint8), dim,
                                 interpolation=cv2.INTER_AREA)
            video.write(npframe)
    if SHOW_STAT or MAKE_DISPLAY_VIDEO or MAKE_STAT_VIDEO:
        # reset ANSI color and clear to end of screen
        print("\x1b[0m\x1b[J")
    if MAKE_DISPLAY_VIDEO:
        print("Display video\n- size :",
              round(Path(DISPLAY_VIDEO_NAME).stat().st_size / 1000000, 2),
              "MB\n- duration :", cycle // 120, "s")
    if MAKE_STAT_VIDEO:
        print("Statistics video\n- size :",
              round(Path(STAT_VIDEO_NAME).stat().st_size / 1000000, 2),
              "MB\n- duration :", cycle // 120, "s")
    if SHOW_STAT or MAKE_STAT_VIDEO:
        print("\nLife cycle : ", cycle)
        print("Amount of cells : ", len(cells))
        age = [0]
        for i in cells:
            age.append(i.age)
        print("\nLongest living cell\n- Age : ", max(age))
        # find the oldest cell's genome and kind
        gen_s = []
        kind = 0
        for i in cells:
            if i.age == max(age):
                gen_s = i.gen_seq
                kind = i.kind
                break
        print("- Kind :", kind)
        print("- Agorithm :")
        print("  ", gen_s)
        # count cells of each kind
        amount = [0, 0, 0]
        for i in [0, 1, 2]:
            for j in cells:
                if j.kind == i:
                    amount[i] += 1
            print(i, "- Amount :", amount[i])
        # count food (gray) and poison (pink) tiles on the field
        gray = 0
        pink = 0
        for i in range(len(field)):
            for j in range(len(field[i])):
                f = field[i][j]
                if f == 2:
                    gray += 1
                elif f == 3:
                    pink += 1
        print("food - Amount :", gray)
        print("posion - Amount :", pink)
        if MAKE_STAT_VIDEO:
            # shift the stat frame one column left, then plot the new samples
            for i in range(PER[0]):
                new = [0, 0, 0]
                for j in range(PER[1])[::-1]:
                    new, frame_stat[i][j] = frame_stat[i][j], new
            if pink:
                frame_stat[-1 - int(pink / (FIELD_SIZE[0] * FIELD_SIZE[1]) * PER[0])][-1] = [250, 50, 250]
            if gray:
                frame_stat[-1 - int(gray / (FIELD_SIZE[0] * FIELD_SIZE[1]) * PER[0])][-1] = [150, 150, 150]
            if amount[0]:
                frame_stat[-1 - int(amount[0] / (FIELD_SIZE[0] * FIELD_SIZE[1]) * PER[0])][-1] = [80, 180, 80]
            if amount[1]:
                frame_stat[-1 - int(amount[1] / (FIELD_SIZE[0] * FIELD_SIZE[1]) * PER[0])][-1] = [70, 200, 200]
            if amount[2]:
                frame_stat[-1 - int(amount[2] / (FIELD_SIZE[0] * FIELD_SIZE[1]) * PER[0])][-1] = [20, 20, 120]
            npframe = cv2.resize(np(frame_stat, dtype=uint8), dim_stat,
                                 interpolation=cv2.INTER_AREA)
            video_stat.write(npframe)
def megaFaker():
    """Return a fake Spanish search phrase: random action prefix + seed.

    Picks a random category, a category-specific prefix word list, and a
    random seed value from 1000.json.  Uses module-level helpers js (JSON
    loader), rd (random int) and np (array wrapper), defined elsewhere.
    """
    with open('1000.json', 'r') as milSemillas:
        milSemillas=js(milSemillas)
    categoria =[
        "animal", "carmodle", "moviesTitle", "NameOfCompany", "uni", "apps"
    ]
    rCategoria = (str(categoria[rd(0,(len(categoria))-1)]))
    # TODO (translated from Spanish): train the AI better from a txt file
    if(rCategoria=="animal"):
        palabras=[
            "donde encontar ",
            "habita ",
            "comida ",
            "peloigro de extinción ",
            "venenoso ",
            "composicion osea ",
        ]
    elif(rCategoria=="carmodle"):
        palabras=[
            "que es ", "a que sabe ", "donde queda ", "como encontrar ",
            "ver "]
    elif(rCategoria=="moviesTitle"):
        palabras=[
            "fecha de lanzamiento de ",
            "reparto en ",
            "secuelas de ",
            "resumen ",
            "protagonista en ",
            "elenco de ",
            "descargar de ",
            "animacion de " ,
            "calidad de " ,
            "cinematic de ",
            "musica de " ,
            "esenario en ",
            "Historia en" ,
            "catetgoria de la " ,
            "errores de la " ,
            "trama de" ,
            "arte de " ,
            "libro de " ,
            "escritor de " ,
            "gion de " ,
            "etapas de ",
        ]
    elif(rCategoria=="NameOfCompany"):
        palabras=[
            "fecha de creacion ",
            "competencia de la ",
            "estrategia de la ",
            "vision de la ",
            "mision de la ",
            "capacidad de trabajadores ",
            "beneficios de la ",
        ]
    elif(rCategoria=="uni"):
        palabras=[
            "requisitos para entara en ", "carrera ", "donde queda la ",
            "como encontrar la ", "que hacer en la ", "obtener beca en la "
        ]
    # NOTE(review): plain `if` (not elif) — harmless since "apps" is the only
    # remaining category, but confirm it was not meant to be `elif`.
    if(rCategoria=="apps"):
        palabras=[
            "Version ", "git ", "licencia ", "como encontrar ",
            "que hacer en la ", "animacion ", "keys ", "free ", "gratis ",
            "obtener ", "descargar "
        ]
    rSemilla = np(milSemillas)[rd(0,999)][rCategoria]
    rPalabras = str(palabras[rd(0,(len(palabras))-1)])
    return(str(rPalabras+rSemilla))
""" list of all constants used across all game python scripts """
from numpy import array as np

STARTING_LEVEL = 5

# screen size (currently 720p)
SCREEN_HEIGHT = 720
SCREEN_WIDTH = 1280

# a 1 pixel-wide circle to be enlarged by multiplying it by a desired number
# size; 24 unit-circle points sampled every 15 degrees, counter-clockwise
# starting from (1, 0)
CIRCLE = np([(1, 0), (0.966, 0.259), (0.866, 0.5), (0.707, 0.707),
             (0.5, 0.866), (0.259, 0.966), (0, 1), (-0.259, 0.966),
             (-0.5, 0.866), (-0.707, 0.707), (-0.866, 0.5), (-0.966, 0.259),
             (-1, 0), (-0.966, -0.259), (-0.866, -0.5), (-0.707, -0.707),
             (-0.5, -0.866), (-0.259, -0.966), (0, -1), (0.259, -0.966),
             (0.5, -0.866), (0.707, -0.707), (0.866, -0.5), (0.966, -0.259)])
# ready-made radius-30 copy of CIRCLE
CIRCLE_LARGE = list(CIRCLE * 30)

# the color white in RGBA
WHITE = [255, 255, 255, 255]

# frame rate of game (60)
FRAME_RATE = 1 / 60

# screen window title (working title is used)
SCREEN_TITLE = "Color seeker!"

# angle offset for converting tmx angle to Sprite angles
# NOTE(review): Python 2 script (print statements); formatting reconstructed
# from a collapsed source — verify nesting against the original file.
import csv
import pandas as pd
from sklearn.decomposition import PCA
from numpy import zeros as np

file_=open('1train_dorothea.csv')
w_file=open('VV_train_dorothea.csv','w')
# sparse binary feature matrix: 800 samples x 100000 features
X=np((800,100000),dtype=int)
#print X[1]
row=0
for line in file_:
    line=line.strip()
    line_list=line.split(',')
    # each CSV value is a 1-based feature index that is "on" for this row
    for count in line_list:
        count=int(count)
        X[row][count-1]=1
    row=row+1
#print X[0]
df = pd.DataFrame(data=X)
df = df.transpose()
# reduce the 100000 features down to 500 principal components
pca = PCA(n_components=500)
pca.fit(df)
#print pca.components_
Y=pca.components_
Y=Y.transpose()
print X.shape,Y.shape
row =0
cloumn=0
while row < 800:
    cloumn=0
    while cloumn < 500:
        # NOTE(review): no visible increment of `cloumn`/`row` and no value
        # separator in the output — this loop looks truncated by the source
        # mangling; confirm against the full original file.  (`cloumn` is the
        # original's spelling of "column", kept as-is.)
        w_file.write(str(Y[row][cloumn]))
# NOTE(review): Python 2 script (print statements); requires the project
# module testX (Bandpass / refSignal / cca helpers).
import testX, time
from numpy import genfromtxt as np

# EEG data loaded from CSV, one sample row per line
data2= np('book.csv', delimiter=',')

def Main():
    """Run CCA on a band-passed one-second window of the EEG recording."""
    ms = time.time()*1000.0
    #data2= np.genfromtxt('book.csv', delimiter=',')
    t = 1 # length of the reference signal in seconds (translated comment)
    fs = 256 # sampling frequency (Hz)
    f = 8.5 # reference/stimulus frequency (translated; original comment garbled)
    t1 = 20*256 # window start: sampling begins at epoch 20 (translated)
    t2 = 21*256 # window end: sampling stops at epoch 21 (translated)
    lowcutoff = 8
    highcutoff = 30
    # band-pass filter 8-30 Hz, then correlate against the reference signal
    data = testX.Bandpass(data2,lowcutoff,highcutoff,fs)
    Ref = testX.refSignal(t,fs,f) # Reference Signal
    ms = time.time()*1000.0
    r = testX.cca(data,Ref,t1,t2)#(B,C,0,5)#
    print "Hasil Korelasi CCA =", max(r)
    print "Waktu Hitung (ms) =", time.time()*1000.0 - ms

Main()
def select(clean, hierarchy):
    """Select and weight quotes per LIN, producing calculated bid/offer prices.

    Loads tuning factors (time decay, std-dev tolerance, stale cutoff) from a
    shared Factor*.csv, builds a dealer-ranking dict from `hierarchy`, then
    groups `clean` by 'lin' and, per group, weights each quote by dealer rank,
    days-unchanged decay and days-stale decay, picks a midpoint (color quotes
    win outright), filters quotes within a std-dev band of the midpoint, and
    writes weighted averages into 'Calculated Bid'/'Calculated Offer'.
    Returns the concatenated per-group frames.

    NOTE(review): indentation was reconstructed from a collapsed source —
    verify the nesting (especially the post-loop selection branches) against
    the original file.
    """
    #default values, will update if values present in excel file
    timeDecayF = 30 #volatile, drop this number - days unchanged > 30 gets killed
    stdDevTol = 1.28 #tolerance for number of std devs
    staleF = 10 #standard in Trovo
    factorImport = open(glob(r'\\xsqnfs2.nyc.ime.reuters.com\TRPS\Bank Loans\Auto 2.0\Factor*.csv')[0], 'r')
    readFactors = csv.reader(factorImport)
    for row in readFactors:
        if "Time" in row[0] and row[1] not in (None, "", " "):
            timeDecayF = float(row[1])
        if "Standard" in row[0] and row[1] not in (None, "", " "):
            stdDevTol = float(row[1])
        if "Stale" in row[0] and row[1] not in (None, "", " "):
            staleF = float(row[1])
    factorImport.close()
    hierarchyDict = {}
    for row in hierarchy.iterrows(): #inserting dealer rankings into dictionary
        hierarchyDict.update({row[1][0] : row[1][1]})
    finalFile = pd()
    clean.insert(len(clean.columns), 'DealerW', 0) #initializing dealer weight
    clean.insert(len(clean.columns), 'UnchangedW', 0) #initializing days unchanged weight
    clean.insert(len(clean.columns), 'StaleW', 0) #initializing stale weight
    clean.insert(len(clean.columns), 'Weight', 0) #initializing weights
    clean.insert(len(clean.columns), 'StdDev', 0) #initializing stdDev
    clean.insert(len(clean.columns), 'Calculated Bid', 0) #initializing avg bid
    clean.insert(len(clean.columns), 'Calculated Offer', 0) #initializing avg offer
    select = len(clean.columns)
    clean.insert(select, 'Select', 0) #initializing selection column
    staleIndex = clean.columns.tolist().index('Not Stale')
    clean.iloc[:, select] = clean.iloc[:, staleIndex] #copying over initial select values from stale values
    # cache positional indices for the columns touched below
    columns = clean.columns.tolist()
    stdDevIndex = columns.index('StdDev')
    weightIndex = columns.index('Weight')
    dealerIndex = columns.index('dealer')
    lastBidIndex = columns.index('last_bid')
    lastOfferIndex = columns.index('last_offer')
    colorIndex = columns.index('Color')
    unchangedIndex = columns.index('unchanged_for')
    calcBid = columns.index('Calculated Bid')
    calcOffer = columns.index('Calculated Offer')
    days_stale = columns.index('days_stale')
    dealerWIndex = columns.index('DealerW')
    unchangedWIndex = columns.index('UnchangedW')
    staleWIndex = columns.index('StaleW')
    bidDate = columns.index('last_bid_date')
    lins = clean.groupby('lin') #grouping quotes by their LINs
    hasColor = False #color is midpoint
    now = timeComp.now()
    year = now.year
    month = now.month
    day = now.day
    for group, quotes in lins: #iterating through each LIN to do quote selection
        colorQuote = 0 #color quote price
        midpoint = 0 #midpoint price
        midpointW = 0 #midpoint weight
        stdDev = 0 #variable to get std dev
        hasColor = False
        numIncluded = 0 #number of quotes considered
        stdDevList = [] # keep track of bids to avoid double dataFrame loop
        tempFrame = pd()
        bestStaleBid = 0
        bestStaleAsk = 0
        bidPx = 0
        offerPx = 0
        stdDevDen = 0
        bestStaleW = 0
        if len(quotes) == 1: #if only quote, must use it
            for row in quotes.iterrows():
                data = row[1].to_frame().transpose() #skipping over index column
                data.iloc[0, select] = 1
                dealer = data.iloc[0, dealerIndex]
                lastBid = data.iloc[0, lastBidIndex]
                weight = 0
                dealerCheck = hierarchyDict.get(dealer) #dealer val
                staleVal = data.iloc[0, days_stale] #increment days stale (yesterday's data)
                daysStale = 0
                if nancheck(staleVal):
                    daysStale = 1
                else:
                    daysStale = staleVal
                if dealerCheck == None:
                    # unranked dealers get a flat 0.75 weight
                    dealerW = 0.75
                else:
                    dealerW = 1 - (hierarchyDict.get(dealer) - 1)*.05 #dealer weighting
                unchVal = data.iloc[0, unchangedIndex] #increment days unchanged (yesterday's data)
                daysU = 0
                if nancheck(unchVal):
                    daysU = 1
                elif unchVal < daysStale: #error checking because Trovo isn't good
                    daysU = daysStale
                else:
                    daysU = unchVal #increment days unchanged (yesterday's data)
                updateW = 0
                if timeDecayF < 1:
                    # sub-day decay mode: weight by minutes since today's bid
                    date = data.iloc[0, bidDate]
                    if daysStale != 0:
                        updateW = 0.25
                    else:
                        diff = 0
                        diff += 60 * date.hour
                        diff += now.minute - date.minute
                        updateW = (timeDecayF * 400) / (diff + timeDecayF * 400) #400 is max in a trading day, will act nicely
                elif daysU >= 8 * timeDecayF: #days unchanged cutoff
                    updateW = 0.2
                else: #days unchanged weighting
                    updateW = (3 * timeDecayF) / (daysU + (3 * timeDecayF))
                '''need size weight'''
                staleW = (3 * staleF) /(daysStale + (3 * staleF)) #days stale weight
                weight = dealerW * updateW * staleW
                data.iloc[0, dealerWIndex] = '{0:.5g}'.format(dealerW) #dealer weighting
                data.iloc[0, staleWIndex] = '{0:.5g}'.format(staleW) #stale weighting
                data.iloc[0, unchangedWIndex] = '{0:.5g}'.format(updateW) #unchanged weighting
                data.iloc[0, weightIndex] = '{0:.5g}'.format(weight) #final weighting
                tempFrame = concat([tempFrame, data.iloc[0, :].to_frame().transpose()])
                midpoint = data.iloc[0, lastBidIndex]
                bidPx = midpoint
                offerPx = data.iloc[0, lastOfferIndex]
        else:
            for row in quotes.iterrows(): #otherwise, iterate through all quotes and do selection process
                data = row[1].to_frame().transpose() #skipping over index column
                dealer = data.iloc[0, dealerIndex]
                lastBid = data.iloc[0, lastBidIndex]
                weight = 0
                if isinstance(dealer, float): #has color, mark it- dealer is NaN
                    data.iloc[0, lastOfferIndex] = data.iloc[0, lastBidIndex] #need to set offer price for calculations
                    if hasColor: #if two with color, make midpoint average of two
                        colorQuote = (colorQuote + lastBid) / 2
                    else:
                        hasColor= True
                        colorQuote = lastBid
                    weight = 1
                    data.iloc[0, colorIndex] = 1
                    data.iloc[0, dealerWIndex] = '{0:.5g}'.format(1) #dealer weighting
                    data.iloc[0, staleWIndex] = '{0:.5g}'.format(1) #stale weighting
                    data.iloc[0, unchangedWIndex] = '{0:.5g}'.format(1) #unchanged weighting
                    data.iloc[0, weightIndex] = '{0:.5g}'.format(weight) #final weighting
                else:
                    dealerCheck = hierarchyDict.get(dealer) #dealer val
                    staleVal = data.iloc[0, days_stale] #increment days stale (yesterday's data)
                    daysStale = 0
                    if nancheck(staleVal):
                        daysStale = 1
                    else:
                        daysStale = staleVal
                    if dealerCheck == None:
                        dealerW = 0.75
                    else:
                        dealerW = 1 - (hierarchyDict.get(dealer) - 1)*.05 #dealer weighting
                    unchVal = data.iloc[0, unchangedIndex] #increment days unchanged (yesterday's data)
                    daysU = 0
                    if nancheck(unchVal):
                        daysU = 1
                        data.iloc[0, unchangedIndex] = 1
                    elif unchVal < daysStale: #error checking because Trovo isn't good
                        daysU = daysStale
                        data.iloc[0, unchangedIndex] = daysU
                    else:
                        daysU = unchVal #increment days unchanged (yesterday's data)
                    updateW = 0
                    if timeDecayF < 1:
                        try:
                            # here the bid date is a "Y-M-D H:M tz" string
                            date, time, timeZone = data.iloc[0, bidDate].split(" ")
                            if int(date.split("-")[0]) != year or int(date.split("-")[1]) != month or int(date.split("-")[2]) != day:
                                updateW = 0.25
                            else:
                                diff = 0
                                time = time.split(":")
                                diff += 60 * (now.hour - int(time[0]))
                                diff += now.minute - int(time[1])
                                updateW = (timeDecayF * 400) / (diff + timeDecayF * 400) #400 is max in a trading day, will act nicely
                        except:
                            updateW = 0.25 #either no date of quote or it was before we kept track of time, very old
                    elif daysU >= 8 * timeDecayF: #days unchanged cutoff
                        updateW = 0.2
                    else: #days unchanged weighting
                        updateW = (3 * timeDecayF) / (daysU + (3 * timeDecayF))
                    '''need size weight'''
                    '''IF THERE IS SIZE< TIGHTEN STD DEV'''
                    staleW = (3 * staleF) /(daysStale + (3 * staleF)) #days stale weight
                    weight = dealerW * updateW * staleW
                    data.iloc[0, dealerWIndex] = '{0:.5g}'.format(dealerW) #dealer weighting
                    data.iloc[0, staleWIndex] = '{0:.5g}'.format(staleW) #stale weighting
                    data.iloc[0, unchangedWIndex] = '{0:.5g}'.format(updateW) #unchanged weighting
                    data.iloc[0, weightIndex] = '{0:.5g}'.format(weight) #final weighting
                    if data.iloc[0, select] == 1:
                        # non-stale quote: candidate for midpoint / std dev
                        if weight > midpointW:
                            midpoint = lastBid
                            midpointW = weight
                        numIncluded += 1 #increment n
                        if weight > 0.6 or dealerW == 1:
                            stdDevList.append(lastBid)
                            stdDevDen += 1
                    if weight > bestStaleW:
                        # remember the best quote in case everything is stale
                        bestStaleBid = lastBid
                        bestStaleAsk = data.iloc[0, lastOfferIndex]
                        bestStaleW = weight
                tempFrame = concat([tempFrame, data])
            if hasColor: #if color, auto midpoint
                midpoint = colorQuote
            if numIncluded == 0: #all stale
                if len(quotes) > 2:
                    # fall back to the least-unchanged quote as the midpoint
                    numCalc = np(True, tempFrame['unchanged_for'], 0)
                    midpointUnch = min(numCalc)
                    midpointDraw = np(midpointUnch == tempFrame['unchanged_for'], tempFrame['last_bid'], 0)
                    midpoint = max(midpointDraw)
                    stdDev = 2
                    tempFrame.iloc[:, select] = np((abs(tempFrame['last_bid'] - midpoint) <= (stdDev*stdDevTol)), 1, 0)
                    numCalc = np((tempFrame['Select'] == 1), tempFrame['last_bid'], 0)
                    numInc = nonZ(numCalc)
                    bidPx = sum(numCalc)/numInc
                    offerPx = sum(np((tempFrame['Select'] == 1), tempFrame['last_offer'], 0))/numInc
                else:
                    tempFrame.iloc[:, select] = np((abs(tempFrame['last_bid'] - midpoint) <= (stdDev*stdDevTol)), 1, 0)
                    bidPx = bestStaleBid
                    offerPx = bestStaleAsk
                    midpoint = bestStaleBid
                    stdDev = 0
            else:
                '''IF THERE IS SIZE, TIGHTEN STD DEV, else do everything below'''
                if stdDevDen == 0:
                    stdDev = .25 #make very tight parameter but allow for depth
                else:
                    stdDev = ((sum([(x - midpoint)**2 for x in stdDevList])/(stdDevDen))**(1/2))
                    if stdDev < 0.25:
                        stdDev = 0.25
                # cap the band: wider for low-priced names
                if stdDev > 5.0:
                    if midpoint < 80:
                        stdDev = 5.0
                    else:
                        stdDev = 3.0
                elif stdDev > 3.0:
                    if midpoint >= 80:
                        stdDev = 3.0
                tempFrame.iloc[:, select] = np((abs(tempFrame['last_bid'] - midpoint) <= (stdDev*stdDevTol)) & tempFrame['Not Stale'] == 1, 1, 0)
                numCalc = np((tempFrame['Select'] == 1), tempFrame['last_bid'], 0)
                numInc = nonZ(numCalc)
                if numInc < 3 and stdDev == 0.25:
                    tempFrame.iloc[:, select] = np(((abs(tempFrame['last_bid'] - midpoint) <= (0.25*stdDevTol))), 1, 0) #too tight, add depth, stdDev 0.25
                    numCalc = np((tempFrame['Select'] == 1), tempFrame['last_bid'], 0)
                    numInc = nonZ(numCalc)
                bidPx = sum(numCalc)/numInc
                offerPx = sum(np((tempFrame['Select'] == 1), tempFrame['last_offer'], 0))/numInc
        tempFrame.iloc[:, stdDevIndex] = stdDev
        tempFrame.iloc[:, calcBid] = '%.3f' % bidPx
        tempFrame.iloc[:, calcOffer] = '%.3f' % offerPx
        finalFile = concat([finalFile, tempFrame])
    return finalFile
# NOTE(review): fragment — the enclosing function's definition (and the
# variables weights, length, sequences, extremities, result, aa, lines,
# frequencies, np, f) is not visible in this chunk; verify indentation and
# scope against the full original file.
if weights[0] > 0: # use the weights to create the profile
    for j in range(length):
        weightsSum = 0
        # calculate weights sum and entropy
        for i in range(sequences):
            # only count sequence i if column j lies inside its extremities
            if not j < extremities[i][0] and not j > extremities[i][1]:
                result[j][aa[lines[i + 1][j]]] += weights[i]
        for i in range(21): # Gaps are normalised separately
            weightsSum += result[j][i]
        # normalize result
        for i in range(21):
            if result[j][i] != 0:
                result[j][i] = result[j][i] / weightsSum
        # gap entry (index 21) is normalised against weightsSum + itself
        if result[j][21] != 0:
            result[j][21] = result[j][21] / (weightsSum + result[j][21])
elif sequences > 1:
    # 1 aligned sequence diverge only with gaps so no weights/entropy but gaps found
    result = frequencies
    # clip to 1 the frequence of the AA presents in the profiled protein
    for j in range(length):
        result[j][aa[lines[1][j]]] = 1.0
# convert result (a list of lists) in a string and write out the result
f.write(" ".join(map(str, np(result))) + "\n")
f.close()
# NOTE(review): fragment — this starts at an `except` clause whose `try`
# (and the loop providing `i`, plus the definitions of num/licz/np) is not
# visible in this chunk; verify against the full original file.  Comments
# below translate the Polish identifiers: wynik = result, dziel = input
# expression, licz = digit lists.
except ValueError:
    # non-integer digits: redo the accumulation with floats and print a/b + c/d
    for m in range(0,len(licz[i])):
        num[i] = num[i] + float(licz[i][m])*10**(len(licz[i])-m-1)
    wynik = num[0]/num[1] + num[2]/num[3]
    print("Twój wynik to: ",wynik)
else:
    # success path: parse "a/b + c/d" character-by-character into four digit lists
    dziel = list(input('''Wprowadz ułamki w postaci:a/b + c/d: '''))
    print()
    num = [0,0,0,0]
    licz = list()
    licz.append(dziel[:dziel.index('/')])
    licz.append(dziel[(dziel.index('/')+1):(dziel.index('+')-1)])
    del dziel[dziel.index('/')]
    licz.append(dziel[(dziel.index('+')+2):dziel.index('/')])
    licz.append(dziel[(dziel.index('/')+1):(len(dziel))])
    # rebuild each operand from its digit characters
    for i in range(0,4):
        for m in range(0,len(licz[i])):
            num[i] = num[i] + int(licz[i][m])*10**(len(licz[i])-m-1)
    # np here takes the two denominators — presumably a common-denominator
    # helper (e.g. numpy.lcm alias); confirm the actual import
    z = np(num[1], num[3])
    # scale both numerators to the common denominator and add
    for i in range(0,3,2):
        num[i] = num[i] * int((z/num[i+1]))
    num[0] = num[0] + num[2]
    print("Twój wynik to: ", num[0],"/",z)