def update_boids(boids, time):
    """Advance every boid by one simulation step.

    Each boid is a mutable sequence ``[x, y, vx, vy]``.  Velocity is
    adjusted by the cohesion, separation and alignment rules, clamped to
    a speed limit, then position is integrated and bounced off the walls.

    Args:
        boids: list of boids; mutated in place.
        time: elapsed time since the last update, in milliseconds.
    """
    for index in range(len(boids)):
        b = boids[index]
        # Evaluate each steering rule once per boid.  The original called
        # cohesion()/separate() twice per boid (once per component);
        # assumes the rules depend only on boid positions, which are not
        # modified until after all rules are applied — TODO confirm.
        coh = cohesion(index, boids)
        sep = separate(index, boids)
        ali = align(index, boids)
        b[2] += coh[0] * 10
        b[3] += coh[1] * 10
        b[2] += sep[0] * 5
        b[3] += sep[1] * 5
        b[2] += ali[0]
        b[3] += ali[1]
        # Limit velocity to 500 pixels per second horizontally and vertically.
        b[2] = speed_limit(b[2], 500)
        b[3] = speed_limit(b[3], 500)
        # Update the boid's position based on its velocity (px/s) and the
        # time (ms) that has passed since the last update.
        b[0] += float(b[2]) / 1000 * time
        b[1] += float(b[3]) / 1000 * time
        # Make the boid bounce off the walls.
        if b[0] < 0:
            b[0] = 0
            b[2] = -b[2]
        elif b[0] > WIDTH:
            b[0] = WIDTH
            b[2] = -b[2]
        if b[1] < 0:
            b[1] = 0
            b[3] = -b[3]
        elif b[1] > HEIGHT:
            b[1] = HEIGHT
            b[3] = -b[3]
def update_boids(boids, time, SEPARATION_MULTIPLIER, COHESION_MULTIPLIER, align_on, sl):
    """Advance every boid by one simulation step.

    Each boid is a mutable sequence ``[x, y, vx, vy, color-state]``.

    Args:
        boids: list of boids; mutated in place.
        time: elapsed time since the last update, in milliseconds.
        SEPARATION_MULTIPLIER: weight applied to the separation rule.
        COHESION_MULTIPLIER: weight applied to the cohesion rule.
        align_on: "y"/"Y" enables the alignment rule; anything else skips it.
        sl: per-axis speed limit passed to speed_limit(), pixels/second.
    """
    for index in range(len(boids)):
        b = boids[index]
        # Evaluate each rule once per boid.  The original called
        # separate() three times with identical arguments; assumes the
        # rules depend only on boid positions (unchanged until the
        # integration step below) — TODO confirm.
        coh = cohesion(index, boids)
        sep = separate(index, boids, random_color, b[4])
        b[2] += coh[0] * COHESION_MULTIPLIER
        b[3] += coh[1] * COHESION_MULTIPLIER
        b[2] += sep[0] * SEPARATION_MULTIPLIER
        b[3] += sep[1] * SEPARATION_MULTIPLIER
        if align_on == "y" or align_on == "Y":
            ali = align(index, boids)
            b[2] += ali[0]
            b[3] += ali[1]
        # Third element of the separation result carries the new color state.
        b[4] = sep[2]
        # Limit velocity to sl pixels per second horizontally and vertically.
        b[2] = speed_limit(b[2], sl)
        b[3] = speed_limit(b[3], sl)
        # Update the boid's position based on its velocity and the
        # time that has passed since the last update.
        b[0] += float(b[2]) / 1000 * time
        b[1] += float(b[3]) / 1000 * time
        # Make the boid bounce off the walls.
        if b[0] < 0:
            b[0] = 0
            b[2] = -b[2]
        elif b[0] > WIDTH:
            b[0] = WIDTH
            b[2] = -b[2]
        if b[1] < 0:
            b[1] = 0
            b[3] = -b[3]
        elif b[1] > HEIGHT:
            b[1] = HEIGHT
            b[3] = -b[3]
def MonitarTL():
    """Poll the Twitter timeline for task tweets and sync them to the DBs.

    Searches for tweets newer than ``Lastid`` that contain ``searchKey``.
    Tweets that also contain ``key2``/``key3`` are completion reports and
    their todos are deleted from the AM database; other matching tweets
    are split into todos, acknowledged with a reply, and inserted into
    the AM or PM database depending on the current hour.

    Returns:
        None when there is nothing new to process.
    """
    global connAM, dbAM, connPM, dbPM
    global Lastid
    global oldtweet
    global searchKey
    tweets = None
    tweets = search.searchmain(searchKey, twitter, Lastid, "20")
    if tweets is None:
        print("line24")
        return None
    elif len(tweets["statuses"]) == 0:
        print("line27")
        return None
    else:
        # Remember the newest tweet id so the next poll only sees newer tweets.
        Lastid = tweets["statuses"][0][u'id_str']
        for tweet in tweets["statuses"]:
            try:
                if key2 in tweet["text"] or key3 in tweet["text"]:
                    # Completion report: remove the finished todos.
                    doneTodos = separate.separate(tweet["text"], searchKey, key2, key3)
                    for doneTodo in doneTodos:
                        print("doneTodo: " + doneTodo)
                        # BUG FIX: the SQL was missing its "?" placeholder,
                        # so execute() raised on every delete attempt.
                        dbAM.execute("DELETE FROM tweets WHERE todo=?", (doneTodo, ))
                else:
                    print("tweet.text: ", tweet["text"])
                    todos = separate.separate(tweet["text"], searchKey, key2, key3)
                    reply.reply(
                        twitter, tweet[u'id_str'],
                        separate.delet(tweet["text"], searchKey, key2, key3) +
                        "\nをタスクに追加だね!\n報告しないと責めるよ!")
                    # Tweets before noon go to the AM database, after to PM.
                    now = datetime.datetime.now()
                    if now.hour < 12:
                        db = dbAM
                        print("AM")
                    else:
                        db = dbPM
                        print("PM")
                    for todo in todos:
                        db.execute(
                            "INSERT INTO tweets (todo, tweetId, userId) VALUES(?, ?, ?)",
                            (todo, int(tweet["id"]), int(tweet["user"]["id"])))
            except Exception as e:
                # Keep processing the remaining tweets on any per-tweet failure.
                print(e)
            connAM.commit()
            connPM.commit()
        connAM.commit()
        connPM.commit()
def arithmetic_arranger(problems, solution=False):
    """Arrange addition/subtraction problems vertically for easy reading.

    Args:
        problems: list of problem strings such as ``"32 + 698"``.
        solution: when True, also include each problem's answer line.

    Returns:
        The arranged problems as a single string, or an error-message
        string when the input is invalid.
    """
    # Reject oversized input up front.
    if len(problems) > 5:
        return "Error: Too many problems."

    # Break every problem into parts; separate() signals invalid input
    # by returning an error string, which is propagated unchanged.
    mainDict = {}
    for position, problem in enumerate(problems, start=1):
        parts = separate(problem)
        if type(parts) == str:
            return parts
        mainDict['Problem%s' % position] = parts

    # Optionally compute and attach the answers.
    if solution == True:
        mainDict = answer(mainDict)

    # Render each parsed problem into its display lines.
    return format_lines(mainDict, solution)
async def main():
    """Fetch a joke from VK, synthesize it with Yandex TTS, post-process it.

    The Yandex IAM token is obtained by shelling out to the ``yc`` CLI
    (simpler than performing the token exchange in Python).
    """
    if not os.path.exists('output_sound'):
        os.mkdir('output_sound')
    yc = subprocess.Popen(
        'yc iam create-token',
        shell=True,
        stdout=subprocess.PIPE
    )
    yc_token = yc.stdout.read().decode("utf-8")
    # Split on newlines to drop the trailing newline and anything the CLI
    # may print after the token; element 0 is the token itself.
    yc_token = yc_token.split('\n')
    vk_token = os.getenv('VK_TOKEN')
    joke = await getJoke(vk_token)
    print(joke)
    # separate() yields (setup, punchline).
    joke_separated = separate(joke)
    path = os.getcwd()
    tts_setup = await getTts(joke_separated[0], yc_token[0])
    tts_punchline = await getTts(joke_separated[1], yc_token[0])
    # BUG FIX: the output files were opened but never closed; context
    # managers guarantee the audio data is flushed to disk.
    with open('{}/input_sound/tts_setup.ogg'.format(path), 'wb') as tts_setup_file:
        tts_setup_file.write(tts_setup)
    with open('{}/input_sound/tts_punchline.ogg'.format(path), 'wb') as tts_punchline_file:
        tts_punchline_file.write(tts_punchline)
    await nuzhdify()
def generate_sentences(filename, n):
    """Generate *n* random sentences via a weighted (Markov-style) walk.

    The text in *filename* is tokenized into a word-weight dictionary.
    Each sentence starts at the "<begin>" tag and grows by weighted
    choice until a word carrying the "<end>" tag is drawn; sentences of
    two or fewer words are discarded and retried.

    Returns:
        A list of *n* formatted sentence strings.
    """
    words_dict = get_word_weights(separate(read_text(filename)))
    sentences = []
    while len(sentences) < n:
        # Every sentence begins at the start tag.
        chain = ["<begin>"]
        while True:
            # Candidate successors (and their weights) of the last word.
            candidates, weights = zip(*wc.get_weights_from_dict(words_dict[chain[-1]]))
            chosen = wc.weighted_choice(candidates, weights)
            chain.append(chosen)
            # An end tag terminates the sentence; only keep it when it
            # is longer than two words.
            if "<end>" in chosen:
                if len(chain) > 2:
                    sentences.append(format_sentence(chain))
                break
    return sentences
def generate_sentences(filename, n):
    """Generate n sentences from the text in *filename* using a
    weighted (Markov-style) word chain.

    Args:
        filename: path to the source text file.
        n: number of sentences to produce.

    Returns:
        A list of n formatted sentence strings.
    """
    # separate text and generate the word weights dictionary
    sep_text = separate(read_text(filename))
    words_dict = get_word_weights(sep_text)
    sentences = []
    # begin all sentences with a begin tag
    init_sentence = ["<begin>"]
    while len(sentences) < n:
        # Start each candidate sentence from a fresh copy of the seed.
        sentence_words = init_sentence[:]
        while True:
            # get the last word of the sentence
            prev_word = sentence_words[-1]
            # get the list of words and weights for that word
            words, weights = zip(*wc.get_weights_from_dict(words_dict[prev_word]))
            # make a weighted choice for the new word based on the previous
            new_word = wc.weighted_choice(words, weights)
            sentence_words.append(new_word)
            # if an end tag is found and the sentence list is more than two,
            # format the sentence into a string and append it to the list
            # (shorter sentences are discarded and the outer loop retries)
            if "<end>" in new_word:
                if len(sentence_words) > 2:
                    sentences.append(format_sentence(sentence_words))
                break
    return sentences
def PC(L):
    """Run a point-charge embedded CIS calculation for the main fragment.

    Splits the coordinate file into the main fragment plus water
    molecules, builds point-charge files for each water and for the main
    fragment, then builds and submits the CIS job.

    Args:
        L: parameter list — [coord basename, main fragment name, atom
           count of main fragment, scaling factor, charge, multiplicity,
           job type, root number].
    """
    coord = L[0] + '.xyz'
    main = L[1]  # name of main fragment
    N = int(L[2])  # no. of atoms in main fragment
    # scaling/main_charge/main_mult are parsed (and validated by the
    # conversions) but unused in the live workflow below.
    scaling = float(L[3])
    main_charge = int(L[4])
    main_mult = int(L[5])
    job = L[6]
    root = int(L[7])
    n = separate.separate(coord, main, N)  # n is no. of water molecules
    for i in range(n):
        charge_water.pointcharge('Water' + str(i + 1) + '.xyz')
    for i in range(n):
        pc_water.pc_water(i + 1, n)
    gen_main_pc.gen_main_pc(main, n)
    # NOTE: a long self-consistent charge-iteration workflow
    # (make_input/submit/energy/replace_by_Q loops and the EOM follow-up)
    # used to live here fully commented out; it has been removed —
    # recover it from version control if it is ever needed again.
    make_CIS_input.make_DLPNO_input(main + '.xyz', main + '.pc', job)
    submit.submit(main + '_' + job + '_CIS_pc')
    # BUG FIX: Python 2 print statement -> print() function call.
    print(main + '_' + job + '_CIS_pc.out' + ' ' + main + ' ' + str(root))
    return
# coding:utf-8
"""Classify the emotion of a short Chinese text.

Pipeline: filter the raw text, segment it into words, then run the
emotion classifier on the segmented result.
"""
import separate
import filer
import emo_cls

# NOTE: the original bound the text to the name ``str``, shadowing the
# builtin; renamed to ``text``.
text = '恐惧恐惧恐惧恐惧恐惧恐惧恐惧'
text = filer.filter(text)
seg = separate.separate(text)
# BUG FIX: Python 2 print statement -> print() function call.
print(emo_cls.classify(seg))
# Final data variables X and target variables Y X = np.array(one_hot) Y = np.array(res) ## Compare accuracy of naive Bayes and logistic regression before finding best learning rate # All datasets will use for logistic regression the same learning rate = 0.01 and # iterations = 500 rate = 0.01 iterations = 500 log_model = log_regression(rate, iterations) X = log_model.bias(X) # add bias column # Separate training and testing sets X_train, Y_train, X_test, Y_test = separate.separate(X, Y) ## Logistic regression # train the data fit_iono = log_model.fit(X_train, Y_train) # Cross validation validation = cross_validation(rate, max_iterations=10000) score = validation.evaluate_log(X_train, Y_train) print("Averaged training accuracy for Logistic Regression: ", score) # Test data pre = log_model.predict(X_test, fit_iono) acc = log_model.evaluate_acc(pre, Y_test) print("Accuracy on testing data for Logistic Regression: ", acc)
def XPOL_n(L, iter_no):
    """Run iter_no self-consistent point-charge (X-POL style) iterations.

    Each iteration builds the main-fragment point-charge file, runs the
    main-fragment job, then regenerates and reruns every water
    molecule's job with the updated charges.  Timings are appended to
    ``<main>_time.txt`` and energies to ``<main>_energy.txt``.

    Args:
        L: parameter list — [coord basename, main fragment name, atom
           count, scaling factor, charge, multiplicity, job, root].
        iter_no: number of charge-update iterations to run (must be >= 1,
            otherwise the final energy is undefined).

    Returns:
        The main-fragment energy after the final iteration.
    """
    coord = L[0] + '.xyz'
    main = L[1]  # name of main fragment
    N = int(L[2])  # no. of atoms in main fragment
    # scaling/root are parsed (validated by the conversions) but unused
    # in the live workflow below.
    scaling = float(L[3])
    main_charge = int(L[4])
    main_mult = int(L[5])
    job = L[6]
    root = int(L[7])
    # Truncate the timing log; all later writes append to it.
    with open(main + '_time.txt', 'w'):
        pass
    n = separate.separate(coord, main, N)  # n is no. of water molecules
    for i in range(n):
        charge_water.pointcharge('Water' + str(i + 1) + '.xyz')
    for i in range(n):
        pc_water.pc_water(i + 1, n)
    for k in range(iter_no):
        gen_main_pc.gen_main_pc(main, n)
        make_input.make_input(main + '.xyz', main + '.pc', main_charge, main_mult)
        submit.submit(main + '_pc')
        # Record how long the main-fragment job took.
        time = comp_time.comp_time(main + '_pc.out')
        s = 'Time taken for iteration number ' + str(k + 1) + ' for main fragment :\n'
        s += time + '\n\n'
        with open(main + '_time.txt', 'a') as f:
            f.write(s)
        En = energy.energy(main + '_pc.out')
        with open(main + '_energy.txt', 'a') as f:
            f.write('Energy after iteration no. ' + str(k + 1) + ' : ' + str(En) + '\n\n')
        # Update main-fragment charges from this iteration's output.
        replace_by_Q.replace_by_Q(main + '.xyz', main + '_pc.out')
        # Regenerate water point-charge files using the updated charges.
        for i in range(n):
            gen_water_pc.gen_water_pc(i + 1, main, N)
        for i in range(n):
            make_input.make_input('Water' + str(i + 1) + '.xyz', 'Water' + str(i + 1) + '.pc')
        for i in range(n):
            submit.submit('Water' + str(i + 1) + '_pc')
            time = comp_time.comp_time('Water' + str(i + 1) + '_pc.out')
            s = 'Time taken for iteration number ' + str(k + 1) + ' for water mol. no. ' + str(i + 1) + ' :\n'
            s += time + '\n\n'
            with open(main + '_time.txt', 'a') as f:
                f.write(s)
        for i in range(n):
            replace_by_Q.replace_by_Q('Water' + str(i + 1) + '.xyz', 'Water' + str(i + 1) + '_pc.out')
        for i in range(n):
            pc_water.pc_water(i + 1, n)
    # Regenerate the main-fragment pc file from the final water charges.
    gen_main_pc.gen_main_pc(main, n)
    # NOTE: a large commented-out EOM/CIS follow-up workflow was removed;
    # see version control history if it needs to be restored.
    return En
## Cross validation #score = bayes_model.cross_validation(X_train,Y_train, 5) #print(score) ## Compare accuracy of naive Bayes and logistic regression # All datasets will use for logistic regression the same learning rate = 0.01 and # iterations = 500 rate = 0.01 iterations = 500 log_model = log_regression(rate, iterations) X = log_model.bias(X) # add bias column # Separate training and testing sets X_train, Y_train, X_test, Y_test = separate.separate(X, Y) ## Logistic regression # train the data fit_iono = log_model.fit(X_train, Y_train) # Cross validation validation = cross_validation(rate, max_iterations=500) score = validation.evaluate_log(X_train, Y_train) print("Averaged training accuracy for Logistic Regression: ", score) # Test data pre = log_model.predict(X_test, fit_iono) acc = log_model.evaluate_acc(pre, Y_test) print("Accuracy on testing data for Logistic Regression: ", acc)
"""Build point-charge input files for a uracil + water cluster.

Splits the coordinate file into uracil and its surrounding waters,
derives point charges for each water, and assembles the uracil input.
"""
import charge_water
import pc_water
import sys, os
import subprocess
import shutil
import make_input
import replace_by_Q
import gen_water_pc
import gen_uracil_pc
import charge_water  # NOTE(review): duplicate import, harmless
import submit
import energy
import separate  # BUG FIX: separate.separate() was called without importing it

N = input("Enter the coordinates file: ")
# Split the coordinate file; n is the number of water molecules found.
n = separate.separate(N)
#replace_by_Q.replace_by_Q('uracil.xyz')
for i in range(n):
    charge_water.pointcharge('Water' + str(i + 1) + '.xyz')
for i in range(n):
    pc_water.pc_water(i + 1, n)
gen_uracil_pc.gen_uracil_pc(n)
#for i in range (n):
#    gen_water_pc.gen_water_pc(i+1)
make_input.make_input('uracil.xyz', 'uracil.pc')
    # Tail of a function whose definition begins before this chunk:
    # transform to the frequency domain, zero the last (Nyquist) bin,
    # and return the filtered signal.
    outF = np.fft.rfft(out)
    outF[-1] = 0
    out = np.fft.irfft(outF)
    return out


# Load both source recordings.  NOTE(review): both reads assign ``rate``,
# so the files are assumed to share one sample rate — confirm.
rate, aWav = wavfile.read("a.wav")
rate, bWav = wavfile.read("b.wav")
n = len(aWav)
m = len(bWav)
a = aWav
b = bWav
# Decompose the two signals into shared components ("pine" and "fir").
pine, (fir, ) = separate(a, b)
wavfile.write("pine.wav", rate, np.tile(normalize(pine), 20))
wavfile.write("fir.wav", rate, np.tile(normalize(fir), 20))
aHat, bHat = combineIngredients(pine, fir, m)
# Write originals followed by reconstructions for A/B listening tests.
wavfile.write(
    "aHat.wav", rate,
    np.concatenate((np.tile(normalize(a), 20), np.tile(normalize(aHat), 20))))
wavfile.write(
    "bHat.wav", rate,
    np.concatenate((np.tile(normalize(b), 20), np.tile(normalize(bHat), 20))))
hats = [a, b]
# NOTE(review): xrange is Python 2 only — this chunk predates Python 3.
for i in xrange(20):
    aHat, cHat = combineIngredients(pine, fir, m**(i + 2) // n**(i + 1))
    hats.append(cHat)