def main():
    """Train small feed-forward networks on the Iris and Pima diabetes data.

    Loads both CSV files, captures the target column before normalisation,
    normalises the feature matrices, then runs one training pass per dataset.
    """
    # Load data files
    nRows_iris = 150
    nColumns_iris = 5
    num_epoch_iris = 1
    learning_rate_iris = .5
    nRows_diabetes = 768
    nColumns_diabetes = 9
    num_epoch_diabetes = 1
    learning_rate_diabetes = .5
    iris = lf.load_file("iris.csv", nRows_iris, nColumns_iris)
    diabetes = lf.load_file("diabetes.data", nRows_diabetes, nColumns_diabetes)

    # Collect target data (last column) before it is normalized
    iris_targets = []
    diabetes_targets = []
    for row in range(nRows_iris):
        iris_targets.append(iris[row][nColumns_iris - 1])
    for row in range(nRows_diabetes):
        diabetes_targets.append(diabetes[row][nColumns_diabetes - 1])

    # Normalize data files
    # NOTE(review): this normalizes the label column together with the
    # features, and the row shuffle below reorders the data after the
    # targets were captured, so targets may no longer align with rows —
    # confirm against net.network / print_accuracy semantics.
    iris = preprocessing.normalize(iris)
    diabetes = preprocessing.normalize(diabetes)

    # Run Iris
    # Length is num_layers, each element is num_nodes
    iris_num_layers_array = [1, 3]
    for i in range(num_epoch_iris):
        np.random.shuffle(iris)
        # NOTE(review): a fresh network is built every epoch, so weights
        # learned in earlier epochs are discarded; the learning-rate decay
        # below only matters if num_epoch_iris > 1.
        iris_net = net.network(iris_num_layers_array, nRows_iris,
                               nColumns_iris, iris, "Iris",
                               learning_rate_iris)
        iris_net.run_network()
        iris_net.generate_guesses()
        iris_net.update_weights()
        iris_net.print_accuracy(iris_targets)
        if learning_rate_iris > .1:
            learning_rate_iris -= .001

    # Run Diabetes
    diabetes_num_layers_array = [1, 2]
    for i in range(num_epoch_diabetes):
        np.random.shuffle(diabetes)
        diabetes_net = net.network(diabetes_num_layers_array, nRows_diabetes,
                                   nColumns_diabetes, diabetes, "Diabetes",
                                   learning_rate_diabetes)
        diabetes_net.run_network()
        diabetes_net.generate_guesses()
        diabetes_net.update_weights()
        diabetes_net.print_accuracy(diabetes_targets)
        if learning_rate_diabetes > .1:
            learning_rate_diabetes -= .001
def main():
    """Train small feed-forward networks on the Iris and Pima diabetes data.

    Loads both CSV files, captures the target column before normalisation,
    normalises the feature matrices, then runs one training pass per dataset.
    """
    # Load data files
    nRows_iris = 150
    nColumns_iris = 5
    num_epoch_iris = 1
    learning_rate_iris = .5
    nRows_diabetes = 768
    nColumns_diabetes = 9
    num_epoch_diabetes = 1
    learning_rate_diabetes = .5
    iris = lf.load_file("iris.csv", nRows_iris, nColumns_iris)
    diabetes = lf.load_file("diabetes.data", nRows_diabetes, nColumns_diabetes)

    # Collect target data (last column) before it is normalized
    iris_targets = []
    diabetes_targets = []
    for row in range(nRows_iris):
        iris_targets.append(iris[row][nColumns_iris - 1])
    for row in range(nRows_diabetes):
        diabetes_targets.append(diabetes[row][nColumns_diabetes - 1])

    # Normalize data files
    # NOTE(review): normalization includes the label column, and the shuffle
    # below reorders rows after targets were captured — verify alignment
    # against net.network / print_accuracy.
    iris = preprocessing.normalize(iris)
    diabetes = preprocessing.normalize(diabetes)

    # Run Iris
    # Length is num_layers, each element is num_nodes
    iris_num_layers_array = [1, 3]
    for i in range(num_epoch_iris):
        np.random.shuffle(iris)
        # NOTE(review): the network is re-created each epoch, discarding any
        # previously learned weights.
        iris_net = net.network(iris_num_layers_array, nRows_iris,
                               nColumns_iris, iris, "Iris",
                               learning_rate_iris)
        iris_net.run_network()
        iris_net.generate_guesses()
        iris_net.update_weights()
        iris_net.print_accuracy(iris_targets)
        if learning_rate_iris > .1:
            learning_rate_iris -= .001

    # Run Diabetes
    diabetes_num_layers_array = [1, 2]
    for i in range(num_epoch_diabetes):
        np.random.shuffle(diabetes)
        diabetes_net = net.network(diabetes_num_layers_array, nRows_diabetes,
                                   nColumns_diabetes, diabetes, "Diabetes",
                                   learning_rate_diabetes)
        diabetes_net.run_network()
        diabetes_net.generate_guesses()
        diabetes_net.update_weights()
        diabetes_net.print_accuracy(diabetes_targets)
        if learning_rate_diabetes > .1:
            learning_rate_diabetes -= .001
def main():
    """Compare recorded vehicle positions against reference paths and plot errors.

    Runs Path.find_error over a straight-line run and a circular run, then
    plots cross-track error versus path index for each.
    """
    # Straight Path Test, set up your path
    x_start = 0
    y_start = 0
    # NOTE(review): backslash separators make these paths Windows-only;
    # consider os.path.join / pathlib if this must run elsewhere.
    straight_path_file = 'path_SRC\straight_path.csv'
    straight_line = load_file(straight_path_file)
    straight_path = Path(straight_line, x_start, y_start)

    # Test against simulated movement
    print('----------- Actual vs. Expected Straight Line-------------------')
    actual_line_file = 'path_SRC\straight_error_path.csv'
    actual_line = load_file(actual_line_file)
    line_error = []
    line_index = []
    # NOTE(review): the "- 1" drops the final recorded sample (see the
    # plotting-artifact comment below).
    for i in range(0, len(actual_line[0][:]) - 1):
        actual = [actual_line[0][i], actual_line[1][i]]
        error, index = straight_path.find_error(actual)
        line_error.append(error)
        line_index.append(index)
        # print("Error:", error)
        # print("Index: ", index)

    # Curve Path Test, set up your path
    circle_path_file = 'path_SRC\circle_path.csv'
    circle = load_file(circle_path_file)
    circle_path = Path(circle, x_start, y_start)
    curve_error = []
    curve_index = []

    # arbitrary test points
    print('----------- Actual vs. Expected Circle Path-------------------')
    actual_circle_file = 'path_SRC\circle_error_path.csv'
    actual_circle = load_file(actual_circle_file)
    for i in range(0, len(actual_circle[0][:]) - 1):
        actual = [actual_circle[0][i], actual_circle[1][i]]
        error, index = circle_path.find_error(actual)
        curve_error.append(error)
        curve_index.append(index)

    # plot errors
    # There is a plotting artifact on the last position, I need to figure
    # out to solve the last position if you go beyond the last position.
    circle = plt.figure(1)
    plt.plot(curve_index, curve_error)
    plt.show()
    line = plt.figure(2)
    plt.plot(line_index, line_error)
    plt.show()
def get_game_lengths(n):
    """Return the total turn count for every game in the data file *n*.

    Each game record is expected to expose integer fields "a", "b" and "t";
    their sum is that game's length.
    """
    data = load_file(n)
    # One total per game, preserving file order.
    return [game["a"] + game["b"] + game["t"] for game in data]
def menu():
    """Interactive menu loop for the one-time-pad chat tool.

    Offers pad generation, encryption and decryption until the user quits.
    A missing pad/ciphertext file is reported instead of crashing.
    """
    try:
        print("\nSECRET AGENT CHAT")
        choices = ['1', '2', '3', '4']
        choice = '0'
        while True:
            # Re-prompt until a valid menu option is entered.
            while choice not in choices:
                print('What would you like to do?')
                print('1. Generate one-time pads')
                print('2. Encrypt a message')
                print('3. Decrypt a message')
                print('4. Quit the program')
                choice = input('Please type 1, 2, 3 or 4 and press Enter ')
            if choice == '1':
                sheets = int(
                    input(
                        'How many one-time pads would you like to generate? '
                    ))
                length = int(
                    input('What will be your maximum message length? '))
                generate_otp(sheets, length)
            elif choice == '2':
                filename = input(
                    'Type in the filename of the OTP you want to use ')
                sheet = load_file(filename)
                encrypt(sheet)
                #filename = input('What will be the name of the encrypted file? ')
            elif choice == '3':
                filename = input(
                    'Type in the filename of the OTP you want to use ')
                sheet = load_file(filename)
                filename = input(
                    'Type in the name of the file to be decrypted ')
                ciphertext = load_file(filename)
                plaintext = decrypt(ciphertext, sheet)
                print('The message reads:')
                print('')
                print(plaintext)
            elif choice == '4':
                exit()
            # Force the inner prompt loop to run again.
            choice = '0'
    except FileNotFoundError as file_not_found_error:
        # BUG FIX: the original printed the misspelled name
        # `file_no_found_error`, raising NameError inside the handler.
        print(file_not_found_error)
    else:
        print('')
    finally:
        print('')
def main():
    """Program entry point: parse the task file, solve it, and print answers."""
    # Semantic-understanding stage: read the text, extract conditions and questions
    cond_roman, cond_price, ques_roman, ques_price, unknown = load_file("task.txt")
    # Logic stage: compute the answers from the conditions and questions
    answer_roman, answer_price = compute(cond_roman, cond_price, ques_roman,
                                         ques_price)
    # Output stage: print the results in the required format
    output("task.txt", answer_roman, answer_price, unknown)
def main():
    """Program entry point: parse the task file, solve it, and print answers."""
    # Semantic-understanding stage: read the text, extract conditions and questions
    cond_roman, cond_price, ques_roman, ques_price, unknown = load_file(
        "task.txt")
    # Logic stage: compute the answers from the conditions and questions
    answer_roman, answer_price = compute(cond_roman, cond_price, ques_roman,
                                         ques_price)
    # Output stage: print the results in the required format
    output("task.txt", answer_roman, answer_price, unknown)
def get_game_lengths(n):
    """Return, per game in file *n*, the winner's share of the total turns.

    Each record exposes turn counts "a"/"b" and a winner flag "W" ("A" means
    player A won); the result is winner_turns / (a + b) for every game.
    """
    output = []
    data = load_file(n)
    for game in data:
        turns = game["a"] + game["b"]
        # Pick the turn count belonging to whichever player won.
        win = game["a"] if game["W"] == "A" else game["b"]
        output.append(win / turns)
    return output
def count_face_cards(n, output_w, output_a):
    """Append per-game face-card sums from data file *n* to the output lists.

    For each game, `output_w` receives the sum of the last four entries of
    the winner's hand ("A" or "B" keyed by the "W" flag), while `output_a`
    always receives the same sum for player A's hand. Both lists are
    mutated in place.
    """
    data = load_file(n)
    for game in data:
        # Winner's hand: "W" == "A" means player A won.
        hand = game["A"] if game["W"] == "A" else game["B"]
        # NOTE(review): the last four entries are presumably the face-card
        # counts — confirm against the data layout.
        output_w.append(sum(hand[-4:]))
        output_a.append(sum(game["A"][-4:]))
def count_wins(n, counts, wins):
    """Tally games and wins by each player's turn-share percentage.

    For every game in file *n* and each player ("a"/"A", "b"/"B"), computes
    the player's share of total turns as a rounded percentage and increments
    `counts[rate]`; `wins[rate]` is additionally incremented when that player
    won (the "W" flag). Both mappings are mutated in place.
    """
    data = load_file(n)
    for game in data:
        turns = game["a"] + game["b"]
        for turn_key, win_key in (("a", "A"), ("b", "B")):
            ratio = game[turn_key] / turns
            rate = int(round(ratio * 100.0))
            # Every game counts toward the bucket; only wins bump `wins`.
            # (The original duplicated the counts increment in both branches.)
            counts[rate] += 1
            if game["W"] == win_key:
                wins[rate] += 1
def main():
    """Run a single layer of neurons over the Iris and diabetes datasets."""
    # Load data files
    nRows_iris = 150
    nColumns_iris = 5
    nRows_diabetes = 768
    nColumns_diabetes = 9
    iris = lf.load_file("iris.csv", nRows_iris, nColumns_iris)
    diabetes = lf.load_file("diabetes.data", nRows_diabetes, nColumns_diabetes)

    # Normalize data files
    iris = norm.normalize_iris(iris)
    diabetes = norm.normalize_diabetes(diabetes)

    # Feed inputs into neurons and report results per dataset
    layer_of_iris = ln.layer_of_neurons(nRows_iris, nColumns_iris, iris,
                                        "Iris")
    layer_of_iris.run_neurons()
    layer_of_iris.print_results()

    layer_of_diabetes = ln.layer_of_neurons(nRows_diabetes, nColumns_diabetes,
                                            diabetes, "Diabetes")
    layer_of_diabetes.run_neurons()
    layer_of_diabetes.print_results()
def encrypt(sheet):
    """Function to encrypt a message.

    Reads 'plaintext.txt', shifts each alphabet character by the pad value at
    its position within the line (mod 26), passes other characters through
    unchanged, and appends each ciphertext line to 'ciphertext.txt'.
    """
    file_content = load_file('plaintext.txt')
    for line in file_content:
        ciphertext = ''
        for position, character in enumerate(line):
            if character not in ALPHABET:
                # Non-alphabet characters (spaces, punctuation) are copied as-is.
                ciphertext += character
            else:
                encrypted = (ALPHABET.index(character)
                             + int(sheet[position])) % 26
                ciphertext += ALPHABET[encrypted]
        # print(f"{ciphertext}")
        # NOTE(review): the pad index restarts at 0 for every line, so the
        # same pad positions are reused across lines — this weakens the
        # one-time-pad guarantee; confirm whether single-line messages are
        # assumed.
        save_file('ciphertext.txt', ciphertext + '\n')
def __init__(self, file_name=None, l_range=range(12, 31, 2), s_range=range(2, 12)):
    """Initialise the moving-average back-test driver and run it.

    Parameters
    ----------
    file_name : str
        Data file to load; required despite the None default.
    l_range : range
        Candidate long-term moving-average window lengths.
    s_range : range
        Candidate short-term moving-average window lengths.
    """
    # A file name is required (identity check instead of `!= None`).
    assert file_name is not None
    # Load the closing-price series from the file.
    self.close_data = load_file(file_name).read_file()
    # TODO(original): should the year range be supplied by load_file?
    self.time_list = self.get_ftd_list()
    print(self.time_list)
    self.ret_list = {}
    # MACD conventionally uses 12/26 as its short/long periods.
    self.year_range = range(2006, 2021)
    self.l_range = l_range  # long-term moving-average choices
    self.s_range = s_range  # short-term moving-average choices
    self.fac_range = [i / 100 for i in range(96, 106, 2)]
    self.main_loop()
def __init__(self):
    """Set up the RC-car control node: publisher, Vicon subscriber, PID and path.

    Mode and speed are currently hard-coded (mode '3', slow speed); the
    interactive raw_input prompts are kept commented out below.
    """
    # Publish the car input data
    self.car_pub = rospy.Publisher("car_input", car_input, queue_size=10)
    self.R = rospy.Rate(1)
    self.speed = 0
    self.path = []
    self.control = []
    self.pos = []
    self.coordinates = np.empty((0, 2))
    self.error = np.empty((0, 1))
    self.steering_angle = np.empty((0, 1))
    self.Z = car_input()
    x_start = 0
    y_start = 0
    self.start_time = 0
    self.tracking = False
    self.driving = False

    # For moving the rc car at fixed velocity
    # elapsed_time = 0.0
    # start_time = time.time()
    # while elapsed_time < 1.5:
    #     elapsed_time = time.time() - start_time
    #     Z = car_input()
    #     Z.steer_angle = 0
    #     Z.power = 0.5
    #     print "publishing"
    #     self.car_pub.publish(Z)

    # Subscribing to position data from vicon
    self.vicon_sub = rospy.Subscriber("/vrpn_client_node/rc_car/pose",
                                      PoseStamped, self.callback,
                                      queue_size=10)

    # mode = raw_input("Select Vehicle Motion? 1) Move Straight 2) Straight Path (PID) 3) Curve Path (PID)")
    # speed_text = raw_input("Select Speed (s/f)")
    mode = '3'
    speed_text = 's'

    # PID gains tuned separately for the fast and slow speeds.
    if speed_text == 'f':
        self.speed = 0.6
        kp = 12.0  # 0.1 12
        ki = 0.01  # 0.001 0.01
        kd = 7.0  # 2.8 7.0
    else:
        self.speed = 0.35  # < 0.3 is reverse so use 3.5
        kp = 12.0  # 0.1 12
        ki = 0.001  # 0.001 0.01
        kd = 7.0  # 2.8 7.0

    if mode == '1':
        # Drive straight with no controller
        print('Driving in circle no PID')
        angle = -18.1
        self.move(angle)
    elif mode == '2':
        # Drive straight with PID Control
        angle = 0
        # Setup Path
        straight_path_file = 'rc_node/scripts/path_SRC/straight_path_2m.csv'
        straight_line = load_file(straight_path_file)
        self.path = Path(straight_line, x_start, y_start)
        # Setup PID
        self.control = PID(kp, ki, kd)
        self.tracking = True
        self.move(angle)
    elif mode == '3':
        # Drive allow curved path with PID Control
        angle = -18.1
        # Setup Path
        curve_path_file = 'rc_node/scripts/path_SRC/circle_path.csv'
        curved_line = load_file(curve_path_file)
        self.path = Path(curved_line, x_start, y_start)
        # Setup PID
        self.control = PID(kp, ki, kd)
        self.tracking = True
        self.move(angle)
    elif mode == '4':
        # Drive allow figure 8 path with PID Control
        angle = -18.1  # this angle will result in a 0.25 m radius circle
        # Setup Path
        figure8_path_file = 'rc_node/scripts/path_SRC/figure8_path_16x.csv'
        curved_line = load_file(figure8_path_file)
        self.path = Path(curved_line, x_start, y_start)
        # Setup PID
        self.control = PID(kp, ki, kd)
        self.tracking = True
        self.move(angle)
    else:
        print('Nothing selected')
# Cross-correlate matched circles on a CMB map at zero lag, for a range of
# angular radii, then plot correlation versus angular radius.
lon = 60
lat = 0
# Angular radii from 1 degree up to 90 degrees in 1-degree steps.
# (1.0/360 keeps the expression correct even under Python 2 integer division.)
ang_rad = np.arange((1.0 / 360) * 2 * np.pi, np.pi / 2, (2 * np.pi) / 360)
x_corr = np.zeros(len(ang_rad), dtype=complex)
x_corrs = np.zeros(shape=(360, RANGE))
for i in range(RANGE):
    cmb_map = rotate_to_top(np.multiply(cmb_map_og, mask_ring), lon, lat)
    # BUG FIX: the original inner loop reused the index name `i`, clobbering
    # the outer loop variable; renamed to `j`.
    for j in range(len(ang_rad)):
        strip_finder(cmb_map, ang_rad[j], NSIDE)
        circle_a = load_file('strip_a')
        circle_b = load_file('strip_b')
        x_corr[j] = match_circle_s(circle_a, circle_b, 0, 720)
# NOTE(review): x_corrs is allocated but never filled, and x_corr is
# overwritten on every outer iteration — confirm whether per-RANGE results
# were meant to be stored in x_corrs.

ang_rad_deg = ang_rad * (360 / (2 * np.pi))
fig, ax = plt.subplots()
ax.plot(ang_rad_deg, x_corr)
ax.set_title('Correlation of Circle of CMB: Galactic Plane, lon=' + str(lon))
ax.set_xlabel('Angular Radius')
ax.set_ylabel('X-Correlation: S')
ax.legend(['Lag = 0$^\circ$'])
ax.axhline(0, color='black')
plt.xticks(np.arange(0, 91, 10))
# Cross-correlate matched circles on the masked CMB map over 90 angular radii
# and 360 phase lags, then save one CSV per lag.
CMB_DIST = 14000
CELL_SIZE = 320
lon = 207.8
lat = -56.3
bins = 360

cmb_map = cmb_map_og * mask_ring
# BUG FIX: under Python 2 integer division `(1/360)` evaluates to 0, making
# the first radius 0 instead of 1 degree; 1.0/360 is correct on both
# Python 2 and 3.
ang_rad = np.linspace((1.0 / 360) * 2 * np.pi, np.pi / 2, 90)
no_circles = len(ang_rad)
x_corr = np.zeros((no_circles, bins), dtype=complex)
for i in range(no_circles):
    rad_lag = 0
    strip_finder(cmb_map, ang_rad[i], NSIDE)
    circle_a = load_file("strip_a", bins)
    circle_b = load_file("strip_b", bins)
    for j in range(bins):
        x_corr[i, j] = match_circle_s(circle_a, circle_b, rad_lag, bins, 720)
        rad_lag += 2 * np.pi / 360
    # Progress report (function-call form; under plain Python 2 this prints
    # a tuple — add `from __future__ import print_function` there).
    print(i, time.time() - start)

ang_rad = ang_rad * (360 / (2 * np.pi))
x_corr = np.swapaxes(x_corr, 0, 1)
# One CSV per phase lag.
for i in range(bins):
    np.savetxt('/opt/local/l4astro/rbbg94/data/ngp_corr_' + str(i) + '.csv',
               x_corr[i], delimiter=',')
# word_embeddings = model.input_embeddings() ## sp1 and sp2 based in distinct words # sp1, sp2 = scorefunction(word_embeddings) ## loss,data[0] to loss.data # print('eporch,batch=%2d %5d: sp=%1.3f %1.3f pair/sec = %4.2f loss=%4.3f\r' \ # % (epoch, batch_num, sp1, sp2, (batch_num - batch_new) * self.batch_size / (end - start), # loss.data), end="") print('eporch,batch=%2d %5d: pair/sec = %4.2f loss=%4.3f\r' \ % (epoch, batch_num, (batch_num - batch_new) * self.batch_size / (end - start), loss.data), end="") batch_new = batch_num start = time.time() print() batch_num = batch_num + 1 # saving each epoch # bell print('\a') model.save_embedding(os.path.join("data", "embed_epoch_" + str(epoch) + ".vec"), self.op.dic_idx2word) print() print("Optimization Finished!") if __name__ == '__main__': load_file("data",'europarl-v7.pt-en.pt') wc = word2vec(os.path.join("data",'europarl-v7.pt-en.pt')) wc.train()
@app.route('/search/results', methods=['GET', 'POST'])
def search_request():
    """Handle a search form submission and render the top-10 ranked results."""
    # print(request.form["input"])
    search_term = request.form.get("input")
    # search_term = flask.request.args.get('name')
    Q = cosine_similarity(books_data=books_data,
                          DF=DF,
                          tf_idf=tf_idf,
                          total_vocab=total_vocab,
                          total_vocab_size=total_vocab_size,
                          k=10,
                          query=search_term)
    print(Q)
    return render_template('results.html', res=Q)


# def index():
#     return render_template('index.html', variable = Q)


if __name__ == "__main__":
    # When load_data is False the index is (re)built from scratch before the
    # server starts.
    load_data = False
    if not load_data:
        books_data = load_file()
        N = books_data.shape[0]
        processed_bookname, processed_text = process_data(books_data)
        DF, total_vocab_size, total_vocab = build_DF(N, processed_text,
                                                     processed_bookname)
        # NOTE(review): this rebinds the module name `tf_idf` from the
        # tf_idf() function to its result, so the index cannot be rebuilt a
        # second time in this process — consider renaming one of the two.
        tf_idf, df = tf_idf(N, processed_text, processed_bookname)
    # Q = cosine_similarity(books_data = books_data,DF = DF, tf_idf = tf_idf,total_vocab = total_vocab, total_vocab_size = total_vocab_size, k = 10, query = "The evening of the day on which Mr Gibson had been to see the squire")
    app.run(debug=True)
error_phases = 360 ang_rad = np.linspace((1 / 360) * 2 * np.pi, np.pi / 2, 90) no_circles = len(ang_rad) x_corr = np.zeros((no_sims, no_circles, error_phases), dtype=complex) for i in xrange(no_sims): cmb_map = hp.fitsfunc.read_map( "/opt/local/l4astro/rbbg94/sims/dx12_v3_smica_cmb_mc_000" + str(i).zfill(2) + "_raw.fits") NSIDE = hp.npix2nside(len(cmb_map)) for j in xrange(no_circles): rad_lag = 0 strip_finder(cmb_map, ang_rad[j], NSIDE) circle_a = load_file('strip_a', bins) circle_b = load_file('strip_b', bins) for k in xrange(error_phases): x_corr[i, j, k] = match_circle_s(circle_a, circle_b, rad_lag, bins, 720) rad_lag += 2 * np.pi / bins print k, time.time() - start mean = np.zeros(no_circles) std_dev = np.zeros(no_circles) for i in xrange(error_phases): for j in xrange(no_circles): sum_sq = 0 mean[j] = np.mean(x_corr[:, j, i]) for k in xrange(no_sims):
# Benchmark the selected SAT solving methods over sudoku puzzles of each
# hardness level, `batch` times, timing each puzzle.
# NOTE(review): this span appears truncated — `results`, `method_result`,
# `puzzle_start` and `puzzle_end` are prepared/assigned but the code that
# records them is not visible here.
# methods = ['dpll', 'dpll_dlcs', 'dpll_jw', 'dpll_dlis', 'dpll_moms', 'nocnf']
methods = ['dpll', 'dpll_dlcs']
puzzle_display = 'off'
hardness = ['easy', 'medium', 'hard']
batch = 10
results = pd.DataFrame(
    columns=['method', 'hardness', 'resolvetime', 'batch'])
method_result = pd.DataFrame(
    columns=['method', 'hardness', 'methodtime', 'batch'])
for i in range(batch):
    print('Batch no:', i + 1)
    for hard in hardness:
        rule_path, puzzle_path, puzzle_dim = load_file(hard)
        knowledge_base = load_KB(rule_path)
        puzzles = load_puzzle(puzzle_path)
        # Sample 30 puzzles (with replacement) per hardness level.
        selected_puzzles = random.choices(puzzles, k=30)
        for method in methods:
            print('Hardness:', hard)
            print('Method:', method)
            start = timeit.default_timer()
            for puzzle in tqdm(selected_puzzles):
                puzzle_start = timeit.default_timer()
                solve_sudoku(knowledge_base,
                             puzzle,
                             method=method,
                             dim=puzzle_dim,
                             puzzle_display=puzzle_display)
                puzzle_end = timeit.default_timer()
# stack up the dataframes and later concatenate in case we
# want both commandline strings (for weird mutations like translocations)
# and files
mutated_region_dfs = []
if args.string:
    df = load_comma_string(args.string)
    mutated_region_dfs.append(df)

# loop over all the input files and
# load each one into a dataframe
for input_filename in args.input_file:
    transcripts_df, raw_genomic_mutation_df, variant_report = \
        load_file(input_filename, max_peptide_length=peptide_length)
    mutated_region_dfs.append(transcripts_df)

    # print each genetic mutation applied to each possible transcript
    # and either why it failed or what protein mutation resulted
    if not args.quiet:
        print_mutation_report(input_filename, variant_report,
                              raw_genomic_mutation_df, transcripts_df)

# No input at all: show usage and bail out.
if len(mutated_region_dfs) == 0:
    parser.print_help()
    # Single-argument call form prints identically on Python 2 and 3
    # (the original used a Python-2-only print statement).
    print("\nERROR: Must supply at least --string or --input-file")
    sys.exit()
answer_roman = ques_roman answer_price = ques_price price = {} # 保存商品单价 # 转为罗马数字表示 translater = Translater(info_roman=cond_roman) for k in ques_roman.keys(): answer_roman[k] = translater.translate_to_num( translater.translate_to_roman(k)) # 计算单价 for k, v in cond_price.iteritems(): for info_roman in v.keys(): price[k] = v[info_roman] / \ float(translater.translate_to_num( translater.translate_to_roman(info_roman))) # 计算结果 for k, v in ques_price.iteritems(): for info_roman in v.keys(): answer_price[k][info_roman] = int( translater.translate_to_num( translater.translate_to_roman(info_roman)) * price[k]) return answer_roman, answer_price if __name__ == "__main__": cond_roman, cond_price, ques_roman, ques_price = load_file("task.txt") print compute(cond_roman, cond_price, ques_roman, ques_price)