def iterative(basename, maximize=False):
    capacity, sizes, requests, caches = io.read_file(basename + '.in')
    result = 0  #25234071
    print('preparing...')
    constraints = generate_constraints(capacity, sizes, requests, caches)
    s = z3.Optimize() if maximize else z3.Solver()
    for c in constraints:
        s.add(c)
    if maximize:
        s.maximize(SERVE)
    while True:
        target = int(result)
        print('solving round for', target)
        # Require a strictly better objective value than the best found so far.
        s.add(SERVE > target)
        s.check()
        try:
            m = s.model()
        except z3.Z3Exception:
            # No model available: nothing beats the current target.
            print('UNSAT')
            break
        result = int(str(m.evaluate(SERVE)))
        print('found', result)
        d = {
            c: [v for v in videos if m.evaluate(has_video(v, c))]
            for c, videos in caches.items()
        }
        io.write_file("{}/{}.txt".format(basename, result), d)
        if maximize:
            break
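# Hypothetical driver for the solver above -- a minimal sketch, assuming a
# Hash Code-style input file '<basename>.in' exists and that io, z3,
# generate_constraints, SERVE and has_video come from the surrounding module.
# The basename 'example' is a placeholder, not a real input file.
if __name__ == '__main__':
    iterative('example', maximize=False)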
def main():
    start = time.clock()
    # Process the corpus whose non-word errors have already been corrected.
    testdata = readfile.read_file(
        'E:\\!!!!!!!!!!study\\学习\\大四上\\NLP\\NLP intro\\homework\\1\\spelling-correction\\Assignment1\\'
        + 'result6.txt')
    testdata.columns = (['id', 'senten'])
    result_list = []
    for i in range(100):
        can = []
        words = nltk.word_tokenize(testdata['senten'][i])
        for j in range(len(words)):
            p = re.findall(r"[,.?!']", words[j])
            n = re.findall(r"[0-9]+", words[j])
            if (len(p) == 0 and len(n) == 0):
                if j == 0:  # first word of the sentence
                    can.append(correct_first(words[j], words[j + 1], 0.9))
                elif j == len(words) - 1:  # last word of the sentence
                    can.append(correct_last(words[j], words[j - 1], 0.9))
                else:  # word in the middle
                    can.append(
                        correct(words[j], words[j - 1], words[j + 1], 0.9))
            else:
                can.append(words[j])
        result_list.append(can)
        print("sen " + str(i) + ' has been processed')
    print(time.clock() - start)
    # Reassemble the corrected sentences.
    result = []
    for i in range(len(result_list)):
        words = result_list[i]
        sen = result_list[i][0].capitalize()
        for j in range(1, len(words)):
            if words[j] in ',.?!':
                sen = sen + words[j]
            else:
                sen = sen + ' ' + words[j]
        result.append(sen)
    f = open(
        "E:/!!!!!!!!!!study/学习/大四上/NLP/NLP intro/homework/1/spelling-correction/Assignment1/result_iter.txt",
        'w')
    for i in range(len(result)):
        f.write(str(i + 1) + '\t' + result[i] + '\n')
    f.close()
    # Evaluate the result against the reference answers.
    anspath = 'E:/!!!!!!!!!!study/学习/大四上/NLP/NLP intro/homework/1/spelling-correction/Assignment1/ans.txt'
    resultpath = 'E:/!!!!!!!!!!study/学习/大四上/NLP/NLP intro/homework/1/spelling-correction/Assignment1/result_iter.txt'
    ansfile = open(anspath, 'r')
    resultfile = open(resultpath, 'r')
    count = 0
    for i in range(1000):
        ansline = ansfile.readline().split('\t')[1]
        ansset = set(nltk.word_tokenize(ansline))
        resultline = resultfile.readline().split('\t')[1]
        resultset = set(nltk.word_tokenize(resultline))
        if ansset == resultset:
            count += 1
    print(count)
    print("Accuracy is : %.2f%%" % (count * 1.00 / 10))
def main():
    # if input("Test?: ") == 'y':
    #     test()
    accept = False
    while not accept:
        try:
            grammer_name, grammer = readfile.read_file(input("File name for grammar: "))
        except IOError:
            print("Invalid filename")
        else:
            accept = True
    verb = input("Verbose output? (y/n) ") == 'y'
    while True:
        test_string = input('String to test: ')
        ## Push the string onto its own stack
        input_string = lll()
        for i in range(len(test_string) - 1, -1, -1):  ## push in reverse so the first char ends up on top
            input_string = input_string.push(test_string[i])
        stack = lll()
        stack = stack.push('S')
        if verb:
            print('Test String | Stack')
            input_string.print_all(); print(' ', end=''); stack.print_all(); print()
        while True:
            input_string, str_pop = input_string.pop()
            stack, stk_pop = stack.pop()
            s_result = None
            if stk_pop != str_pop:
                s_result = grammer.search(stk_pop, str_pop)
                if s_result:
                    for i in range(len(s_result) - 1, 0, -1):  # push all but the first char
                        stack = stack.push(s_result[i])
                else:
                    good = False  ## string is not part of the language
                    break
            if (stk_pop == str_pop) and (str_pop == '$'):
                good = True
                break
            if verb:
                input_string.print_all(); print(' ', end=''); stack.print_all(); print()
        if good:
            print(test_string + ' is in the language of ' + grammer_name)
        else:
            print(test_string + ' is NOT in the language of ' + grammer_name)
        if input("Test another string? (y/n) ") != 'y':
            break
def run_training():
    train_log_dir = '/home/viplab/Desktop/vgg_var2/log/train/'
    tfrecords_file = '/home/viplab/Desktop/vgg_var2/tfrecord/train.tfrecords'
    save_dir = '/home/viplab/Desktop/vgg_var2/tfrecord/'
    image_list, obj_label_list = readfile.read_file()
    N_SAMPLES = len(obj_label_list)
    readfile.convert_to_tfrecord(image_list, obj_label_list, save_dir, 'train')
    train_batch, label_batch = readfile.read_and_decode(
        tfrecords_file, BATCH_SIZE, IMG_W, IMG_H, N_CLASSES)
    print('train_batch', train_batch)
    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])
    y = tf.placeholder(tf.float32, shape=[BATCH_SIZE, N_CLASSES])
    train_logits = VGG19.VGG19(x, N_CLASSES)
    train_loss = VGG19.losses(train_logits, y)
    train__acc = VGG19.evaACC(train_logits, y)
    train_op = VGG19.trainning(train_loss, learning_rate)
    # Merge all previously defined summary ops into one.
    summary_op = tf.summary.merge_all()
    saver = tf.train.Saver()  # saves the model
    sess = tf.Session()
    # Initialize all variables.
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # Create a file writer for the summary data.
    train_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    counter = 0
    k = 0
    try:
        for step in np.arange(0, EPOCH):
            print("epoch: ", step)
            k = 0
            while k < int(N_SAMPLES / BATCH_SIZE):
                if coord.should_stop():
                    break
                #print('train_batch', train_batch)
                image, label = sess.run([train_batch, label_batch])
                _, tra_loss, tra_acc = sess.run(
                    [train_op, train_loss, train__acc],
                    feed_dict={
                        x: image,
                        y: label
                    })
                counter += 1
                print('Step %d, train loss = %.2f, train accuracy = %.4f%%' %
                      (counter, tra_loss, tra_acc * 100))
                #if counter % 50 == 0:
                #    summary_str = sess.run(summary_op)
                #    train_writer.add_summary(summary_str, counter)
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=counter)
                k += 1
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
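# Minimal entry-point sketch for the training loop above, assuming BATCH_SIZE,
# IMG_W, IMG_H, N_CLASSES, EPOCH and learning_rate are defined at module level
# as the function expects.
if __name__ == '__main__':
    run_training()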
    'u': 21, 'v': 22, 'w': 23, 'x': 24, 'y': 25, 'z': 26}
filename = 'thefile.txt'
if len(sys.argv) > 1:
    filename = sys.argv[1]
if len(sys.argv) > 2:
    delimiter = sys.argv[2].strip('"')
file_str = readfile.read_file(filename)
lines = file_str.split(delimiter)
print lines
highest = 0, ''
#break each line up into sentences
for line in lines:
    #sline = line.replace('\n','')
    #print 'sline={0}'.format(sline)
    line_amount = 0
    for l_char in line:
        #making the assumption that the encoding of the file is ascii
        #if unicode support is needed, unichr() function can be used
#!/usr/bin/env python2
# sam1.py
# Perrine Letellier
#

import numpy as np
from scipy import ndimage
import networkx as nx
import readfile
import operator

time = 54000
nb_car = 8
source = 4516

graph = readfile.read_file("links.txt")

f = open("sol", "w")
print >> f, nb_car

for car in range(nb_car):
    costs_dict = dict()
    paths_dict = dict()
    time_dict = dict()
    for vertice in graph.nodes():
        # nx.shortest_path returns only the node list, so compute the cost and
        # the path separately (the original tuple unpacking would fail).
        a = nx.shortest_path_length(graph, source, vertice, weight='cost')
        b = nx.shortest_path(graph, source, vertice, weight='cost')
        costs_dict[str(vertice)] = a
        paths_dict[str(vertice)] = b
    #compute the time needed for each path
    total = 0
def dump_to_db(host='localhost', port=3306):
    try:
        connection = pymysql.connect(
            host=host,
            port=port,
            user=db_username,
            password=db_password,
        )
    except Exception as e:
        print('MySQL connection failed!')
        raise e
    else:
        print('MySQL connection succeeded')
    cursor = connection.cursor()
    files = _read_files(databases_dir)
    if not files:
        print('The database directory is empty')
        return None
    # Fetch all existing databases.
    query = "show databases"
    cursor.execute(query)
    _databases = cursor.fetchall()
    for file in files:
        try:
            database, data, cols_name = read_file(
                os.path.join(databases_dir, file))
        except Exception as e:
            print(f'Error while processing {file}!')
            raise e
        if (database, ) in _databases:
            print(f"Database {database} already exists")
            continue
        # Create the database.
        try:
            sql = "create database " + f"`{database}`"
            cursor.execute(sql)
        except Exception as e:
            print(f'Failed to create database {database}!')
            continue
        print(f'Database {database} created successfully!')
        sql = 'use ' + f"`{database}`"
        cursor.execute(sql)
        for table_name in data:
            try:
                sql = "drop table if exists " + f"{table_name}"
                cursor.execute(sql)
                # Create the table.
                sql = f'create table `{table_name}` (`id` int not null auto_increment, '
                for col in cols_name[table_name]:
                    sql += f'`{col}` varchar(45),'
                sql += 'primary key (`id`) )'
                try:
                    cursor.execute(sql)
                except Exception as e:
                    print(f'Failed to create table {table_name}!')
                    continue
                else:
                    print(f'Table {table_name} created successfully!')
                # Insert the data; rows is one row of the table.
                for i, rows in enumerate(data[table_name]):
                    # Build the insert statement.
                    base_sql = f"insert into `{table_name}` ("
                    for col in cols_name[table_name]:
                        base_sql += f"`{col}`,"
                    base_sql = base_sql[:-1]
                    base_sql += ") values ("
                    sql = ""
                    for d in rows:
                        # Escape single quotes (the airport data contains "'").
                        d = d.replace("'", "\\'")
                        sql += f"'{d}',"
                    sql = sql[:-1]
                    sql += ")"
                    try:
                        # Insert one row.
                        cursor.execute(base_sql + sql)
                    except Exception as e:
                        raise e
                    else:
                        print(
                            f'Table {table_name}: {int((i+1)/len(data[table_name])*100)}% done',
                            end='\r')
            except Exception as e:
                print(f'Failed to insert data into table {table_name}!')
        try:
            connection.commit()
        except Exception as e:
            print(f"Database {database}: failed to insert data into table {table_name}!")
            connection.rollback()
    connection.close()
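# Hypothetical usage sketch, assuming db_username, db_password and
# databases_dir are configured elsewhere in the module; the host and port
# shown here are just the function's defaults.
if __name__ == '__main__':
    dump_to_db(host='localhost', port=3306)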
delimiter = '. '
alphabet = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8,
            'i': 9, 'j': 10, 'k': 11, 'l': 12, 'm': 13, 'n': 14, 'o': 15,
            'p': 16, 'q': 17, 'r': 18, 's': 19, 't': 20, 'u': 21, 'v': 22,
            'w': 23, 'x': 24, 'y': 25, 'z': 26}
filename = 'thefile.txt'
if len(sys.argv) > 1:
    filename = sys.argv[1]
if len(sys.argv) > 2:
    delimiter = sys.argv[2].strip('"')
file_str = readfile.read_file(filename)
lines = file_str.split(delimiter)
print lines
highest = 0, ''
#break each line up into sentences
for line in lines:
    #sline = line.replace('\n','')
    #print 'sline={0}'.format(sline)
    line_amount = 0
    for l_char in line:
        #making the assumption that the encoding of the file is ascii
        #if unicode support is needed, unichr() function can be used
def read_file(path):
    return readfile.read_file(path)