def main():
    """Run the interactive menu loop of the student management system."""
    actions = {
        1: insert.insert,
        2: search.search,
        3: delete.delete,
        4: modify.modify,
        5: sort.sort,
        6: total.total,
        7: show.show,
    }
    while True:
        menu.menu()
        option = input("请选择:")
        digits = re.sub('\D', '', option)  # keep only the digits typed
        if digits in ('0', '1', '2', '3', '4', '5', '6', '7'):
            choice = int(digits)
            if choice == 0:
                print("您已退出学生管理系统!")
                return
            actions[choice]()
def main():
    """Fetch general and per-operator stats, sort the operator lists and plot.

    NOTE(review): `version` is resolved from an enclosing scope — confirm it
    is defined at module level.
    """
    winrate, time_played = retrieve_data_general.retrieve()
    attackers, defenders = retrieve_data_operators.retrieve_data()
    sort.sort(attackers)
    sort.sort(defenders)
    plot.plot_operators(attackers, defenders, winrate, time_played, version)
def mainindividual(ind1, ind2, rank):
    """Accumulate sorted tensor terms over all permutations and positions.

    For every permutation of 1..len(ind1) and every position, both tensor
    variants are built, sorted, and the non-empty results collected; the
    collected terms are then merged via `sum`.
    """
    n = len(ind1)
    perms = sym(list(range(1, n + 1))).copy()
    terms = []
    for position in range(1, n + 1):
        for perm in perms:
            for tens in (tensor1(ind1, ind2, perm, position, rank),
                         tensor2(ind1, ind2, perm, position, rank)):
                sorted_term = sort(tens)
                if sorted_term != []:
                    terms.append(sorted_term)
    # NOTE(review): `sum` here is presumably a project helper that merges
    # like terms (the builtin cannot add lists without a start value) — confirm.
    return sum(terms)
def files_in_a_flash(path):
    """Learn sorting probabilities from the pre-sorted archives, then sort.

    PARAMETERS:
    -----------
    path: the path to the emplacement of the archives file in memory
    """
    model = learn.learn(path + '/sorted')
    sort.sort(model, path)
def test_sort():
    """sort.sort must order the list in place, keeping duplicates."""
    values = [2, 1, 3, -5, 3, 4]
    sort.sort(values)
    assert values == [-5, 1, 2, 3, 3, 4]
def Sortbytype(self, event):
    """Re-sort entries, then rebuild the left list box from *.list files."""
    sort.sort()
    self.vLeftListBox.Destroy()  # destroy the stale widget before rebuilding
    names = [
        os.path.splitext(item)[0]
        for item in os.listdir(os.getcwd())
        if os.path.splitext(item)[1] in ('.list',)
    ]
    self.vLeftListBox = wx.ListBox(self, -1, choices=names)
    self.__do_layout()  # re-apply the window layout
    self.Bind(wx.EVT_CONTEXT_MENU, self.ListRightButton, self.vLeftListBox)
    self.Bind(wx.EVT_LISTBOX, self.ButtonClick, self.vLeftListBox)
    event.Skip()
def addWord(word):
    """Append *word* (space-separated) to the dictionary file and re-sort it.

    Fixes: the explicit file.close() inside the `with` block was redundant,
    and the handle reopened for sort() was never closed (resource leak);
    both opens are now managed by `with`.
    """
    filename = "ordbok_eng.txt"
    with open(filename, "a") as file:
        file.write(" ")
        file.write(word)
    # Reopen read/write so sort() can rewrite the contents in place; the
    # with-block guarantees the handle is closed afterwards.
    with open(filename, "r+") as file:
        sort(file, filename)
def blog():
    """Render a single post when ?id= is present, otherwise the full listing."""
    blog_id = request.args.get('id')
    if blog_id:
        post = Blog.query.filter_by(id=blog_id).first()
        return render_template("newblog.html", blog=post)
    posts = Blog.query.all()
    sort(posts)
    return render_template("blog.html", posts=posts)
def Sortbytype(self, event):
    """Sort entries by type and repopulate the list box with .list basenames."""
    sort.sort()
    self.vLeftListBox.Destroy()  # drop the previous widget
    entries = []
    for item in os.listdir(os.getcwd()):
        base, ext = os.path.splitext(item)
        if ext in ('.list', ):
            entries.append(base)
    self.vLeftListBox = wx.ListBox(self, -1, choices=entries)
    self.__do_layout()  # reload the window layout
    self.Bind(wx.EVT_CONTEXT_MENU, self.ListRightButton, self.vLeftListBox)
    self.Bind(wx.EVT_LISTBOX, self.ButtonClick, self.vLeftListBox)
    event.Skip()
def test_anylenlist():
    """Mixed ints and floats, including negatives, must sort ascending."""
    data = [1, 2, 4, 0, 1.099, -12, -2, -4.56, 2, -3]
    expected = [-12, -4.56, -3, -2, 0, 1, 1.099, 2, 2, 4]
    assert s.sort(data) == expected
def _get_rank_list(L, *, cmp=None):
    """Return a tuple giving, for each element of L, its rank under *cmp*.

    Equal elements receive distinct consecutive ranks (sort stability
    decides ties). Ranks are 0-based.
    """
    def _default_cmp(a, b):
        # three-way compare: negative / zero / positive
        if a == b:
            return 0
        return -1 if a < b else 1

    value_cmp = cmp if cmp else _default_cmp
    # pair each value with its original position: [position, value]
    pairs = [[idx, val] for idx, val in enumerate(L)]
    # order by value
    sort.sort(pairs, cmp=lambda p, q: value_cmp(p[1], q[1]))
    # overwrite each value slot with its rank in that ordering
    for rank, pair in enumerate(pairs):
        pair[1] = rank
    # restore original order by position
    sort.sort(pairs, cmp=lambda p, q: p[0] - q[0])
    return tuple(pair[1] for pair in pairs)
def call_sort(nums):
    """Write a markdown verdict link: [S] if sort.sort matches sorted(), else [B].

    NOTE(review): the command line is built by string formatting and run
    through the shell — acceptable for trusted argv, unsafe for untrusted
    input.
    """
    if sorted(nums) == sort.sort(nums):
        cmd = 'echo "[S]({}/{}/sort.py)" > {}'.format(
            sys.argv[1], sys.argv[2], sys.argv[3])
    else:
        cmd = 'echo "[B]({}/evaluation/sort.md)" > {}'.format(
            sys.argv[1], sys.argv[3])
    os.system(cmd)
def testSort2(self):
    """Version-like names must order by their numeric components."""
    unsorted = ['Ie5', 'Ie6', 'Ie4_01', 'Ie401sp2', 'Ie4_128', 'Ie501sp2']
    expected = ['Ie4_01', 'Ie4_128', 'Ie5', 'Ie6', 'Ie401sp2', 'Ie501sp2']
    self.assertEqual(expected, sort.sort(unsorted))
def b_thresh(self, b_data):
    """Compute candidate thresholds along every feature axis of *b_data*.

    Returns thresh_a[d][n]: d = axis index, n = threshold index.
    Raises ValueError when the rows carry no columns.
    """
    dat = b_data
    if len(dat[0]) == 0:
        raise ValueError('dimension of data should not be zero')
    # last column is assumed to be the label, hence one fewer feature axis
    # — TODO confirm against the caller
    dim = len(b_data[0]) - 1
    d_num = len(dat)
    sort_res = []
    for d in range(dim):
        # fresh sorter per axis; quicksort orders rows by column d
        sorter = sort.sort(dat)
        sort_res.append(sorter.quicksort(0, d_num - 1, d))
    thresh_a = []
    for axis, rows in enumerate(sort_res):
        thresh_a.append(self.thresh(rows, axis))
    return thresh_a
def b_thresh(self, b_data):
    """Derive threshold candidates per axis; thresh_a[d][n] (d=axis, n=index)."""
    if len(b_data[0]) == 0:
        raise ValueError('dimension of data should not be zero')
    n_rows = len(b_data)
    # final column presumably holds the label — TODO confirm
    n_axes = len(b_data[0]) - 1
    # sort the rows once per feature axis
    per_axis_sorted = [
        sort.sort(b_data).quicksort(0, n_rows - 1, axis)
        for axis in range(n_axes)
    ]
    return [
        self.thresh(rows, axis)
        for axis, rows in enumerate(per_axis_sorted)
    ]
def test_sort(self):
    """Words must come back ordered by descending corpus frequency."""
    frequencies = {
        '中国': 59.44,
        '下午': 87.28,
        '上': 3409.33,
        '下': 1549.34,
        '上午': 17.74,
        '中午': 18.99,
    }
    words = ['中国', '下午', '上', '下', '上午', '中午']
    expected = ['上', '下', '下午', '中国', '中午', '上午']
    self.assertEqual(sort.sort(frequencies, words), expected)
def test_if_we_call_sort_with_no_iterable_should_raise_TypeError(self):
    """Calling sort() without arguments must raise with a clear message."""
    caught = None
    try:
        sort()
    except Exception as err:
        caught = err
    self.assertIsNotNone(caught)
    self.assertEqual(str(caught), 'We need iterable')
def master(urls):
    """Map every url to keyword counts, merge them, and return them sorted."""
    keywords = Counter()
    for url in urls:
        # fold this page's counts into the running total
        keywords += Counter(mapper(url))
    return sort(keywords)
def start(initial_file, compare_files, current_dir):
    """
    Start the script, responsible for creating folders and executing the compare file
    :param initial_file: the first primary file
    :param compare_files: an OrderedDict of secondary files and match types
    :param current_dir: a full path to the directory of the fasta files
    :return:
    """
    total_compares = len(compare_files)
    current_compare = 1
    input_protein = initial_file
    with open("{}/counts.txt".format(current_dir), "w") as FILEHANDLE1:
        print("creating counts file...")
        # Each secondary file is BLASTed against the current primary file in
        # turn; the sorted hits of one round become the primary input of the
        # next round.
        for input_nucl in compare_files:
            # Build the makeblastdb / tblastn command lines for this round.
            makeblastdb_cline, tblastn_cline, next_input = set_cmd(
                current_dir, current_compare, input_protein, input_nucl,
                compare_files[input_nucl])
            subprocess.run(makeblastdb_cline.split())
            subprocess.run(tblastn_cline.split())
            # Parse this round's BLAST XML output against the protein file.
            compare.start("compare_output{}.xml".format(current_compare),
                          input_protein)
            sort.sort(next_input, "sorted_{}.txt".format(next_input))
            # Append this round's counts to counts.txt.
            count(next_input, input_nucl, FILEHANDLE1)
            next_compare = current_compare + 1
            if next_compare <= total_compares:
                # Stage the next round: copy this round's output into a
                # fresh compareN directory and make it the primary file.
                next_file = "{}_output{}.fasta".format(
                    next_input, current_compare)  # the next primary file
                os.mkdir("{}/compare{}".format(current_dir, next_compare))
                shutil.copyfile(
                    next_input,
                    "{}/compare{}/{}".format(current_dir, next_compare,
                                             next_file))
                input_protein = next_file
            current_compare += 1
            print(input_nucl)
            print(compare_files[input_nucl])
def test_if_iterable_is_not_list_should_raise_TypeError(self):
    """A string argument is rejected: only tuples and lists are accepted."""
    caught = None
    try:
        sort('123')
    except Exception as err:
        caught = err
    self.assertIsNotNone(caught)
    self.assertEqual(str(caught), 'Iterable should be tuple or list')
def test_if_have_list_or_tuple_and_have_key_should_raise_TypeError(self):
    """Passing a key for a plain list must be rejected."""
    caught = None
    try:
        sort([1, 2, 3], False, 123)
    except Exception as err:
        caught = err
    self.assertIsNotNone(caught)
    self.assertEqual(str(caught), 'Key can be added only if we have dicts')
def test_if_elements_in_tuple_or_list_are_not_int_should_raise_ValueError(
        self):
    """A non-integer element ('41') must trigger a ValueError."""
    caught = None
    try:
        sort([1, 2, 3, '41', 5, 6])
    except Exception as err:
        caught = err
    self.assertIsNotNone(caught)
    self.assertEqual(str(caught), 'Elements need to be integers')
def next_permutation(L, *, cmp=None):
    """Return the permutation of *L* that follows it in cmp-order.

    Returns None when *L* is already the last permutation.
    """
    ranks = list(_get_rank_list(L, cmp=cmp))
    n = len(L)
    # scan right-to-left for the last ascent: rank[i-1] < rank[i]
    for i in range(n - 1, 0, -1):
        if ranks[i - 1] < ranks[i]:
            pivot = i - 1
            pivot_rank = ranks[pivot]
            break
    else:
        return  # descending everywhere: no successor
    tail = ranks[pivot + 1:]
    sort.sort(tail)
    # smallest rank in the tail larger than the pivot's rank
    swap = bisect_left(tail, pivot_rank)
    tail[swap], ranks[pivot] = ranks[pivot], tail[swap]
    ranks = ranks[:pivot + 1] + tail
    ordered = sort.sorted(L, cmp=cmp)
    return tuple(ordered[r] for r in ranks)
def __call__(
        self, date_string: str,
        sort_by_time: Optional[bool] = True
) -> List[Dict[str, Union[datetime, str]]]:
    """Fetch and parse the log entries for *date_string* from the service.

    Raises RequestError on any transport/HTTP failure and ValueError when
    the response payload reports an error.
    """
    try:
        # NOTE(review): path.join is used to build a URL; it works on POSIX
        # but urllib.parse.urljoin would be the robust choice — confirm.
        response = requests.get(path.join(self.base_url, date_string))
        response.raise_for_status()
    except Exception as error:
        raise RequestError(str(error))
    data = response.json()
    # The payload carries an error field alongside the log list.
    if data[ERROR_NAME]:
        raise ValueError(data[ERROR_NAME])
    log_list = list(map(LogItem.parse_obj, data[LOG_LIST_NAME]))
    if sort_by_time:
        # presumably a project-level in-place sort, not a builtin — verify
        sort(log_list, key=lambda x: x.created_at)
    return log_list
def blog():
    """Render a single post (?id=), one user's posts (?user=), or all posts.

    Fix: a ?user= query for a user with no posts crashed with IndexError
    on posts[0]; it now falls back to the generic title.
    """
    if request.args.get('id'):
        blog_id = request.args.get('id')
        blog = Blog.query.filter_by(id=blog_id).first()
        return render_template("new-blog.html", blog=blog)
    elif request.args.get('user'):
        user_id = request.args.get('user')
        posts = Blog.query.filter_by(user_id=user_id).all()
        sort(posts)
        # Guard: posts may be empty for an unknown or inactive user.
        if posts:
            title = "Posts by " + posts[0].user.username
        else:
            title = "All Post"
        return render_template("blog.html", posts=posts, title=title)
    else:
        posts = Blog.query.all()
        sort(posts)
        return render_template("blog.html", posts=posts, title="All Post")
def pickLocations(users):
    """Collect and rank food and activity venues for a group of users.

    Fix: the activity list was sorted with len(food_venues) - 1 as its
    upper bound, so it was truncated or over-indexed whenever the two
    lists differ in length; it now uses its own length.
    """
    food_venues = list()
    act_venues = list()
    food = ''
    other = ''
    maxFood, maxOther = rankInterests(users)
    # Build comma-separated category-id filters for the two searches.
    for Food in maxFood:  # n
        food += foursquareAPI.get_category_id(Food) + ','
    for Other in maxOther:  # n
        other += foursquareAPI.get_category_id(Other) + ','
    for user in users:  # u
        for venue in foursquareAPI.search(user, food):  # num returned
            food_venues.append(venue)
        for venue in foursquareAPI.search(user, other):  # num returned
            act_venues.append(venue)
    food_venues = removeDuplicates(food_venues)
    act_venues = removeDuplicates(act_venues)
    food_venues = sort.sort(food_venues, 0, len(food_venues) - 1)  # nlogn
    # FIX: previously sorted with len(food_venues) - 1 as the bound.
    act_venues = sort.sort(act_venues, 0, len(act_venues) - 1)
    return food_venues, act_venues
def lsymtenmult(ind, tens, rank):
    """Multiply the symmetrization of *ind* against each term of *tens*.

    For every permutation of 1..len(ind), each term of *tens* is chained
    factor-by-factor (inner indices must match via nrc/rcn); surviving
    products are sorted, given their coefficient, and merged with `sum`.

    Fixes:
      * the sorted, coefficient-carrying term (sortedprod) was built and
        then discarded — the raw addprod was appended instead; sortedprod
        is now appended.
      * the unused permuind scratch list was removed.
    """
    product = []
    lenth = len(ind)
    Sym = sym(list(range(1, lenth + 1))).copy()
    for grpele in Sym:
        for addtens in tens:
            addprod = []
            coeff = addtens[0]  # leading entry of a term is its coefficient
            for t in range(1, lenth + 1):
                ij = nrc(ind[grpele[t - 1] - 1], rank).copy()
                kl = nrc(addtens[t], rank).copy()
                # factors chain only when the inner indices agree
                if ij[1] == kl[0]:
                    addprod.append(rcn(ij[0], kl[1], rank))
                else:
                    addprod = []
                    break
            if addprod != []:
                sortedprod = sort(addprod)
                sortedprod.insert(0, coeff)
                product.append(sortedprod)  # FIX: was `addprod`
    if product != []:
        # `sum` is presumably a project helper that merges like terms
        prod = sum(product)
    else:
        prod = []
    return prod
def search(query):
    """Take a search string and search our data!

    Query can be any number of words long, yeah!
    """
    query = query.lower()
    query_words = query.split()
    matched_results = []
    for original_name in data:
        search_name = original_name.lower()
        frequency = frequency_count(query, search_name)
        # every word of the query must appear for this to count as a match
        hits = sum(1 for word in query_words if word in search_name)
        if hits == len(query_words):
            matched_results.append({
                'result': original_name,
                'count': frequency,
            })
    sort(matched_results)
    return matched_results
def test_if_keys_in_dict_are_not_the_same_should_raise_error(self):
    """Dicts with mismatched key sets ('ime' vs 'name') must be rejected."""
    records = [
        {'name': 'Marto', 'age': 24},
        {'name': 'Ivo', 'age': 27},
        {'ime': 'Sashko', 'age': 25},
    ]
    caught = None
    try:
        sort(records)
    except Exception as err:
        caught = err
    self.assertIsNotNone(caught)
    self.assertEqual(str(caught), 'Keys need to be the same for all elements')
def sort_hash(tab) : try : l = [] for (key, value) in tab.items() : l.append((value, key)) l = sort.sort(l, 0) r_l = [] for (val, key) in l : log = '%s %d' % (key, val) r_l.append(log) return r_l except : t, v, tb = sys.exc_info() print 'sort_hash(%s:%s)' % (t, v) return []
def __init__(self):
    QtGui.QMainWindow.__init__(self)
    # Load the interface from the .ui file next to this module.
    uifile = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                          'main.ui')
    uic.loadUi(uifile, self)
    self.actionQuit.triggered.connect(QtGui.qApp.quit)
    # FIX: connect the callable itself. The original wrote
    # connect(sort.sort()), which invoked sort.sort() immediately at
    # construction and connected its return value instead of the slot.
    self.actionSort.triggered.connect(sort.sort)
    # Serial link to the device (hard-coded port; consider making this
    # configurable).
    self.ser = serial.Serial(port='COM18',
                             baudrate=9600,
                             parity=serial.PARITY_NONE,
                             stopbits=1,
                             bytesize=8,
                             timeout=2)
    self.qmc = False
def knearest(test, data, k):
    """Return the k rows of *data* closest to *test* (Euclidean distance).

    Each returned entry is [distance, row_index], nearest first.
    """
    if len(data) <= 0:
        raise ValueError('N of data should not be zero')
    if len(test) != len(data[0]):
        raise ValueError('dim of two test and data should be the same')
    # pair each row's distance with its index, then sort by distance
    distances = [[cartesian(test, data[i]), i] for i in range(len(data))]
    ordered = sort.sort(distances).quicksort(0, len(distances) - 1, 0)
    return [ordered[j] for j in range(k)]
def handle(self, reader, writer):
    """Connection callback invoked for every new client connection.

    Reads one JSON task {'algorithm': ..., 'sequence': ...}, sorts the
    sequence and writes {'sequence': ...} back as JSON.

    :param reader: StreamReader for the incoming byte stream.
    :param writer: StreamWriter for the outgoing byte stream.
    :return: None.
    """
    data = yield from reader.read(self.data_chunk_size)
    if not data:
        return None
    # FIX: json.loads() lost its `encoding` parameter in Python 3.9 (it had
    # been ignored since 3.1); data.decode() already yields str.
    message = json.loads(data.decode())
    print('[SERVER] Received task')
    sorted_data = sort(message['algorithm'], message['sequence'])
    print('[SERVER] Sorted sequence')
    message = json.dumps({'sequence': sorted_data}).encode('utf-8')
    writer.write(message)
    print('[SERVER] Sent answer')
    yield from writer.drain()
### 潜在トピック比による各楽曲のcos類似度を出力 for i in xrange(len(filenamelist)): plsa.Cos_Scale(dic_pd_z, filenamelist, i) ### 計算したcos尺度を簡易的にcsvファイルに書き込む plsa.Write_Csv_Cos(config.list_cos_scale_csv, "experiment/cos_scale/cos_scale.csv") ### 計算したcos尺度を表のようにしてcsvファイルに書き込む plsa.Write_Csv_Cos(config.list_cos_scale_table_csv, "experiment/cos_scale/cos_scale_table.csv") ####### print "\n" print "ソート中" sort.sort(p.pz_w, 0) # P(w|z)をソートして各トピックの出現確率の高い単語を出力 print "********************************************************" sort.sort(p.pz_d, 1) # P(d|z)をソートして各トピックの出現確率の高い文書を出力 ## ソート後のP(w|z)を表示してもソートする前と同じだが,結果はきちんと表示されているためOKとしている # print "P(w|z) = ", # print p.pz_w # P(w|z) # うまく動かない.csvファイルにしたほうがよいかも # # tf値を文書ごとにまとめる # list_tftf = [0] * 10 # for i in range(0,2): # f = open("/home/matsui-pc/matsui/experiment/tf" + str(i) + ".txt" , "r" ) # data = f.read() # print data
def testSample(self):
    """Five distinct values collapse to [3, 3, 3] at a total cost of 6."""
    A = [4, 5, 2, 1, 3]
    actual, cost = sort.sort(A)
    # assertEquals is a deprecated alias; assertEqual is the canonical name
    self.assertEqual(actual, [3, 3, 3])
    self.assertEqual(cost, 6)
def sort_size(self):
    """Sort self._rows in place by size, using the size_comp comparator."""
    sort.sort(self._rows, size_comp)
def test_keep10(self):
    """The four 1s are removed (cost 4), leaving only the 10."""
    A = [10, 1, 1, 1, 1]
    actual, cost = sort.sort(A)
    # assertEquals is deprecated in favour of assertEqual
    self.assertEqual(actual, [10])
    self.assertEqual(cost, 4)
import io import sort import parse def count_unique(sorted_file): f = io.open(sorted_file, 'r') count = 0 prev = '' s = 'init' while s != '': s = f.readline() if s != '' and s != prev: count += 1 prev = s return count if __name__ == '__main__': if len(sys.argv) < 2: print 'Usage [binary] [file]' sys.exit() in_filename = sys.argv[1] misidn_filename = 'misidn.txt' word_filename = 'word.txt' parse.parse(in_filename, misidn_filename, word_filename) sort.sort(misidn_filename, misidn_filename) sort.sort(word_filename, word_filename) count1 = count_unique(misidn_filename) count2 = count_unique(word_filename) print 'number of unique misidn is', count1 print 'number of unique words is', count2
####### print "*********** 最終的な出力 ************" ####### print "P(z) = ", ####### print p.pz # P(z) ####### print "P(d|z) = ", ####### print p.pz_d # P(d|z) ####### print "P(w|z) = ", ####### print p.pz_w # P(w|z) ####### print "P(z|d,w)", ####### print p.pdw_z # P(z|d,w) ####### print "\n" print "ソート中" sort.sort(p.pz_w) # # tf値を文書ごとにまとめる # list_tftf = [0] * 10 # for i in range(0,2): # f = open("./experiment/tf" + str(i) + ".txt" , "r" ) # data = f.read() # print data # list_tftf[i] = data # # list_tftf.append(data) # # list_tftf = map(int, list_tftf) # f.close() # print list_tftf
def testtype(self):
    """sort.sort must return a list."""
    result = sort.sort([5, 3, 6, 1])
    # assertIsInstance reports failures more clearly than comparing type()
    self.assertIsInstance(result, list)
def testoutput(self):
    """A four-element list comes back fully sorted ascending."""
    result = sort.sort([6, 3, 9, 5])
    self.assertEqual([3, 5, 6, 9], result)
def testSorted(self):
    """An already-sorted list is returned unchanged at zero cost."""
    A = [1, 2, 3, 4, 5]
    actual, cost = sort.sort(A)
    # assertEquals is deprecated; assertEqual is the canonical name
    self.assertEqual(actual, A)
    self.assertEqual(cost, 0)
def test_remove10(self):
    """Cheaper to drop the single 10 (cost 9) than to raise the ten 1s."""
    A = [10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
    actual, cost = sort.sort(A)
    # assertEquals is deprecated in favour of assertEqual
    self.assertEqual(actual, [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    self.assertEqual(cost, 9)
def testSort1(self):
    """Numeric filename stems must sort by value, not lexicographically."""
    ordered = sort.sort(['88.txt', '5.txt', '11.txt'])
    self.assertEqual(['5.txt', '11.txt', '88.txt'], ordered)
def testSort2(self):
    """Version-like names order by their numeric components."""
    original = ['Ie5', 'Ie6', 'Ie4_01', 'Ie401sp2', 'Ie4_128', 'Ie501sp2']
    expected = ['Ie4_01', 'Ie4_128', 'Ie5', 'Ie6', 'Ie401sp2', 'Ie501sp2']
    self.assertEqual(expected, sort.sort(original))
from sys import argv

import cat
import echo
import head
import sort
import tail

# Dispatch table: flag -> handler. Unknown flags and missing arguments now
# produce a usage message instead of an IndexError or a silent exit.
_COMMANDS = {
    '-c': cat.cat,
    '-e': echo.echo,
    '-h': head.head,
    '-s': sort.sort,
    '-t': tail.tail,
}

if len(argv) < 3 or argv[1] not in _COMMANDS:
    print('usage: {} [-c|-e|-h|-s|-t] <argument>'.format(argv[0]))
else:
    _COMMANDS[argv[1]](argv[2])
def sortdat(data, axis):
    """Quicksort the rows of *data* by column *axis* via the sort helper."""
    sorter = sort.sort(data)
    return sorter.quicksort(0, len(data) - 1, axis)
def test_sort(self):
    """sort.sort returns the letters of a string sorted, non-letters dropped."""
    sentence = ("When not studying nuclear physics, Bambi likes to play "
                "beach volleyball.")
    self.assertEqual(
        "aaaaabbbbcccdeeeeeghhhiiiiklllllllmnnnnooopprsssstttuuvwyyyy",
        sort.sort(sentence))
    self.assertEqual("abcde", sort.sort("edcab"))
def sort(self):
    """Sort self._rows in place by weight, using the weight_comp comparator."""
    sort.sort(self._rows, weight_comp)