def main():
    if len(sys.argv) != 2:
        parsing.error(0, "0")
    inst = parsing.parsing(sys.argv[1])
    # `rubik` is expected to be defined at module level (the cube state)
    print_rubik.print_rubik(rubik)
    shuffle.shuffle(rubik, inst)
    if not utils.check_rubik(rubik):
        print("the cube is not solved")
    else:
        print("the cube is solved")
def lisp(filename):
    tokens = lexing.lexing(filename)
    # print(tokens)
    ast = parsing.parsing(tokens)
    print(ast)
    interpret(ast)
    print('*' * 20)
    print("Successfully ran %s." % filename)
    print(env)
    print("")
def cfg_construction(file_data_model):
    # recognize NSF-Facing Interface Data Model
    fnfi = open(file_data_model, 'r')
    line = fnfi.readline()  # ignore first line
    field = ''
    requiredinfo = []
    contentcfglist = []
    order = 0
    # declare stack for Data Model tree recognition
    st = stack.Stack()
    st.push('', False)
    # read all lines
    while True:
        line = fnfi.readline()
        if not line:
            break
        # parse the line
        lineparsing = parsing.parsing(line)
        skip = lineparsing[0]
        level = lineparsing[1]
        field = lineparsing[2]
        isContent = lineparsing[3]
        # declare CFG node
        if isContent:
            cfgnode = TextfreeGrammar('content', level)
            cfgnode.setinfo(len(requiredinfo))
            requiredinfo.append(field)
            contentcfglist.append(cfgnode)
        else:
            cfgnode = TextfreeGrammar('structure', level)
            cfgnode.settag('<' + field + '>', '</' + field + '>')
            cfgnode.setname(field)
            cfgnode.setorder(order)
            order += 1
        # connect CFG nodes by investigating the level
        while st.level() != level:
            st.pop()
        cfgnode.setParent(st.topnode())
        st.push(cfgnode, skip)
        # print('now field: ' + field + ', level: ' + str(st.level()))
    fnfi.close()
    print('Completed constructing CFG for ' + file_data_model + '\n')
    # print(requiredinfo)
    return [contentcfglist, requiredinfo]
def experimental_events():
    # return events_TOY()
    datalow = array(
        'd',
        map(sub,
            map(sub,
                map(sub, parsing("data ev/"), parsing("data e2pi/")),
                parsing("data e4pi/")),
            parsing("data bkg/")))
    ddatalow = array('d')
    i = 0
    for symb in parsing("data ev/"):
        ddatalow.append(
            float(
                sqrt(symb + parsing("data de2pi/")[i]**2 +
                     parsing("data de4pi/")[i]**2 +
                     parsing("data dbkg/")[i]**2)))
        i = i + 1
    Ndata = datalow + parsing("data evh/")
    dNdata = ddatalow + parsing("data devh/")
    # print(Ndata)
    i = 0
    Ndata_true = array('d')
    for el in Ndata:
        Ndata_true.append(el)
        Ndata[i] = Ndata[i] / dNdata[i]
        i = i + 1
    # print(Ndata)
    print("number of points = ", len(dNdata))
    with open('data/Ndata.txt', 'w') as file:
        i = 0
        for el in Ndata:
            file.write(str(Ndata_true[i]))
            file.write(' ')
            file.write(str(dNdata[i]))
            file.write("\n")
            i = i + 1
    return (Ndata, dNdata, Ndata_true)
def dfa_construction(file_data_model):
    # recognize Consumer-Facing Interface Data Model
    fcfi = open(file_data_model, 'r')
    line = fcfi.readline()  # ignore first line
    field = ''
    extractedinfo = []
    index = 0
    # declare stack for automaton construction
    st = stack.Stack()
    node_accepter = DFAnode('accepter')
    st.push(node_accepter, False)
    # read all lines
    while True:
        line = fcfi.readline()
        if not line:
            break
        # parse the line
        lineparsing = parsing.parsing(line)
        skip = lineparsing[0]
        level = lineparsing[1]
        field = lineparsing[2]
        isExtractor = lineparsing[3]
        # declare DFA node
        if isExtractor:
            dfanode = DFAnode('extractor')
            dfanode.setinfo(index)
            index += 1
            extractedinfo.append(field)
        else:
            dfanode = DFAnode('middle')
        # connect DFA nodes by investigating the level
        while st.level() != level:
            st.pop()
        st.topnode().connectNode(dfanode, field)
        st.push(dfanode, skip)
        # print('now field: ' + field + ', level: ' + str(st.level()))
    fcfi.close()
    print('Completed constructing DFA for ' + file_data_model)
    return [node_accepter, extractedinfo]
def execute(cell_id=None):
    """Gets the piece of code for cell_id and executes it."""
    try:
        cell_id = int(cell_id)
    except ValueError as e:
        logger.warning(e)
        return redirect('/')
    global current_execute_count
    try:
        current_execute_count += 1
        execute_counters[cell_id] = current_execute_count
        inputs[cell_id] = request.form['input{}'.format(cell_id)]
        texts = parsing(inputs[cell_id].split())
        result = antiplagiat.compare(texts)
    except BaseException as e:  # anything could happen inside, even an `exit()` call
        result = [str(e)]
    outputs[cell_id] = result
    return redirect('/')
def handle_message(event):
    hoge = event.message.text
    command = parsing.parsing(hoge)
    if command is None:
        return
    elif command['command'] == 'にゃーん':
        line_bot_api.reply_message(event.reply_token, TextSendMessage("にゃ〜ん"))
    elif command['command'] == 'せいせき':
        room_id, _, room_name = my_database.current_tournament()
        today = datetime.now(JST)
        my_database.update_score("{:%Y%m%d}".format(today), room_id)
        if today.time() < time(1, 0, 0):
            my_database.update_score(
                "{:%Y%m%d}".format(today - timedelta(days=1)), room_id)
        score = my_database.get_score_sum(room_id)
        text = f"{room_name}"
        for r in score:
            text += "\n"
            text += r[0] + ":" + str(r[1])
        print(text)
        line_bot_api.reply_message(event.reply_token, TextSendMessage(text))
    elif command['command'] == "こうしん":
        if len(command['args']) != 2:
            line_bot_api.reply_message(
                event.reply_token,
                TextSendMessage(text="usage : -こうしん [date(YYYYmmdd)] [大会名]"))
            return
        date, taikai = command['args']
        sql = f"SELECT room_id FROM tournaments WHERE name='{taikai}'"
        room = my_database.sql_requests(sql)[0][0]
        res = my_database.update_score(date, room)
        line_bot_api.reply_message(event.reply_token, TextSendMessage(text=res))
    # elif command["command"] == "たいかいとうろく":
    #     if len(command['args']) != 3:
    #         line_bot_api.reply_message(
    #             event.reply_token,
    #             TextSendMessage(text="usage : -たいかいとうろく [name] [roomid] [url]"))
    #         return
    #     name, room, url = command['args']
    #     text = my_database.set_tournament(name, room, url)
    #     line_bot_api.reply_message(
    #         event.reply_token,
    #         TextSendMessage(text=text))
    elif command['command'] == "ゆーざー":
        if len(command['args']) != 2:
            line_bot_api.reply_message(
                event.reply_token,
                TextSendMessage(text="usage : -ゆーざー [name] [tenhouId]"))
            return
        nickname, tenhou_id = command['args']
        my_database.set_user(nickname, tenhou_id)
        line_bot_api.reply_message(
            event.reply_token,
            TextSendMessage(text=f"{tenhou_id}を{nickname}さんのアカウントとして記憶しました。"))
    elif (result == 2):
        value = 'undetermined'
        color = '\033[95m'
    print(color + key + ' is ' + value)


if __name__ == "__main__":
    global rules
    global initial
    global facts
    if (len(sys.argv) != 2 and len(sys.argv) != 3):
        error('python main.py [file]')
    filename = sys.argv[1]
    content = ''
    try:
        f = open(filename, 'r')
        content = f.read()
        f.close()
    except:
        error('error opening file')
    content = cleanContent(content)
    checkLines(content)
    initial = findLine(content, '=')
    query = findLine(content, '?')
    rules = getRules(content)
    rules = parsing(rules)
    params.initFacts()
    params.setRules(rules)
    params.setInitial(initial)
    displayResult(query)
        return (synonim)
    except:
        return False


def posi_nega(posi_list, nega_list):
    try:
        results = model.wv.most_similar(positive=posi_list, negative=nega_list)
        synonim = []
        for result in results:
            synonim.append(result[0])
        return (synonim)
    except:
        return False


while (True):
    string = input('計算式:')
    posi_list, nega_list = parsing(string)
    posi_list = exist(posi_list)
    nega_list = exist(nega_list)
    if (len(posi_list) == 0):
        print(nega_nega(nega_list))
    elif (len(nega_list) == 0):
        print(posi_posi(posi_list))
    else:
        print(posi_nega(posi_list, nega_list))
    print("---")
from pylab import *
from plyfile import PlyData, PlyElement
import NearestNeighbors as nn
import parsing
import analysis
from sklearn import svm
from sklearn.svm import SVC
from sklearn import tree
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier

if __name__ == '__main__':
    # load raw txt data
    parser = parsing.parsing()
    extracted_features_path = './Data/Cornell/features/office_data_nodefeats.txt'
    raw_data = parser.parse_txt_data(extracted_features_path)
    # load ply data, here just test for office scene 1
    pic_path_set = ['./Data/Cornell/office_data_ply/scene5.ply']
    for pic_path in pic_path_set:
        office_data = parser.parse_ply_data(pic_path)
        print(office_data)
    # create analysis utils
    analyser = analysis.analysis()
    # =========================================================
    # K-NN
    # =========================================================
    f'Recovery (re-parsing) mode started from file {FILE_RECOVERY}')
file_work = os.getcwd() + DIR_RECOVERY + FILE_RECOVERY
logger.info(f'Reading data to parse from file {file_work}')
try:
    article_numbers = get_article_from_file(file_work)
    if article_numbers:
        logger.success(
            f'Data loaded successfully. Total number of article numbers: {len(article_numbers)}')
    else:
        logger.warning(f'File {file_work} is empty. Stopping the script')
        sys.exit(1)
except Exception as ex:
    logger.error(f'Error while reading data from the file: {ex}')
    logger.warning('Stopping the script with an error')
    sys.exit(1)
try:
    # main product parsing
    result_parsing = parsing.parsing(article_numbers=article_numbers)
    # save the parsing results to a CSV file
    save_result_to_csv_file(result=result_parsing)
    logger.success('Script finished successfully')
except Exception as ex:
    logger.critical(
        f'Script execution finished with an error. See the log files or contact the developer: {ex}')
def home():
    news = parsing()
    return render_template("home.html",
                           title="Home",
                           news=news,
                           day=str(date.today()))
def synthesis():
    return parsing.parsing(request.get_json(force=True))
def test_list(self):
    self.url = ["https://www.bbc.com/news/world-europe-52510545"]
    self.assertIsInstance(parsing.parsing(self.url), list)
import os, os.path
import parsing

result = []
partys = ["AfD", "Die_Gruene", "Die_Linke", "Die_Partei", "MLPD",
          "Piratenpartei", "SPD", "Tierschutzpartei"]
for party in partys:
    party_dict = {party: []}  # renamed from `dict` to avoid shadowing the builtin
    print(party)
    plakats = os.listdir("dataset/{}".format(party))
    if ".DS_Store" in plakats:
        plakats.remove(".DS_Store")
    for plakat in plakats:
        print(plakat)
        party_dict.get(party).append(parsing.parsing("{}/{}".format(party, plakat)))
    result.append(party_dict)
print(result)
def main(filename):
    rules = parsing(filename)
    lexical_rules = transform(rules)
def test_size(self):
    self.url = ["https://www.bbc.com/news/world-europe-52510545",
                "https://www.bbc.com/news/world-europe-52510545"]
    self.assertEqual(len(parsing.parsing(self.url)), 2)