def __init__(self):
    """Set up the crawler: parser, endpoints, HTTP session, cache, file I/O.

    Resumes crawling from the cached last id when one exists; otherwise
    starts from the configured crawl start id.
    """
    self.parser = MyParser()
    self.__base_url = base_url()    # site root endpoint (private)
    self.__login_url = login_url()  # authentication endpoint (private)
    self.__failures = 0             # failure counter, starts at zero
    # NOTE(review): open_session() presumably logs in using the URLs set
    # above — confirm before reordering these assignments.
    self.__session = self.open_session()
    self.cache = Cache()
    self.file_io_driver = FileIODriver()
    # Resume from the cache's last recorded id if present, else start fresh.
    self.current_url_id = int(
        self.cache.last_id) if self.cache.last_id else crawl_start_id()
    self.current_url = ''
def __init__(self):
    """Build the main window: set up the UI, child dialogs, and signal wiring."""
    super(MainWindow, self).__init__()
    self.ui = Ui_MainWindow()
    self.ui.setupUi(self)
    # Window title (Ukrainian): "Program for morphological analysis of
    # words and solving tests".
    self.setWindowTitle('Програма для морфологічного аналізу слів, та вирішування тестів')
    self.ui.pushButton_solve.clicked.connect(self.solve)
    self.parser_obj = MyParser()
    # Auxiliary dialogs: instructions, about box, and settings.
    self.another = Another()
    self.ab = About()
    self.settings = Settings()
    self.ui.action.triggered.connect(self.another.instruction)
    self.ui.action_about.triggered.connect(self.ab.about)
    self.ui.action_3.triggered.connect(self.settings.show_set)
    # Settings checkbox toggles visibility of the prediction widget.
    self.settings.ui.checkBox.clicked.connect(self.hide_predict)
    # Startup disclaimer (Ukrainian): "The program does not guarantee
    # 100% correct answers".
    self.show_message('Програма не гарантує 100% правильність відповіді')
def main():
    """Drive the pipeline: lex the input file, then parse it to the output file.

    Command-line layout: <funcs> <defines> <infile> <outfile>.
    """
    argv = sys.argv
    funcs = argv[1]     # function definitions
    defines = argv[2]   # constant definitions
    infile = argv[3]    # input file path
    outfile = argv[4]   # output file path
    # Lex the input, then run the parser and write its result.
    my_parser = MyParser(Lexer(LexerReader(infile)), funcs, defines)
    my_parser.parse(outfile)
def main(url):
    """Fetch an article, POS-tag it, record entity/adjective tags in the
    module-level tag_dict, and print the mean arousal score.

    Args:
        url: Address of the article to fetch and analyse.
    """
    parser = MyParser(url)
    text = parser.bpArtGetText()
    print(text)
    pos_tagged = semantics.text_to_pos_tags(text)
    entities = semantics.entities(pos_tagged, "PER", "ORG", "LOC", "DAT", "FAC", "GPE")
    entities = semantics.ordered_set_of_tags(entities)
    adjectives = semantics.adjectives(pos_tagged)
    adjectives = semantics.ordered_set_of_tags(adjectives)
    # BUG FIX: the original `for tag in entities and adjectives` used a
    # boolean `and`, which short-circuits to `adjectives` alone whenever
    # `entities` is truthy — entity tags were never printed here.
    # Iterate both collections instead.
    for tag in list(entities) + list(adjectives):
        print(tag)
    for ent in entities:
        tag_dict[ent] = "ENT"
    print(entities)
    for adj in adjectives:
        tag_dict[adj] = "ADJ"
    print(adjectives)
    mean_arousal = lookup.mean(pos_tagged)
    print(mean_arousal)
# Prompt until the user names an existing input file; a blank entry falls
# back to the default 'input.txt'.
while True:
    input_file = input(
        'Input file name [Press \'Enter\' to user default - input.txt]: ')
    if input_file == '':
        input_file = 'input.txt'
    if os.path.isfile(input_file):
        break
    print('ImportError: could not find file', input_file)
print('Found file', input_file)

# Read all lines of the input file.  NOTE: readlines() preserves the
# trailing '\n' characters — the original comment claiming they were
# removed was wrong; the parser must tolerate them.
with open(input_file, 'r') as open_file:
    lines = open_file.readlines()

# Parse the text to generate stack code.
parser = MyParser(print_tree, time_parse)
output = parser.parse(lines)
# FIX: identity comparison against None (`is not None`), not `!= None`.
if output is not None:
    print('Finished parsing with no errors.')
    # Print the generated output, one node per line.
    print('\nPrinting generated output:')
    for node in output:
        print(node)
def setUp(self):
    # Fresh parser instance per test so no state leaks between test cases.
    self.parser = MyParser()
### To Add:
### - Telephone number search
### - import pdf
### - output resulting data to json or similar
### - add functionality to run the script in a different folder

from filefinder import FileFinder
from my_parser import MyParser

# Discover candidate files, parse them, and dump the results to JSON.
finder = FileFinder()
file_list, flag = finder.get_files()
parsed = MyParser(file_list, flag)
parsed.write_json_file()
from my_parser import MyParser from aro_lookup import AroLookup from api_calls import tagSearch import semantics import sys genresUrl = 'http://labrosa.ee.columbia.edu/millionsong/sites/default/files/AdditionalFiles/unique_terms.txt' testUrl = 'http://www.bbc.com/news/technology-31552029' testSentence = "This is an ultimate, to beat Chelsea, who I think will go on and win the Champion's League - it really is." lookup = AroLookup() tag_dict = {} logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO) parser = MyParser(testUrl) text = parser.bpArtGetText() print(text) pos_tagged = semantics.text_to_pos_tags(text) entities = semantics.entities(pos_tagged, "PER", "ORG", "LOC", "DAT", "FAC", "GPE") print(entities) print(semantics.ordered_set_of_tags(entities)) def main(url): parser = MyParser(url) text = parser.bpArtGetText() print(text) pos_tagged = semantics.text_to_pos_tags(text) entities = semantics.entities(pos_tagged, "PER", "ORG", "LOC", "DAT",