def test_get_sub_elements(self):
    # Declare instance and prepare XML element with its sub-elements
    instance = LexicalEntry()
    element = Element("LexicalEntry")
    lemma = SubElement(element, "Lemma")
    SubElement(lemma, "feat", att="lexeme", val="hello")
    SubElement(element, "feat", att="partOfSpeech", val="toto")
    SubElement(element, "feat", att="status", val="draft")
    # Test results
    get_sub_elements(instance, element)
    self.assertEqual(instance.get_lexeme(), "hello")
    self.assertEqual(instance.get_partOfSpeech(), "toto")
    self.assertEqual(instance.get_status(), "draft")
    del instance, element, lemma
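# For reference (a sketch, not asserted by the test itself), the element tree
# built above corresponds to the following XML, where each 'feat' carries an
# attribute name ('att') and its value ('val'):
#   <LexicalEntry>
#       <Lemma><feat att="lexeme" val="hello"/></Lemma>
#       <feat att="partOfSpeech" val="toto"/>
#       <feat att="status" val="draft"/>
#   </LexicalEntry>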
def test_mdf_write(self):
    import sys, os
    # Create LMF objects
    lexical_entry = LexicalEntry()
    lexical_entry.lemma = Lemma()
    lexical_entry.partOfSpeech = "toto"
    lexical_entry.status = "draft"
    lexical_entry.lemma.lexeme = "hello"
    lexicon = Lexicon()
    lexicon.add_lexical_entry(lexical_entry)
    # Write MDF file and test result
    utest_path = sys.path[0] + '/'
    mdf_filename = utest_path + "output.txt"
    mdf_write(lexicon, mdf_filename)
    mdf_file = open(mdf_filename, "r")
    expected_lines = ["\\lx hello" + EOL, "\\ps toto" + EOL, "\\st draft" + EOL, EOL]
    self.assertListEqual(expected_lines, mdf_file.readlines())
    mdf_file.close()
    # Customize mapping
    lmf2mdf = dict({
        "lx": lambda lexical_entry: lexical_entry.get_status(),
        "ps": lambda lexical_entry: lexical_entry.get_partOfSpeech(),
        "st": lambda lexical_entry: lexical_entry.get_lexeme()
    })
    order = ["st", "lx", "ps"]
    # Write MDF file and test result
    mdf_write(lexicon, mdf_filename, lmf2mdf, order)
    mdf_file = open(mdf_filename, "r")
    expected_lines = ["\\st hello" + EOL, "\\lx draft" + EOL, "\\ps toto" + EOL, EOL]
    self.assertListEqual(expected_lines, mdf_file.readlines())
    mdf_file.close()
    del lexical_entry.lemma
    lexical_entry.lemma = None
    del lexical_entry, lexicon
    # Remove MDF file
    os.remove(mdf_filename)
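# For reference, the default mapping writes one '\marker value' field per line
# and terminates the record with an empty line, as the first expected_lines
# above asserts:
#   \lx hello
#   \ps toto
#   \st draft
#
# The customized mapping then reroutes which LMF accessor feeds each marker and
# in which order the markers appear.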
def test_tex_write(self):
    import sys, os
    # Create LMF objects
    lexical_entry = LexicalEntry()
    lexical_entry.lemma = Lemma()
    lexical_entry.partOfSpeech = "toto"
    lexical_entry.status = "draft"
    lexical_entry.lemma.lexeme = "hello"
    lexicon = Lexicon()
    lexicon.add_lexical_entry(lexical_entry)
    lexical_resource = LexicalResource()
    lexical_resource.add_lexicon(lexicon)
    # Write LaTeX file and test result
    utest_path = sys.path[0] + '/'
    tex_filename = utest_path + "output.tex"
    tex_write(lexical_resource, tex_filename)
    tex_file = open(tex_filename, "r")
    begin_lines = [
        EOL,
        "\\begin{document}" + EOL,
        "\\maketitle" + EOL,
        "\\newpage" + EOL,
        EOL,
        "\\def\\mytextsc{\\bgroup\\obeyspaces\\mytextscaux}" + EOL,
        "\\def\\mytextscaux#1{\\mytextscauxii #1\\relax\\relax\\egroup}" + EOL,
        "\\def\\mytextscauxii#1{%" + EOL,
        "\\ifx\\relax#1\\else \\ifcat#1\\@sptoken{} \\expandafter\\expandafter\\expandafter\\mytextscauxii\\else" + EOL,
        "\\ifnum`#1=\\uccode`#1 {\\normalsize #1}\\else {\\footnotesize \\uppercase{#1}}\\fi \\expandafter\\expandafter\\expandafter\\mytextscauxii\\expandafter\\fi\\fi}" + EOL,
        EOL,
        "\\setlength\\parindent{0cm}" + EOL,
        EOL,
        "\\addmediapath{.}" + EOL,
        "\\addmediapath{./mp3}" + EOL,
        "\\addmediapath{./wav}" + EOL,
        "\\graphicspath{{" + os.path.abspath('.') + "/pylmflib/output/img/}}" + EOL,
        EOL,
        "\\newpage" + EOL,
        "\\begin{multicols}{2}" + EOL,
        EOL
    ]
    end_lines = [
        "\\end{multicols}" + EOL,
        "\\end{document}" + EOL
    ]
    expected_lines = [
        "\\newpage" + EOL,
        "\\section*{\\centering- \\textbf{\\ipa{H}} \\textbf{\\ipa{h}} -}" + EOL,
        #"\\pdfbookmark[1]{\\ipa{ H h }}{ H h }" + EOL,
        "\\paragraph{\\hspace{-0.5cm} \\textbf{\\ipa{hello}}} \\hypertarget{01}{}" + EOL,
        "\\markboth{\\textbf{\\ipa{hello}}}{}" + EOL,
        "\\textit{Status:} draft" + EOL,
        "\\lhead{\\firstmark}" + EOL,
        "\\rhead{\\botmark}" + EOL,
        EOL
    ]
    self.assertListEqual(begin_lines + expected_lines + end_lines, tex_file.readlines())
    tex_file.close()
    # Customize mapping
    my_lmf_tex = dict({
        "Lemma.lexeme": lambda lexical_entry: "is " + lexical_entry.get_lexeme() + "." + EOL,
        "LexicalEntry.id": lambda lexical_entry: "The lexical entry " + str(lexical_entry.get_id()) + " ",
        "LexicalEntry.partOfSpeech": lambda lexical_entry: "Its grammatical category is " + lexical_entry.get_partOfSpeech() + "." + EOL,
        "LexicalEntry.status": lambda lexical_entry: "Warning: " + lexical_entry.get_status() + " version!" + EOL
    })
    my_order = ["LexicalEntry.id", "Lemma.lexeme", "LexicalEntry.partOfSpeech", "LexicalEntry.status"]
    def lmf2tex(entry, font):
        result = ""
        for attribute in my_order:
            result += my_lmf_tex[attribute](entry)
        return result
    # Write LaTeX file and test result
    tex_write(lexical_resource, tex_filename, None, None, lmf2tex, font)
    tex_file = open(tex_filename, "r")
    expected_lines = [
        "\\newpage" + EOL,
        "\\section*{\\centering- \\textbf{\\ipa{H}} \\textbf{\\ipa{h}} -}" + EOL,
        #"\\pdfbookmark[1]{\\ipa{ H h }}{ H h }" + EOL,
        "The lexical entry 01 is hello." + EOL,
        "Its grammatical category is toto." + EOL,
        "Warning: draft version!" + EOL,
        "\\lhead{\\firstmark}" + EOL,
        "\\rhead{\\botmark}" + EOL,
        EOL
    ]
    self.assertListEqual(begin_lines + expected_lines + end_lines, tex_file.readlines())
    tex_file.close()
    del lexical_entry.lemma
    lexical_entry.lemma = None
    del lexical_entry, lexicon
    lexicon = None
    del lexical_resource
    # Remove LaTeX file
    os.remove(tex_filename)
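# Note on the customized call above: tex_write() accepts a user-supplied
# formatter with the signature lmf2tex(entry, font), which must return the
# complete LaTeX output for one lexical entry. Here 'font' is assumed to be
# the font mapping imported from the library configuration, and the two None
# arguments are assumed (from the call site alone) to stand for the default
# preamble and introduction templates.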
def mdf_read(filename=None, mdf2lmf=mdf_lmf, lexicon=None, id=None, encoding=ENCODING):
    """! @brief Read an MDF file.
    @param filename The name of the MDF file to read with full path, for instance 'user/input.txt'.
    @param mdf2lmf A Python dictionary describing the mapping between MDF markers and LMF representation. Default value is the 'mdf_lmf' dictionary defined in 'pylmflib/config/mdf.py'. Please refer to it as an example.
    @param lexicon An existing Lexicon to fill with the lexical entries that are read.
    @param id A Python string identifying the lexicon to create.
    @param encoding Use 'utf-8' encoding by default. Otherwise, the user has to specify the native encoding of the document.
    @return A Lexicon instance containing all lexical entries.
    """
    import re
    # If not provided, create a Lexicon instance to contain all lexical entries
    if lexicon is None:
        lexicon = Lexicon(id)
    if filename is None:
        filename = lexicon.get_entrySource()
    else:
        # Set lexicon attribute
        lexicon.set_entrySource(filename)
    # Read in unicode
    mdf_file = open_read(filename, encoding=encoding)
    # MDF syntax is the following: '\marker value'
    mdf_pattern = """^\\\(\w*) (<(.*)>)? ?(.*)$"""
    # Add each lexical entry to the lexicon
    current_entry = None
    sub_entry = None
    component = None
    main_entry = None
    for line in mdf_file.readlines():
        # Do not parse empty lines
        if line != EOL:
            result = re.match(mdf_pattern, line)
            if result is None:
                # Line does not match MDF syntax => continue parsing next line
                continue
            marker = result.group(1)
            attrs = result.group(3)
            value = result.group(4)
            # Do not consider markers starting with an underscore character (e.g. '_sh' and '_DateStampHasFourDigitYear')
            if marker[0] == '_':
                continue
            # Remove trailing spaces and end-of-line characters
            value = value.rstrip(' \r\n')
            # Do not consider empty fields
            if value == "":
                continue
            # Check if the current entry is a multiword expression
            is_mwe = False
            if marker == "lf":
                lf = value.split(" = ")
                if lf[0].startswith("Component"):
                    component_nb = lf[0].lstrip("Component")
                    value = lf[1]
                    is_mwe = True
            # 'lx' and 'se' markers indicate a new entry
            if marker == "lx" or marker == "se" or is_mwe:
                # Compute a unique identifier
                uid = uni2sampa(value)
                if marker == "se":
                    # Create a subentry
                    sub_entry = LexicalEntry(uid)
                    # An MDF subentry corresponds to an LMF lexical entry
                    mdf2lmf["lx"](value, sub_entry)
                    # Add it to the lexicon
                    lexicon.add_lexical_entry(sub_entry)
                    # Manage main entry
                    if main_entry is None:
                        main_entry = current_entry
                    else:
                        current_entry = main_entry
                    # Set main entry
                    homonym_nb = current_entry.get_homonymNumber()
                    if homonym_nb is None:
                        homonym_nb = ""
                    sub_entry.create_and_add_related_form(current_entry.get_lexeme() + homonym_nb, "main entry")
                elif is_mwe:
                    # Create a component entry
                    component = LexicalEntry(uid)
                    # An MDF component corresponds to an LMF lexical entry
                    mdf2lmf["lx"](value, component)
                    # Add it to the lexicon
                    lexicon.add_lexical_entry(component)
                    # Manage current entry
                    if sub_entry is not None:
                        current_entry = sub_entry
                    # Set component
                    homonym_nb = current_entry.get_homonymNumber()
                    if homonym_nb is None:
                        homonym_nb = ""
                    current_entry.create_and_add_component(component_nb, value)
                    component.create_and_add_related_form(current_entry.get_lexeme() + homonym_nb, "complex predicate")
                    component.set_independentWord(False)
                else:
                    # Create a new entry
                    current_entry = LexicalEntry(uid)
                    # Add it to the lexicon
                    lexicon.add_lexical_entry(current_entry)
                    # Reset main entry
                    main_entry = None
            # Map MDF marker and value to LMF representation
            try:
                if attrs is not None:
                    # There are attributes
                    attributes = {}
                    # Remove quotation marks from attributes if any
                    attrs = attrs.replace('"', '')
                    for attr in attrs.split(' '):
                        attributes.update({attr.split('=')[0]: attr.split('=')[1]})
                    # A customized marker starts with '__' characters
                    mdf2lmf["__" + marker](attributes, value, current_entry)
                else:
                    mdf2lmf[marker](value, current_entry)
                if sub_entry is not None:
                    current_entry = sub_entry
                    sub_entry = None
                if component is not None:
                    sub_entry = current_entry
                    current_entry = component
                    component = None
            except KeyError:
                # When printing, we need to convert 'unicode' into 'str' using 'utf-8' encoding
                print Warning("MDF marker '%s' encountered for lexeme '%s' is not defined in configuration" % (marker.encode(ENCODING), current_entry.get_lexeme().encode(ENCODING)))
            except Error as exception:
                exception.handle()
    mdf_file.close()
    return lexicon
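# A minimal usage sketch (an assumption, not part of the library): given an MDF
# file 'input.txt' containing records such as
#   \lx hello
#   \ps toto
# mdf_read() can be called with the default mapping, or with a copy of it that
# overrides individual marker handlers. As the dispatch above shows, a handler
# takes (value, lexical_entry); set_lexeme() is assumed to be the setter
# counterpart of the get_lexeme() accessor used in this module.
def example_mdf_read():
    # Start from the default marker mapping and override the 'lx' handler so
    # that lexemes are normalized to lower case (purely for illustration)
    my_mdf2lmf = dict(mdf_lmf)
    my_mdf2lmf["lx"] = lambda value, entry: entry.set_lexeme(value.lower())
    # Parse the file into a new Lexicon identified as 'my_lexicon'
    return mdf_read("input.txt", mdf2lmf=my_mdf2lmf, id="my_lexicon")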